Apologies in advance if this is a naive question or the wrong place to ask it; I'm very new to the details of dask. I'm trying to run some relatively simple code on very large files/arrays, working on an HPC system through JupyterHub, and I haven't found solutions to my errors in related forums. Below is a code snippet that's giving me grief (just opening and concatenating some files), along with the full error output.
import xarray as xr

# Directory of hi-res CESM POP history files
dirc = '/glade/campaign/cesm/development/bgcwg/projects/hi-res_CESM1_CORE/g.e11.G.T62_t12.eco.006/ocn/hist/'
# Open a single file just to get the full variable list
test = xr.open_mfdataset(dirc + 'g.e11.G.T62_t12.eco.006.pop.h.0001-01-05.nc',
                         parallel=True, decode_times=True)
varlist = list(test.variables)
# Keep only the variables I need; everything else is dropped on open
keeplist = ['DpCO2','FG_CO2','SALT','TEMP','UVEL','VVEL','WVEL','photoC_diat','photoC_diaz','photoC_sp']
droplist = [v for v in varlist if v not in keeplist]
# Open and concatenate every file in the directory
test = xr.open_mfdataset(dirc + '*.nc', parallel=True, decode_times=True,
                         drop_variables=droplist, combine='nested', coords='time')
test
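(One thing I'm not sure about: with combine='nested' I believe the concatenation dimension is supposed to be passed as concat_dim rather than coords, so my arguments may be wrong to begin with. A variant like the one below might be what I actually want, though I haven't confirmed it avoids the error:)

test = xr.open_mfdataset(dirc + '*.nc', parallel=True, decode_times=True,
                         drop_variables=droplist,
                         combine='nested', concat_dim='time')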
Error:
2025-09-29 13:06:23,021 - distributed.protocol.core - CRITICAL - Failed to deserialize
Traceback (most recent call last):
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/protocol/core.py", line 175, in loads
return msgpack.loads(
^^^^^^^^^^^^^^
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 136, in unpackb
raise ExtraData(ret, unpacker._get_extradata())
msgpack.exceptions.ExtraData: unpack(b) received extra data.
2025-09-29 13:06:23,024 - distributed.core - ERROR - Exception while handling op register-client
Traceback (most recent call last):
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/core.py", line 831, in _handle_comm
result = await result
^^^^^^^^^^^^
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/scheduler.py", line 5902, in add_client
await self.handle_stream(comm=comm, extra={"client": client})
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/core.py", line 886, in handle_stream
msgs = await comm.read()
^^^^^^^^^^^^^^^^^
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/comm/tcp.py", line 247, in read
msg = await from_frames(
^^^^^^^^^^^^^^^^^^
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/comm/utils.py", line 78, in from_frames
res = _from_frames()
^^^^^^^^^^^^^^
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/comm/utils.py", line 61, in _from_frames
return protocol.loads(
^^^^^^^^^^^^^^^
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/protocol/core.py", line 175, in loads
return msgpack.loads(
^^^^^^^^^^^^^^
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 136, in unpackb
raise ExtraData(ret, unpacker._get_extradata())
msgpack.exceptions.ExtraData: unpack(b) received extra data.
IOPub data rate exceeded.
The Jupyter server will temporarily stop sending output
to the client in order to avoid crashing it.
To change this limit, set the config variable
`--ServerApp.iopub_data_rate_limit`.
Current values:
ServerApp.iopub_data_rate_limit=1000000.0 (bytes/sec)
ServerApp.rate_limit_window=3.0 (secs)
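(For what it's worth, I know I can raise that limit in the server config, e.g. something like the line below in jupyter_server_config.py, assuming that file is even respected under our JupyterHub setup, but I suspect the limit is only being tripped by the sheer volume of error output rather than being the root cause.)

# In ~/.jupyter/jupyter_server_config.py (exact config file may differ under JupyterHub):
c.ServerApp.iopub_data_rate_limit = 1.0e10  # bytes/sec, up from the default 1e6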
---------------------------------------------------------------------------
FutureCancelledError Traceback (most recent call last)
Cell In[95], line 6
4 keeplist = ['DpCO2','FG_CO2','SALT','TEMP','UVEL','VVEL','WVEL','photoC_diat','photoC_diaz','photoC_sp']
5 droplist = list(filter(lambda item: item not in keeplist, varlist))
----> 6 test = xr.open_mfdataset(dirc+'*.nc', parallel=True, decode_times=True,
7 drop_variables=droplist, combine = 'nested', coords='time')
8 test
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/backends/api.py:1053, in open_mfdataset(paths, chunks, concat_dim, compat, preprocess, engine, data_vars, coords, combine, parallel, join, attrs_file, combine_attrs, **kwargs)
1049 try:
1050 if combine == "nested":
1051 # Combined nested list by successive concat and merge operations
1052 # along each dimension, using structure given by "ids"
-> 1053 combined = _nested_combine(
1054 datasets,
1055 concat_dims=concat_dim,
1056 compat=compat,
1057 data_vars=data_vars,
1058 coords=coords,
1059 ids=ids,
1060 join=join,
1061 combine_attrs=combine_attrs,
1062 )
1063 elif combine == "by_coords":
1064 # Redo ordering from coordinates, ignoring how they were ordered
1065 # previously
1066 combined = combine_by_coords(
1067 datasets,
1068 compat=compat,
(...)
1072 combine_attrs=combine_attrs,
1073 )
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/combine.py:359, in _nested_combine(datasets, concat_dims, compat, data_vars, coords, ids, fill_value, join, combine_attrs)
356 _check_shape_tile_ids(combined_ids)
358 # Apply series of concatenate or merge operations along each dimension
--> 359 combined = _combine_nd(
360 combined_ids,
361 concat_dims,
362 compat=compat,
363 data_vars=data_vars,
364 coords=coords,
365 fill_value=fill_value,
366 join=join,
367 combine_attrs=combine_attrs,
368 )
369 return combined
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/combine.py:235, in _combine_nd(combined_ids, concat_dims, data_vars, coords, compat, fill_value, join, combine_attrs)
231 # Each iteration of this loop reduces the length of the tile_ids tuples
232 # by one. It always combines along the first dimension, removing the first
233 # element of the tuple
234 for concat_dim in concat_dims:
--> 235 combined_ids = _combine_all_along_first_dim(
236 combined_ids,
237 dim=concat_dim,
238 data_vars=data_vars,
239 coords=coords,
240 compat=compat,
241 fill_value=fill_value,
242 join=join,
243 combine_attrs=combine_attrs,
244 )
245 (combined_ds,) = combined_ids.values()
246 return combined_ds
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/combine.py:270, in _combine_all_along_first_dim(combined_ids, dim, data_vars, coords, compat, fill_value, join, combine_attrs)
268 combined_ids = dict(sorted(group))
269 datasets = combined_ids.values()
--> 270 new_combined_ids[new_id] = _combine_1d(
271 datasets, dim, compat, data_vars, coords, fill_value, join, combine_attrs
272 )
273 return new_combined_ids
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/combine.py:315, in _combine_1d(datasets, concat_dim, compat, data_vars, coords, fill_value, join, combine_attrs)
313 raise
314 else:
--> 315 combined = merge(
316 datasets,
317 compat=compat,
318 fill_value=fill_value,
319 join=join,
320 combine_attrs=combine_attrs,
321 )
323 return combined
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/merge.py:1025, in merge(objects, compat, join, fill_value, combine_attrs)
1022 obj = obj.to_dataset(promote_attrs=True) if isinstance(obj, DataArray) else obj
1023 dict_like_objects.append(obj)
-> 1025 merge_result = merge_core(
1026 dict_like_objects,
1027 compat,
1028 join,
1029 combine_attrs=combine_attrs,
1030 fill_value=fill_value,
1031 )
1032 return Dataset._construct_direct(**merge_result._asdict())
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/merge.py:757, in merge_core(objects, compat, join, combine_attrs, priority_arg, explicit_coords, indexes, fill_value)
755 collected = collect_variables_and_indexes(aligned, indexes=indexes)
756 prioritized = _get_priority_vars_and_indexes(aligned, priority_arg, compat=compat)
--> 757 variables, out_indexes = merge_collected(
758 collected, prioritized, compat=compat, combine_attrs=combine_attrs
759 )
761 dims = calculate_dimensions(variables)
763 coord_names, noncoord_names = determine_coords(coerced)
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/merge.py:291, in merge_collected(grouped, prioritized, compat, combine_attrs, equals)
289 variables = [variable for variable, _ in elements_list]
290 try:
--> 291 merged_vars[name] = unique_variable(
292 name, variables, compat, equals.get(name, None)
293 )
294 except MergeError:
295 if compat != "minimal":
296 # we need more than "minimal" compatibility (for which
297 # we drop conflicting coordinates)
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/merge.py:140, in unique_variable(name, variables, compat, equals)
138 out = out.compute()
139 for var in variables[1:]:
--> 140 equals = getattr(out, compat)(var)
141 if not equals:
142 break
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/variable.py:2184, in Variable.no_conflicts(self, other, equiv)
2177 def no_conflicts(self, other, equiv=duck_array_ops.array_notnull_equiv):
2178 """True if the intersection of two Variable's non-null data is
2179 equal; otherwise false.
2180
2181 Variables can thus still be equal if there are locations where either,
2182 or both, contain NaN values.
2183 """
-> 2184 return self.broadcast_equals(other, equiv=equiv)
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/variable.py:2166, in Variable.broadcast_equals(self, other, equiv)
2164 except (ValueError, AttributeError):
2165 return False
-> 2166 return self.equals(other, equiv=equiv)
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/variable.py:2150, in Variable.equals(self, other, equiv)
2147 other = getattr(other, "variable", other)
2148 try:
2149 return self.dims == other.dims and (
-> 2150 self._data is other._data or equiv(self.data, other.data)
2151 )
2152 except (TypeError, AttributeError):
2153 return False
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/duck_array_ops.py:282, in array_notnull_equiv(arr1, arr2)
280 warnings.filterwarnings("ignore", "In the future, 'NAT == x'")
281 flag_array = (arr1 == arr2) | isnull(arr1) | isnull(arr2)
--> 282 return bool(flag_array.all())
283 else:
284 return lazy_equiv
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/dask/array/core.py:1861, in Array.__bool__(self)
1856 raise ValueError(
1857 f"The truth value of a {self.__class__.__name__} is ambiguous. "
1858 "Use a.any() or a.all()."
1859 )
1860 else:
-> 1861 return bool(self.compute())
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/dask/base.py:376, in DaskMethodsMixin.compute(self, **kwargs)
352 def compute(self, **kwargs):
353 """Compute this dask collection
354
355 This turns a lazy Dask collection into its in-memory equivalent.
(...)
374 dask.compute
375 """
--> 376 (result,) = compute(self, traverse=False, **kwargs)
377 return result
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/dask/base.py:664, in compute(traverse, optimize_graph, scheduler, get, *args, **kwargs)
661 postcomputes.append(x.__dask_postcompute__())
663 with shorten_traceback():
--> 664 results = schedule(dsk, keys, **kwargs)
666 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/client.py:2414, in Client._gather(self, futures, errors, direct, local_worker)
2412 exception = st.exception
2413 traceback = st.traceback
-> 2414 raise exception.with_traceback(traceback)
2415 if errors == "skip":
2416 bad_keys.add(key)
FutureCancelledError: ('all-aggregate-71b3cdadc2a8164d9744ffc8180819a3',) cancelled for reason: scheduler-connection-lost.
Client lost the connection to the scheduler. Please check your connection and re-run your work.
The other problem I keep hitting is with relatively simple operations, such as frequency filtering or calculating standard deviations: I get a similar scheduler-connection error, except preceded by a "0-dim memory has no length" serialization failure instead of the data-rate-limit message.
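The failing pattern looks roughly like the sketch below (paths are placeholders and the regridding step is omitted; hrcmr and hrstdr are, roughly, the time mean and standard deviation of a regridded CO2 field, matching the to_netcdf call in the traceback further down):

der = '/path/to/output/'                 # placeholder for my output directory
hrcmr = test['DpCO2'].mean(dim='time')   # time mean (lazy dask computation)
hrstdr = test['DpCO2'].std(dim='time')   # time standard deviation (lazy)
hrcmr.to_netcdf(der + 'dsh_CO2_mean_regridded.nc')    # this is where it blows up
hrstdr.to_netcdf(der + 'dsh_CO2_stdev_regridded.nc')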
2025-08-29 23:10:03,387 - distributed.protocol.core - CRITICAL - Failed to Serialize
Traceback (most recent call last):
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/protocol/core.py", line 109, in dumps
frames[0] = msgpack.dumps(msg, default=_encode_default, use_bin_type=True)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/__init__.py", line 35, in packb
return Packer(**kwargs).pack(o)
^^^^^^^^^^^^^^^^^^^^^^^^
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 885, in pack
self._pack(obj)
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 861, in _pack
self._pack(obj[i], nest_limit - 1)
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 864, in _pack
return self._pack_map_pairs(
^^^^^^^^^^^^^^^^^^^^^
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 970, in _pack_map_pairs
self._pack(v, nest_limit - 1)
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 861, in _pack
self._pack(obj[i], nest_limit - 1)
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 819, in _pack
n = len(obj) * obj.itemsize
^^^^^^^^
TypeError: 0-dim memory has no length
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/protocol/core.py", line 130, in dumps
frames[0] = msgpack.dumps(
^^^^^^^^^^^^^^
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/__init__.py", line 35, in packb
return Packer(**kwargs).pack(o)
^^^^^^^^^^^^^^^^^^^^^^^^
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 885, in pack
self._pack(obj)
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 861, in _pack
self._pack(obj[i], nest_limit - 1)
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 864, in _pack
return self._pack_map_pairs(
^^^^^^^^^^^^^^^^^^^^^
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 970, in _pack_map_pairs
self._pack(v, nest_limit - 1)
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 861, in _pack
self._pack(obj[i], nest_limit - 1)
File "/glade/u/home/emmah/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/msgpack/fallback.py", line 819, in _pack
n = len(obj) * obj.itemsize
^^^^^^^^
TypeError: 0-dim memory has no length
2025-08-29 23:10:03,390 - distributed.comm.utils - ERROR - 0-dim memory has no length
[same msgpack traceback as above, repeated]
2025-08-29 23:10:03,393 - distributed.batched - ERROR - Error in batched write
[same msgpack traceback as above, this time reached from distributed/batched.py via tornado and distributed/comm/utils.py to_frames()]
TypeError: 0-dim memory has no length
---------------------------------------------------------------------------
FutureCancelledError Traceback (most recent call last)
Cell In[6], line 1
----> 1 hrcmr.to_netcdf(der+'dsh_CO2_mean_regridded.nc') # 0-dim errors
2 hrstdr.to_netcdf(der+'dsh_CO2_stdev_regridded.nc')
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/dataset.py:1957, in Dataset.to_netcdf(self, path, mode, format, group, engine, encoding, unlimited_dims, compute, invalid_netcdf)
1954 encoding = {}
1955 from xarray.backends.api import to_netcdf
-> 1957 return to_netcdf( # type: ignore # mypy cannot resolve the overloads:(
1958 self,
1959 path,
1960 mode=mode,
1961 format=format,
1962 group=group,
1963 engine=engine,
1964 encoding=encoding,
1965 unlimited_dims=unlimited_dims,
1966 compute=compute,
1967 multifile=False,
1968 invalid_netcdf=invalid_netcdf,
1969 )
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/backends/api.py:1281, in to_netcdf(dataset, path_or_file, mode, format, group, engine, encoding, unlimited_dims, compute, multifile, invalid_netcdf)
1278 if multifile:
1279 return writer, store
-> 1281 writes = writer.sync(compute=compute)
1283 if isinstance(target, BytesIO):
1284 store.sync()
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/backends/common.py:178, in ArrayWriter.sync(self, compute, chunkmanager_store_kwargs)
175 if chunkmanager_store_kwargs is None:
176 chunkmanager_store_kwargs = {}
--> 178 delayed_store = chunkmanager.store(
179 self.sources,
180 self.targets,
181 lock=self.lock,
182 compute=compute,
183 flush=True,
184 regions=self.regions,
185 **chunkmanager_store_kwargs,
186 )
187 self.sources = []
188 self.targets = []
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/xarray/core/daskmanager.py:211, in DaskManager.store(self, sources, targets, **kwargs)
203 def store(
204 self,
205 sources: DaskArray | Sequence[DaskArray],
206 targets: Any,
207 **kwargs,
208 ):
209 from dask.array import store
--> 211 return store(
212 sources=sources,
213 targets=targets,
214 **kwargs,
215 )
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/dask/array/core.py:1230, in store(***failed resolving arguments***)
1228 elif compute:
1229 store_dsk = HighLevelGraph(layers, dependencies)
-> 1230 compute_as_if_collection(Array, store_dsk, map_keys, **kwargs)
1231 return None
1233 else:
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/dask/base.py:401, in compute_as_if_collection(cls, dsk, keys, scheduler, get, **kwargs)
399 schedule = get_scheduler(scheduler=scheduler, cls=cls, get=get)
400 dsk2 = optimization_function(cls)(dsk, keys, **kwargs)
--> 401 return schedule(dsk2, keys, **kwargs)
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/client.py:3480, in Client.get(self, dsk, keys, workers, allow_other_workers, resources, sync, asynchronous, direct, retries, priority, fifo_timeout, actors, **kwargs)
3478 should_rejoin = False
3479 try:
-> 3480 results = self.gather(packed, asynchronous=asynchronous, direct=direct)
3481 finally:
3482 for f in futures.values():
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/client.py:2553, in Client.gather(self, futures, errors, direct, asynchronous)
2550 local_worker = None
2552 with shorten_traceback():
-> 2553 return self.sync(
2554 self._gather,
2555 futures,
2556 errors=errors,
2557 direct=direct,
2558 local_worker=local_worker,
2559 asynchronous=asynchronous,
2560 )
File ~/.conda/envs/TPA_O2_env/lib/python3.12/site-packages/distributed/client.py:2414, in Client._gather(self, futures, errors, direct, local_worker)
2412 exception = st.exception
2413 traceback = st.traceback
-> 2414 raise exception.with_traceback(traceback)
2415 if errors == "skip":
2416 bad_keys.add(key)
FutureCancelledError: ('store-map-531644f959a82d969cf27d5b29e08356', 10, 3) cancelled for reason: scheduler-connection-lost.
Client lost the connection to the scheduler. Please check your connection and re-run your work.
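One thing I've wondered about, but haven't tested carefully, is whether rechunking before the reductions would change anything, e.g.:

# Speculative: rechunk before reducing (dimension names from the POP grid;
# chunk sizes are guesses on my part)
test = test.chunk({'time': 1, 'nlat': 600, 'nlon': 600})
hrstdr = test['DpCO2'].std(dim='time')

though I don't understand the 0-dim serialization error well enough to know whether chunking is even relevant here.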
Any help, even just a pointer to a better place to ask, would be greatly appreciated.