From 288b5a22d508bd45faf2e04ec4d53c9b3f05d30c Mon Sep 17 00:00:00 2001
From: Bouwe Andela
Date: Thu, 25 Apr 2024 13:26:02 +0200
Subject: [PATCH] Remove workaround for issue that was fixed long ago

---
 lib/iris/_merge.py | 18 ------------------
 1 file changed, 18 deletions(-)

diff --git a/lib/iris/_merge.py b/lib/iris/_merge.py
index 8d1a0f052a..2d8beb6f27 100644
--- a/lib/iris/_merge.py
+++ b/lib/iris/_merge.py
@@ -1230,14 +1230,6 @@ def merge(self, unique=True):
 
         # Generate group-depth merged cubes from the source-cubes.
         for level in range(group_depth):
-            # Track the largest dtype of the data to be merged.
-            # Unfortunately, da.stack() is not symmetric with regards
-            # to dtypes. So stacking float + int yields a float, but
-            # stacking an int + float yields an int! We need to ensure
-            # that the largest dtype prevails i.e. float, in order to
-            # support the masked case for dask.
-            # Reference https://github.com/dask/dask/issues/2273.
-            dtype = None
             # Stack up all the data from all of the relevant source
             # cubes in a single dask "stacked" array.
             # If it turns out that all the source cubes already had
@@ -1258,21 +1250,11 @@ def merge(self, unique=True):
                 else:
                     data = as_lazy_data(data)
                 stack[nd_index] = data
-                # Determine the largest dtype.
-                if dtype is None:
-                    dtype = data.dtype
-                else:
-                    dtype = np.promote_types(data.dtype, dtype)
-
-            # Coerce to the largest dtype.
-            for nd_index in nd_indexes:
-                stack[nd_index] = stack[nd_index].astype(dtype)
 
             merged_data = multidim_lazy_stack(stack)
             if all_have_data:
                 # All inputs were concrete, so turn the result back into a
                 # normal array.
-                dtype = self._cube_signature.data_type
                 merged_data = as_concrete_data(merged_data)
             merged_cube = self._get_cube(merged_data)
             merged_cubes.append(merged_cube)
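
As a quick sanity check (a sketch, not part of the patch, assuming numpy and a
dask release containing the fix for https://github.com/dask/dask/issues/2273),
da.stack() now promotes dtypes symmetrically regardless of argument order, so
the coercion removed above is redundant:

    import dask.array as da
    import numpy as np

    lazy_float = da.ones(3, dtype=np.float64)  # lazy float array
    lazy_int = da.ones(3, dtype=np.int32)      # lazy int array

    # Both stacking orders now yield the promoted dtype (float64),
    # matching np.promote_types(np.float64, np.int32). Before the dask
    # fix, stacking the int array first wrongly produced an int dtype.
    assert da.stack([lazy_float, lazy_int]).dtype == np.dtype("float64")
    assert da.stack([lazy_int, lazy_float]).dtype == np.dtype("float64")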