diff --git a/docs/source/notebooks/MutualInformation_V2.ipynb b/docs/source/notebooks/MutualInformation_V2.ipynb
index eaca2fb..f636321 100644
--- a/docs/source/notebooks/MutualInformation_V2.ipynb
+++ b/docs/source/notebooks/MutualInformation_V2.ipynb
@@ -299,7 +299,6 @@
     "\n",
     "    I = []\n",
     "    for bcount, bit_pos_dict in enumerate(dict_list_H):\n",
-    "\n",
     "        p00 = np.divide(bit_pos_dict[\"00\"], num - 1, dtype=np.float64)\n",
     "        p01 = np.divide(bit_pos_dict[\"01\"], num - 1, dtype=np.float64)\n",
     "        p10 = np.divide(bit_pos_dict[\"10\"], num - 1, dtype=np.float64)\n",
diff --git a/ldcpy/calcs.py b/ldcpy/calcs.py
index 96a53a1..76f7f20 100644
--- a/ldcpy/calcs.py
+++ b/ldcpy/calcs.py
@@ -169,7 +169,6 @@ def _is_memoized(self, calc_name: str) -> bool:
         return hasattr(self, calc_name) and (self.__getattribute__(calc_name) is not None)
 
     def _con_var(self, dir, dataset) -> xr.DataArray:
-
         if dir == 'ns':
             tt = dataset.diff(self._lat_dim_name, 1)
 
@@ -417,7 +416,6 @@ def entropy(self) -> xr.DataArray:
         # lower is better (1.0 means random - no compression possible)
         """
         if not self._is_memoized('_entropy'):
-
             a1 = self._ds.data
             if dask.is_dask_collection(a1):
                 a1 = a1.compute()
@@ -1052,7 +1050,6 @@ def vfftmax(self) -> xr.DataArray:
     @property
     def stft(self) -> xr.DataArray:
         if not self._is_memoized('_stft'):
-
             f, t, self._stft = np.abs(
                 stft(
                     self.mean.isel({'lat': 100}).squeeze(), nperseg=1, nfft=382, detrend='constant'
                 )
@@ -1552,7 +1549,6 @@ def covariance(self) -> xr.DataArray:
             The covariance between the two datasets
         """
         if not self._is_memoized('_covariance'):
-
             # need to use unweighted means
             c1_mean = self._calcs1.get_calc('ds').mean(skipna=True)
             c2_mean = self._calcs2.get_calc('ds').mean(skipna=True)
@@ -1582,7 +1578,6 @@ def pearson_correlation_coefficient(self):
             returns the pearson correlation coefficient between the two datasets
         """
         if not self._is_memoized('_pearson_correlation_coefficient'):
-
             # we need to do this with unweighted data
             c1_std = float(self._calcs1.get_calc('ds').std(skipna=True))
             c2_std = float(self._calcs2.get_calc('ds').std(skipna=True))
@@ -1659,7 +1654,6 @@ def spatial_rel_error(self):
             self._spatial_rel_error = 0
             self._max_spatial_rel_error = 0
         else:
-
             if z.size > 0:
                 m_t1_denom = np.ma.masked_invalid(t1_denom).compressed()
             else:
@@ -1799,7 +1793,6 @@ def ssim_value(self):
 
             ssim_mats_array = []
             for this_lev in range(nlevels):
-
                 with tempfile.TemporaryDirectory() as tmpdirname:
                     filename_1, filename_2 = (
                         f'{tmpdirname}/t_ssim1.png',
@@ -1908,7 +1901,6 @@ def ssim_value_fp_slow(self):
 
         """
        if not self._is_memoized('_ssim_value_fp_slow'):
-
            # if this is a 3D variable, we will do each level seperately
            if self._calcs1._vert_dim_name is not None:
                vname = self._calcs1._vert_dim_name
@@ -1977,13 +1969,11 @@ def ssim_value_fp_slow(self):
             # go through 2D arrays - each grid point x0, y0 has
             # a 2D window [x0 - k, x0+k] [y0 - k, y0 + k]
             for i in range(X):
-
                 # don't go over boundaries
                 imin = max(0, i - k)
                 imax = min(X - 1, i + k)
 
                 for j in range(Y):
-
                     if np.isnan(sc_a1[i, j]):
                         # SKIP IF gridpoint is nan
                         ssim_mat[i, j] = np.nan
@@ -2079,7 +2069,6 @@ def ssim_value_fp_fast(self):
         from astropy.convolution import Gaussian2DKernel, convolve, interpolate_replace_nans
 
         if not self._is_memoized('_ssim_value_fp_fast'):
-
             # if this is a 3D variable, we will do each level separately
             if self._calcs1._vert_dim_name is not None:
                 vname = self._calcs1._vert_dim_name
diff --git a/ldcpy/comp_checker.py b/ldcpy/comp_checker.py
index b9e8dde..c135667 100644
--- a/ldcpy/comp_checker.py
+++ b/ldcpy/comp_checker.py
@@ -51,7 +51,6 @@ def __init__(
         comp_mode='p',
         accept_first=False,
     ):
-
         self._calc_type = calc_type
         self._calc_tol = calc_tol
         self._tol_greater_than = True
@@ -73,7 +72,6 @@ def reset_checker(self):  # call before doing the next timestep
         self._opt_level = None
 
     def eval_comp_level(self, orig_da, comp_da, comp_level):
-
         dc = lm.Diffcalcs(orig_da, comp_da)
 
         val = dc.get_diff_calc(self._calc_type)
@@ -166,7 +164,6 @@ def _comp_rules(self, comp_level, level_passed):
         return new_level
 
     def _zfp_rules(self, comp_level, level_passed):
-
         if self._comp_mode == 'p':  # precision
             pmax = 28
             pmin = 6
diff --git a/ldcpy/derived_vars.py b/ldcpy/derived_vars.py
index 39b0c01..023abc8 100644
--- a/ldcpy/derived_vars.py
+++ b/ldcpy/derived_vars.py
@@ -10,7 +10,6 @@
 
 
 def _preprocess(set_labels, list_of_cols):
-
     contU = True
 
     num_sets = len(set_labels)
@@ -26,7 +25,6 @@ def _preprocess(set_labels, list_of_cols):
 
 # top of the model radiation budget
 def cam_restom(all_col, sets):
-
     col = []
 
     fsnt = all_col['FSNT']
@@ -64,7 +62,6 @@ def cam_restom(all_col, sets):
 
 # global precipitation
 def cam_precip(all_col, sets):
-
     col = []
 
     precc = all_col['PRECC']
@@ -101,7 +98,6 @@ def cam_precip(all_col, sets):
 
 # evaporation-precipitation
 def cam_ep(all_col, sets):
-
     # QFLX is "kg/m2/s or mm/s
     # PRECC and PRECL are m/s
     # 1 kg/m2/s = 86400 mm/day.
@@ -148,7 +144,6 @@ def cam_ep(all_col, sets):
 
 # surface energy balance
 def cam_ressurf(all_col, sets):
-
     col = []
 
     # all in W/m^2
diff --git a/ldcpy/plot.py b/ldcpy/plot.py
index 2b47ede..27511f8 100644
--- a/ldcpy/plot.py
+++ b/ldcpy/plot.py
@@ -80,7 +80,6 @@ def __init__(
         cmax=None,
         cmin=None,
     ):
-
         self._ds = ds
 
         self._cmax = cmax
@@ -209,7 +208,6 @@ def get_plot_data(self, raw_data_1, raw_data_2=None):
         return plot_data
 
     def get_title(self, calc_name, c_name=None):
-
         if c_name is not None:
             das = f'{c_name}'
         else:
@@ -238,7 +236,6 @@ def get_title(self, calc_name, c_name=None):
             title = f'{title} {self._calc_type}'
 
         if self._group_by is not None:
-
             title = f'{title} by {self._group_by}'
 
         if self.title_lat is not None:
@@ -282,7 +279,6 @@ def update_label(event_axes):
             return
 
     def spatial_plot(self, da_sets, titles, data_type):
-
         if self.vert_plot:
             nrows = int((da_sets.sets.size))
         else:
@@ -326,7 +322,6 @@ def spatial_plot(self, da_sets, titles, data_type):
         central = 300.0
         for i in range(da_sets.sets.size):
-
             if self.vert_plot:
                 axs[i] = plt.subplot(
                     nrows, 1, i + 1, projection=ccrs.Robinson(central_longitude=central)
                 )
@@ -563,7 +558,6 @@ def time_series_plot(
         if da_sets.size / da_sets.sets.size == 1:
             tick_interval = 1
         if self._group_by == 'time.dayofyear':
-
             group_string = 'dayofyear'
             xlabel = 'Day of Year'
         elif self._group_by == 'time.month':
@@ -704,7 +698,6 @@ def get_calc_label(self, calc, data, data_type):
             calc_name = f'{calc}: cutoff {zscore_cutoff[0]:.2e}, % sig: {percent_sig:.2f}'
 
         elif calc == 'mean' and self._plot_type == 'spatial' and self._calc_type == 'raw':
-
             if self._weighted:
                 a1_data = (
                     lm.Datasetcalcs(data, data_type, ['time'], weighted=self._weighted)
diff --git a/ldcpy/util.py b/ldcpy/util.py
index 984c6b0..0705f15 100644
--- a/ldcpy/util.py
+++ b/ldcpy/util.py
@@ -526,7 +526,6 @@ def check_metrics(
     # Pearson less than pcc_tol means fail
     pcc = diff_calcs.get_diff_calc('pearson_correlation_coefficient')
     if pcc < pcc_tol:
-
         print(' *FAILED pearson correlation coefficient test...(pcc = {0:.5f}'.format(pcc), ')')
         num_fail = num_fail + 1
     else:
@@ -581,7 +580,6 @@ def subset_data(
     if lon_coord_name is None:
         lon_coord_name = ds.cf.coordinates['longitude'][0]
    if lat_coord_name is None:
-
        lat_coord_name = ds.cf.coordinates['latitude'][0]
    if vertical_dim_name is None:
        try:
@@ -624,19 +622,16 @@ def subset_data(
        ds_subset = ds_subset.isel({vertical_dim_name: lev})
 
    if latdim == 1:
-
        if lat is not None:
            ds_subset = ds_subset.sel(**{lat_coord_name: [lat], 'method': 'nearest'})
 
        if lon is not None:
            ds_subset = ds_subset.sel(**{lon_coord_name: [lon + 180], 'method': 'nearest'})
 
    elif latdim == 2:
-
        # print(ds_subset)
        if lat is not None:
            if lon is not None:
-
                # lat is -90 to 90
                # lon should be 0- 360
                ad_lon = lon
@@ -665,7 +660,6 @@
 
 
def var_and_wt_coords(varname, ds_col):
-
    ca_coord = ds_col.coords['cell_area']
    if dask.is_dask_collection(ca_coord):
        ca_coord = ca_coord.compute()