From 6546fee383638913ca06de09f8a1c47d0f423e0f Mon Sep 17 00:00:00 2001
From: Jānis Gailis
Date: Tue, 17 Apr 2018 14:46:59 +0200
Subject: [PATCH] ValueError -> ValidationError

---
 cate/ops/aggregate.py      | 38 ++++++++++----------
 cate/ops/animate.py        | 12 +++----
 cate/ops/anomaly.py        | 14 ++++----
 cate/ops/arithmetics.py    |  6 ++--
 cate/ops/coregistration.py | 73 +++++++++++++++++++-------------------
 cate/ops/correlation.py    | 68 +++++++++++++++++------------------
 cate/ops/index.py          |  4 +--
 cate/ops/plot.py           |  2 +-
 cate/ops/plot_helpers.py   |  6 ++--
 9 files changed, 112 insertions(+), 111 deletions(-)

diff --git a/cate/ops/aggregate.py b/cate/ops/aggregate.py
index 319ec5c16..3fadb93b0 100644
--- a/cate/ops/aggregate.py
+++ b/cate/ops/aggregate.py
@@ -36,7 +36,7 @@
 from cate.core.op import op, op_input, op_return
 from cate.ops.select import select_var
 from cate.util.monitor import Monitor
-from cate.core.types import VarNamesLike, DatasetLike
+from cate.core.types import VarNamesLike, DatasetLike, ValidationError

 from cate.ops.normalize import adjust_temporal_attrs

@@ -65,21 +65,21 @@ def long_term_average(ds: DatasetLike.TYPE,
     ds = DatasetLike.convert(ds)
     # Check if time dtype is what we want
     if 'datetime64[ns]' != ds.time.dtype:
-        raise ValueError('Long term average operation expects a dataset with the'
-                         ' time coordinate of type datetime64[ns], but received'
-                         ' {}. Running the normalize operation on this'
-                         ' dataset may help'.format(ds.time.dtype))
+        raise ValidationError('Long term average operation expects a dataset with the'
+                              ' time coordinate of type datetime64[ns], but received'
+                              ' {}. Running the normalize operation on this'
+                              ' dataset may help'.format(ds.time.dtype))

     # Check if we have a monthly dataset
     try:
         if ds.attrs['time_coverage_resolution'] != 'P1M':
-            raise ValueError('Long term average operation expects a monthly dataset'
-                             ' running temporal aggregation on this dataset'
-                             ' beforehand may help.')
+            raise ValidationError('Long term average operation expects a monthly dataset'
+                                  ' running temporal aggregation on this dataset'
+                                  ' beforehand may help.')
     except KeyError:
-        raise ValueError('Could not determine temporal resolution. Running'
-                         ' the adjust_temporal_attrs operation beforehand may'
-                         ' help.')
+        raise ValidationError('Could not determine temporal resolution. Running'
+                              ' the adjust_temporal_attrs operation beforehand may'
+                              ' help.')

     var = VarNamesLike.convert(var)  # Shallow

@@ -154,19 +154,19 @@ def temporal_aggregation(ds: DatasetLike.TYPE,
     ds = DatasetLike.convert(ds)
     # Check if time dtype is what we want
     if 'datetime64[ns]' != ds.time.dtype:
-        raise ValueError('Temporal aggregation operation expects a dataset with the'
-                         ' time coordinate of type datetime64[ns], but received'
-                         ' {}. Running the normalize operation on this'
-                         ' dataset may help'.format(ds.time.dtype))
+        raise ValidationError('Temporal aggregation operation expects a dataset with the'
+                              ' time coordinate of type datetime64[ns], but received'
+                              ' {}. Running the normalize operation on this'
+                              ' dataset may help'.format(ds.time.dtype))

     # Check if we have a daily dataset
     try:
         if ds.attrs['time_coverage_resolution'] != 'P1D':
-            raise ValueError('Temporal aggregation operation expects a daily dataset')
+            raise ValidationError('Temporal aggregation operation expects a daily dataset')
     except KeyError:
-        raise ValueError('Could not determine temporal resolution. Running'
-                         ' the adjust_temporal_attrs operation beforehand may'
-                         ' help.')
+        raise ValidationError('Could not determine temporal resolution. Running'
+                              ' the adjust_temporal_attrs operation beforehand may'
+                              ' help.')

     with monitor.observing("resample dataset"):
         retset = ds.resample(freq='MS', dim='time', keep_attrs=True, how=method)

diff --git a/cate/ops/animate.py b/cate/ops/animate.py
index f63cc0782..a917aab22 100644
--- a/cate/ops/animate.py
+++ b/cate/ops/animate.py
@@ -66,7 +66,7 @@
 import numpy as np

 from cate.core.op import op, op_input
-from cate.core.types import VarName, DictLike, PolygonLike, HTML
+from cate.core.types import VarName, DictLike, PolygonLike, HTML, ValidationError
 from cate.util.monitor import Monitor

 from cate.ops.plot_helpers import (get_var_data,
@@ -157,7 +157,7 @@ def animate_map(ds: xr.Dataset,
     try:
         var = ds[var_name]
     except KeyError:
-        raise ValueError('Provided variable name "{}" does not exist in the given dataset'.format(var_name))
+        raise ValidationError('Provided variable name "{}" does not exist in the given dataset'.format(var_name))

     indexers = DictLike.convert(indexers) or {}
     properties = DictLike.convert(plot_properties) or {}
@@ -172,8 +172,8 @@ def animate_map(ds: xr.Dataset,
     if len(ds.lat) < 2 or len(ds.lon) < 2:
         # Matplotlib can not plot datasets with less than these dimensions with
         # contourf and pcolormesh methods
-        raise ValueError('The minimum dataset spatial dimensions to create a map'
-                         ' plot are (2,2)')
+        raise ValidationError('The minimum dataset spatial dimensions to create a map'
+                              ' plot are (2,2)')

     # See http://scitools.org.uk/cartopy/docs/v0.15/crs/projections.html#
     if projection == 'PlateCarree':
@@ -197,7 +197,7 @@ def animate_map(ds: xr.Dataset,
     elif projection == 'SouthPolarStereo':
         proj = ccrs.SouthPolarStereo(central_longitude=central_lon)
     else:
-        raise ValueError('illegal projection: "%s"' % projection)
+        raise ValidationError('illegal projection: "%s"' % projection)

     figure = plt.figure(figsize=(8, 4))
     ax = plt.axes(projection=proj)
@@ -283,7 +283,7 @@ def _get_min_max(data, monitor=None):
         data_min = data.min()
         if np.isnan(data_min):
             # Handle all-NaN dataset
-            raise ValueError('Can not create an animation of a dataset containing only NaN values.')
+            raise ValidationError('Can not create an animation of a dataset containing only NaN values.')
         else:
             with monitor.child(1).observing("find maximum"):
                 data_max = data.max()

diff --git a/cate/ops/anomaly.py b/cate/ops/anomaly.py
index e017bf716..38c3c63ee 100644
--- a/cate/ops/anomaly.py
+++ b/cate/ops/anomaly.py
@@ -34,7 +34,7 @@
 from cate.util.monitor import Monitor
 from cate.ops.subset import subset_spatial, subset_temporal
 from cate.ops.arithmetics import diff, ds_arithmetics
-from cate.core.types import TimeRangeLike, PolygonLike
+from cate.core.types import TimeRangeLike, PolygonLike, ValidationError

 _ALL_FILE_FILTER = dict(name='All Files', extensions=['*'])

@@ -71,13 +71,13 @@ def anomaly_external(ds: xr.Dataset,
     # Check if the time coordinate is of dtype datetime
     try:
         if ds.time.dtype != 'datetime64[ns]':
-            raise ValueError('The dataset provided for anomaly calculation'
-                             ' is required to have a time coordinate of'
-                             ' dtype datetime64[ns]. Running the normalize'
-                             ' operation on this dataset might help.')
+            raise ValidationError('The dataset provided for anomaly calculation'
+                                  ' is required to have a time coordinate of'
+                                  ' dtype datetime64[ns]. Running the normalize'
+                                  ' operation on this dataset might help.')
     except AttributeError:
-        raise ValueError('The dataset provided for anomaly calculation'
-                         ' is required to have a time coordinate.')
+        raise ValidationError('The dataset provided for anomaly calculation'
+                              ' is required to have a time coordinate.')

     clim = xr.open_dataset(file)
     ret = ds.copy()

diff --git a/cate/ops/arithmetics.py b/cate/ops/arithmetics.py
index 17e2e9f88..6b16872c4 100644
--- a/cate/ops/arithmetics.py
+++ b/cate/ops/arithmetics.py
@@ -35,7 +35,7 @@
 from xarray import ufuncs as xu

 from cate.core.op import op, op_input, op_return
-from cate.core.types import DatasetLike
+from cate.core.types import DatasetLike, ValidationError
 from cate.util.monitor import Monitor
 from cate.util.safe import safe_exec

@@ -95,8 +95,8 @@ def ds_arithmetics(ds: DatasetLike.TYPE,
         elif item[:] == 'exp':
             retset = xu.exp(retset)
         else:
-            raise ValueError('Arithmetic operation {} not'
-                             ' implemented.'.format(item[0]))
+            raise ValidationError('Arithmetic operation {} not'
+                                  ' implemented.'.format(item[0]))

     return retset

diff --git a/cate/ops/coregistration.py b/cate/ops/coregistration.py
index 27d7f1900..6b5376990 100644
--- a/cate/ops/coregistration.py
+++ b/cate/ops/coregistration.py
@@ -38,6 +38,7 @@
 import xarray as xr

 from cate.core.op import op_input, op, op_return
+from cate.core.types import ValidationError
 from cate.util.monitor import Monitor

 from cate.ops import resampling
@@ -90,46 +91,46 @@ def coregister(ds_master: xr.Dataset,
                  ('master', ds_master['lat'].values, -90),
                  ('master', ds_master['lon'].values, -180))
     except KeyError:
-        raise ValueError('Coregistration requires that both datasets are'
-                         ' spatial datasets with lon and lat dimensions. The'
-                         ' dimensionality of the provided master dataset is: {},'
-                         ' the dimensionality of the provided slave dataset is:'
-                         ' {}. Running the normalize operation might help in'
-                         ' case spatial dimensions have different'
-                         ' names'.format(ds_master.dims, ds_slave.dims))
+        raise ValidationError('Coregistration requires that both datasets are'
+                              ' spatial datasets with lon and lat dimensions. The'
+                              ' dimensionality of the provided master dataset is: {},'
+                              ' the dimensionality of the provided slave dataset is:'
+                              ' {}. Running the normalize operation might help in'
+                              ' case spatial dimensions have different'
+                              ' names'.format(ds_master.dims, ds_slave.dims))

     # Check if all arrays of the slave dataset have the required dimensionality
     for key in ds_slave.data_vars:
         if not _is_valid_array(ds_slave[key]):
-            raise ValueError('{} data array of slave dataset is not valid for'
-                             ' coregistration. The data array is expected to'
-                             ' have lat and lon dimensions. The data array has'
-                             ' the following dimensions: {}. Consider running'
-                             ' select_var operation to exclude this'
-                             ' data array'.format(key, ds_slave[key].dims))
+            raise ValidationError('{} data array of slave dataset is not valid for'
+                                  ' coregistration. The data array is expected to'
+                                  ' have lat and lon dimensions. The data array has'
+                                  ' the following dimensions: {}. Consider running'
+                                  ' select_var operation to exclude this'
+                                  ' data array'.format(key, ds_slave[key].dims))

     # Check if the grids of the provided datasets are equidistant and pixel
     # registered
     for array in grids:
         if not _within_bounds(array[1], array[2]):
-            raise ValueError('The {} dataset grid does not fall into required'
-                             ' boundaries. Required boundaries are ({}, {}),'
-                             ' dataset boundaries are ({}, {}). Running the'
-                             ' normalize operation'
-                             ' may help.'.format(array[0],
-                                                 array[2],
-                                                 abs(array[2]),
-                                                 array[1][0],
-                                                 array[1][-1]))
+            raise ValidationError('The {} dataset grid does not fall into required'
+                                  ' boundaries. Required boundaries are ({}, {}),'
+                                  ' dataset boundaries are ({}, {}). Running the'
+                                  ' normalize operation'
+                                  ' may help.'.format(array[0],
+                                                      array[2],
+                                                      abs(array[2]),
+                                                      array[1][0],
+                                                      array[1][-1]))

         if not _is_equidistant(array[1]):
-            raise ValueError('The {} dataset grid is not'
-                             ' equidistant, can not perform'
-                             ' coregistration'.format(array[0]))
+            raise ValidationError('The {} dataset grid is not'
+                                  ' equidistant, can not perform'
+                                  ' coregistration'.format(array[0]))
         if not _is_pixel_registered(array[1], array[2]):
-            raise ValueError('The {} dataset grid is not'
-                             ' pixel-registered, can not perform'
-                             ' coregistration'.format(array[0]))
+            raise ValidationError('The {} dataset grid is not'
+                                  ' pixel-registered, can not perform'
+                                  ' coregistration'.format(array[0]))

     # Co-register
     methods_us = {'nearest': 10, 'linear': 11}
@@ -334,8 +335,8 @@ def _find_intersection(first: np.ndarray,

     delta = maximum - minimum
     if delta < max(first_px_size, second_px_size):
-        raise ValueError('Could not find a valid intersection to perform'
-                         ' coregistration on')
+        raise ValidationError('Could not find a valid intersection to perform'
+                              ' coregistration on')

     # Make sure min/max fall on pixel boundaries for both grids
     # Because there exists a number N denoting how many smaller pixels fall
@@ -348,8 +349,8 @@
     while (0 != (minimum - global_bounds[0]) % first_px_size and
            0 != (minimum - global_bounds[0]) % second_px_size):
         if i == safety:
-            raise ValueError('Could not find a valid intersection to perform'
-                             ' coregistration on')
+            raise ValidationError('Could not find a valid intersection to perform'
+                                  ' coregistration on')
         minimum = minimum + finer
         i = i + 1

@@ -357,15 +358,15 @@
     while (0 != (global_bounds[1] - maximum) % first_px_size and
            0 != (global_bounds[1] - maximum) % second_px_size):
         if i == safety:
-            raise ValueError('Could not find a valid intersection to perform'
-                             ' coregistration on')
+            raise ValidationError('Could not find a valid intersection to perform'
+                                  ' coregistration on')
         maximum = maximum - finer
         i = i + 1

     # This is possible in some cases when mis-aligned grid arrays are presented
     if maximum <= minimum:
-        raise ValueError('Could not find a valid intersection to perform'
-                         ' coregistration on')
+        raise ValidationError('Could not find a valid intersection to perform'
+                              ' coregistration on')

     return (minimum, maximum)

diff --git a/cate/ops/correlation.py b/cate/ops/correlation.py
index d10c80c27..18fc48b9c 100644
--- a/cate/ops/correlation.py
+++ b/cate/ops/correlation.py
@@ -46,7 +46,7 @@
 from scipy.special import betainc

 from cate.core.op import op, op_input, op_return
-from cate.core.types import VarName, DatasetLike
+from cate.core.types import VarName, DatasetLike, ValidationError
 from cate.util.monitor import Monitor
 from cate.ops.normalize import adjust_spatial_attrs

@@ -92,19 +92,19 @@ def pearson_correlation_scalar(ds_x: DatasetLike.TYPE,

     if ((len(array_x.dims) != len(array_y.dims)) and
             (len(array_x.dims) != 1)):
-        raise ValueError('To calculate simple correlation, both provided'
-                         ' datasets should be simple 1d timeseries. To'
-                         ' create a map of correlation coefficients, use'
-                         ' pearson_correlation operation instead.')
+        raise ValidationError('To calculate simple correlation, both provided'
+                              ' datasets should be simple 1d timeseries. To'
+                              ' create a map of correlation coefficients, use'
+                              ' pearson_correlation operation instead.')

     if len(array_x['time']) != len(array_y['time']):
-        raise ValueError('The length of the time dimension differs between'
-                         ' the given datasets. Can not perform the calculation'
-                         ', please review operation documentation.')
+        raise ValidationError('The length of the time dimension differs between'
+                              ' the given datasets. Can not perform the calculation'
+                              ', please review operation documentation.')

     if len(array_x['time']) < 3:
-        raise ValueError('The length of the time dimension should not be less'
-                         ' than three to run the calculation.')
+        raise ValidationError('The length of the time dimension should not be less'
+                              ' than three to run the calculation.')

     with monitor.observing("Calculate Pearson correlation"):
         cc, pv = pearsonr(array_x.values, array_y.values)
@@ -166,43 +166,43 @@ def pearson_correlation(ds_x: DatasetLike.TYPE,
     # Further validate inputs
     if array_x.dims == array_y.dims:
         if len(array_x.dims) != 3 or len(array_y.dims) != 3:
-            raise ValueError('A correlation coefficient map can only be produced'
-                             ' if both provided datasets are 3D datasets with'
-                             ' lon/lat/time dimensionality, or if a combination'
-                             ' of a 3D lon/lat/time dataset and a 1D timeseries'
-                             ' is provided.')
+            raise ValidationError('A correlation coefficient map can only be produced'
+                                  ' if both provided datasets are 3D datasets with'
+                                  ' lon/lat/time dimensionality, or if a combination'
+                                  ' of a 3D lon/lat/time dataset and a 1D timeseries'
+                                  ' is provided.')

         if array_x.values.shape != array_y.values.shape:
-            raise ValueError('The provided variables {} and {} do not have the'
-                             ' same shape, Pearson correlation can not be'
-                             ' performed. Please review operation'
-                             ' documentation'.format(var_x, var_y))
+            raise ValidationError('The provided variables {} and {} do not have the'
+                                  ' same shape, Pearson correlation can not be'
+                                  ' performed. Please review operation'
+                                  ' documentation'.format(var_x, var_y))

         if (not ds_x['lat'].equals(ds_y['lat']) or
                 not ds_x['lon'].equals(ds_y['lon'])):
-            raise ValueError('When performing a pixel by pixel correlation the'
-                             ' datasets have to have the same lat/lon'
-                             ' definition. Consider running coregistration'
-                             ' first')
+            raise ValidationError('When performing a pixel by pixel correlation the'
+                                  ' datasets have to have the same lat/lon'
+                                  ' definition. Consider running coregistration'
+                                  ' first')
     elif (((len(array_x.dims) == 3) and (len(array_y.dims) != 1)) or
           ((len(array_x.dims) == 1) and (len(array_y.dims) != 3)) or
           ((len(array_x.dims) != 3) and (len(array_y.dims) == 1)) or
          ((len(array_x.dims) != 1) and (len(array_y.dims) == 3))):
-        raise ValueError('A correlation coefficient map can only be produced'
-                         ' if both provided datasets are 3D datasets with'
-                         ' lon/lat/time dimensionality, or if a combination'
-                         ' of a 3D lon/lat/time dataset and a 1D timeseries'
-                         ' is provided.')
+        raise ValidationError('A correlation coefficient map can only be produced'
+                              ' if both provided datasets are 3D datasets with'
+                              ' lon/lat/time dimensionality, or if a combination'
+                              ' of a 3D lon/lat/time dataset and a 1D timeseries'
+                              ' is provided.')

     if len(array_x['time']) != len(array_y['time']):
-        raise ValueError('The length of the time dimension differs between'
-                         ' the given datasets. Can not perform the calculation'
-                         ', please review operation documentation.')
+        raise ValidationError('The length of the time dimension differs between'
+                              ' the given datasets. Can not perform the calculation'
+                              ', please review operation documentation.')

     if len(array_x['time']) < 3:
-        raise ValueError('The length of the time dimension should not be less'
-                         ' than three to run the calculation.')
+        raise ValidationError('The length of the time dimension should not be less'
+                              ' than three to run the calculation.')

     # Do pixel by pixel correlation
     retset = _pearsonr(array_x, array_y, monitor)

@@ -213,7 +213,7 @@

 def _pearsonr(x: xr.DataArray, y: xr.DataArray, monitor: Monitor) -> xr.Dataset:
     """
-    Calculates Pearson correlation coefficients and p-values for testing
+    Calculate Pearson correlation coefficients and p-values for testing
     non-correlation of lon/lat/time xarray datasets for each lon/lat point.

     Heavily influenced by scipy.stats.pearsonr

diff --git a/cate/ops/index.py b/cate/ops/index.py
index 37cdb45c6..4cc314c74 100644
--- a/cate/ops/index.py
+++ b/cate/ops/index.py
@@ -36,7 +36,7 @@
 from cate.ops.select import select_var
 from cate.ops.subset import subset_spatial
 from cate.ops.anomaly import anomaly_external
-from cate.core.types import PolygonLike, VarName
+from cate.core.types import PolygonLike, VarName, ValidationError
 from cate.util.monitor import Monitor


@@ -112,7 +112,7 @@ def enso(ds: xr.Dataset,
               'custom': custom_region}
     converted_region = PolygonLike.convert(regions[region])
     if not converted_region:
-        raise ValueError('No region has been provided to ENSO index calculation')
+        raise ValidationError('No region has been provided to ENSO index calculation')

     name = 'ENSO ' + region + ' Index'
     if 'custom' == region:

diff --git a/cate/ops/plot.py b/cate/ops/plot.py
index f2281e116..ab60ccac0 100644
--- a/cate/ops/plot.py
+++ b/cate/ops/plot.py
@@ -194,7 +194,7 @@ def plot_map(ds: xr.Dataset,
     elif projection == 'SouthPolarStereo':
         proj = ccrs.SouthPolarStereo(central_longitude=central_lon)
     else:
-        raise ValueError('illegal projection: "%s"' % projection)
+        raise ValidationError('illegal projection: "%s"' % projection)

     figure = plt.figure(figsize=(8, 4))
     ax = plt.axes(projection=proj)

diff --git a/cate/ops/plot_helpers.py b/cate/ops/plot_helpers.py
index 49f2745b2..3b491b7e7 100644
--- a/cate/ops/plot_helpers.py
+++ b/cate/ops/plot_helpers.py
@@ -29,7 +29,7 @@
 ==========
 """

-from cate.core.types import PolygonLike
+from cate.core.types import PolygonLike, ValidationError
 from cate.core.opimpl import get_extents
 from cate.util.im import ensure_cmaps_loaded

@@ -49,8 +49,8 @@ def handle_plot_polygon(region: PolygonLike.TYPE = None):
     lon_min, lat_min, lon_max, lat_max = extents

     if not check_bounding_box(lat_min, lat_max, lon_min, lon_max):
-        raise ValueError('Provided plot extents do not form a valid bounding box '
-                         'within [-180.0,+180.0,-90.0,+90.0]')
+        raise ValidationError('Provided plot extents do not form a valid bounding box '
+                              'within [-180.0,+180.0,-90.0,+90.0]')

     return extents
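
Note (editorial, not part of the commit): after this patch, the ops modules signal bad user input with ValidationError, imported from cate.core.types as the import hunks above show, instead of the built-in ValueError. The sketch below shows how calling code might observe the change. It assumes a cate installation where these modules exist as patched; the dataset, its made-up 'tas' variable, and the integer 'time' coordinate that trips the datetime64[ns] check are purely illustrative:

# Minimal sketch: long_term_average now raises ValidationError when the
# 'time' coordinate is not of dtype datetime64[ns].
import numpy as np
import xarray as xr

from cate.core.types import ValidationError
from cate.ops.aggregate import long_term_average

# Hypothetical dataset; the integer 'time' coordinate fails the dtype check
# at the top of long_term_average.
ds = xr.Dataset({'tas': (['time', 'lat', 'lon'], np.zeros((2, 2, 2)))},
                coords={'time': [0, 1],
                        'lat': [-45.0, 45.0],
                        'lon': [-90.0, 90.0]})

try:
    long_term_average(ds)
except ValidationError as err:
    # A user-facing validation problem rather than a programming error.
    print('validation failed:', err)

Whether ValidationError subclasses ValueError is not visible in this patch; if it does, existing 'except ValueError' handlers keep working, otherwise they need the same rename applied here.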