diff --git a/conda_requirements.txt b/conda_requirements.txt index 596ac0349..33d6469b6 100644 --- a/conda_requirements.txt +++ b/conda_requirements.txt @@ -2,7 +2,10 @@ # this covers py_gnome and the oil_library # This should be the minimal conda install # install with: -# conda install --file conda_requirements.txt +# conda install --file conda_requirements.txt +# +# or create an environment: +# conda create -n gnome --file conda_requirements.txt # These packages would all be in one of the following channels: # defaults # conda-forge @@ -10,7 +13,7 @@ python=2.7.* setuptools>=23.0 -numpy=1.13.* +numpy>=1.13.* scipy>=0.18.* gsw=3.0.3 psutil>=4.3 @@ -20,22 +23,27 @@ six>=1.10 geojson>=1.3 repoze.lru>=0.6 colander>=1.2 -sqlalchemy>=0.7.6 -zope.interface>=4.1 -zope.sqlalchemy>=0.7.6 -gdal=2.1.3 -netCDF4=1.3.1 -awesome-slugify>=1.6 + +tblib>=1.3.* # needed for child process exception handling. + +gdal=2.* +netCDF4 # it should find one compatible with gdal +awesome-slugify>=1.6.5 regex>=2014.12 unidecode>=0.04.19 pyshp=1.2.10 gridded=0.0.9 # NOAA maintained packages -unit_conversion=2.5.* +unit_conversion=2.6.* cell_tree2d>=0.3.* py_gd=0.1.* # libgd should get brought in automatically +# needed for OilLibrary +SQLAlchemy>=1.0.13 +zope.sqlalchemy>=0.7.7 +zope.interface>=4.1 + # required for building cython=0.24.* diff --git a/py_gnome/documentation/conf.py b/py_gnome/documentation/conf.py index ef752b6e3..543aefb99 100644 --- a/py_gnome/documentation/conf.py +++ b/py_gnome/documentation/conf.py @@ -32,7 +32,8 @@ 'sphinx.ext.coverage', # 'sphinx.ext.imgmath', 'sphinx.ext.mathjax', - 'sphinx.ext.viewcode' + 'sphinx.ext.viewcode', + 'sphinx.ext.autosectionlabel', ] # to make autodoc include __init__: diff --git a/py_gnome/documentation/scripting/outputters.rst b/py_gnome/documentation/scripting/outputters.rst index dcb8cd1f5..e3b0dcbd2 100644 --- a/py_gnome/documentation/scripting/outputters.rst +++ b/py_gnome/documentation/scripting/outputters.rst @@ -89,6 +89,7 @@ KMZ 
Output To save particle information into a KMZ file that can be read by Google Earth (and other applications), we use the KMZ outputter:: + from gnome.outputters import KMZOutput model.outputters += KMZOutput('gnome_results.kmz', output_timestep=timedelta(hours=6)) @@ -96,4 +97,46 @@ The KMZ contains a kml file with layers for each output timestep, unceratain and See :class:`gnome.outputters.KMZOutput` for the full documentation +Shapefile Output +---------------- + +Weathering Data Output +---------------------- + +Bulk oil budget properties (e.g. percent of total oil volume evaporated) are computed and stored in addition to the individual particle +data. These data are available through a specialized Outputter named WeatheringOutput. To save this information to a file:: + + from gnome.outputters import WeatheringOutput + model.outputters += WeatheringOutput('MyOutputDir') + +Alternatively, if you want to view specific weathering information during the model run:: + + from gnome.outputters import WeatheringOutput + model.outputters += WeatheringOutput() + + for step in model: + print "Percent evaporated is:" + print step['WeatheringOutput']['evaporated']/step['WeatheringOutput']['amount_released'] * 100 + + +Note: if you are running the model with a conservative or non-weathering substance, this will result in an +error as the WeatheringOutput will not contain any evaporation data. 
Depending on how you have set +up your model (spill substance, weatherers), WeatheringOutput may contain any or all of: + + * amount_released + * avg_density + * avg_viscosity + * beached + * dissolution + * evaporated + * floating + * natural_dispersion + * non_weathering + * off_maps + * sedimentation + * time_stamp + * water_content + + + diff --git a/py_gnome/documentation/scripting/scripting_intro.rst b/py_gnome/documentation/scripting/scripting_intro.rst index 42ab1c090..b0f981c6e 100644 --- a/py_gnome/documentation/scripting/scripting_intro.rst +++ b/py_gnome/documentation/scripting/scripting_intro.rst @@ -72,14 +72,41 @@ be the same as those specified when we created the map object. The default is to renderer = Renderer(output_timestep=timedelta(hours=6),map_BB=((-145,48), (-145,49), (-143,49), (-143,48))) model.outputters += renderer + +Step through the model and view data +------------------------------------ -Run the model -------------- -The model can be run by stepping through individual timesteps (e.g. if we want to see results along the way) or we -can do a full run:: +Once the model is all set up, we are ready to run the simulation. Sometimes we want to do this iteratively step-by-step to view data +along the way without outputting to a file. There are some helper utilities to extract data associated with the particles. This data +includes properties such as mass, age, and position; or weathering information such as the mass of oil evaporated (if the simulation has +specified an oil type rather than a conservative substance as in this example). 
+ +For example, if we want to extract the particle positions as a function of time, we can use the :func:`gnome.model.get_spill_property` +convenience function, as shown below:: + +    x=[] +    y=[] +    for step in model: +        positions = model.get_spill_property('positions') +        x.append(positions[:,0]) +        y.append(positions[:,1]) + +To see a list of properties associated with particles use:: + +    model.list_spill_properties() + +Note, this list may increase after the first time step as arrays are initialized. + +Run the model to completion +--------------------------- +Alternatively, to just run the model for the entire duration use:: model.full_run() + +Results will be written to files based on the outputters added to the model. + + View the results ---------------- The renderer that we added generated png images every 6 hours. Since we did not specify an output directory for these images, diff --git a/py_gnome/documentation/scripting/weatherers.rst b/py_gnome/documentation/scripting/weatherers.rst index 5fb86cf16..80758ec72 100644 --- a/py_gnome/documentation/scripting/weatherers.rst +++ b/py_gnome/documentation/scripting/weatherers.rst @@ -74,4 +74,27 @@ explicitly required but is needed by the Waves object. Adding on to our example waves = Waves(wind) water = Water(temperature=300.0, salinity=35.0) #temperature in Kelvin, salinity in psu model.weatherers += Evaporation(wind=wind,water=water) - model.weatherers += NaturalDispersion() + model.weatherers += NaturalDispersion() + + +Dissolution +----------- + +Emulsification +-------------- + +Biodegradation +-------------- + +Viewing Bulk Weathering Data +---------------------------- + +Since the total oil volume spilled is divided among multiple particles, bulk oil budget properties +(e.g. percent of oil volume evaporated) are computed and stored in addition to the individual particle +data. 
These data are available through a specialized Outputter named WeatheringOutput, +see :ref:`Weathering Data Output` + + + + + diff --git a/py_gnome/gnome/__init__.py b/py_gnome/gnome/__init__.py index d02bbbf63..69b26abca 100644 --- a/py_gnome/gnome/__init__.py +++ b/py_gnome/gnome/__init__.py @@ -1,6 +1,5 @@ """ -__init__.py for the gnome package - + __init__.py for the gnome package """ from itertools import chain @@ -18,7 +17,7 @@ # from gnomeobject import init_obj_log # using a PEP 404 compliant version name -__version__ = '0.6.0' +__version__ = '0.6.1' # a few imports so that the basic stuff is there @@ -35,78 +34,32 @@ def check_dependency_versions(): a warning is displayed """ libs = [('gridded', '0.0.9'), - ('oil_library', '1.0.6'), - ('unit_conversion', '2.5.5')] + ('oil_library', '1.0.7'), + ('unit_conversion', '2.6.0')] for name, version in libs: # import the lib: try: module = importlib.import_module(name) except ImportError: - print ("ERROR: The {} package, version >= {} needs to be installed". - format(name, version)) + print ('ERROR: The {} package, version >= {} needs to be installed' + .format(name, version)) sys.exit(1) if module.__version__ < version: - w = ('Version {0} of {1} package is reported, but actual version in module is {2}'. - format(version, name, module.__version__)) + w = ('Version {0} of {1} package is reported, ' + 'but actual version in module is {2}' + .format(version, name, module.__version__)) warnings.warn(w) -# ## maybe too complex that required... 
-# def check_dependency_versions(): -# ''' -# Checks the versions of the following libraries: -# gridded -# oillibrary -# unit_conversion -# If the version is not at least as current as what's in the conda_requirements file, -# a warning is displayed -# ''' -# def get_version(package): -# package = package.lower() -# return next((p.version for p in pkg_resources.working_set -# if p.project_name.lower() == package), "No match") - -# libs = [('gridded', '>=', '0.0.9'), -# ('oil-library', '>=', '1.0.6'), -# ('unit-conversion', '>=', '2.5.5')] -# # condafiledir = os.path.relpath(__file__).split(__file__.split('\\')[-3])[0] -# # condafile = os.path.join(condafiledir, 'conda_requirements.txt') -# # with open(condafile, 'r') as conda_reqs: -# # for line in conda_reqs.readlines(): -# for req in libs: -# criteria = None -# req_name, cmp_str, reqd_ver = req -# if '>' in cmp_str: -# criteria = (lambda a, b: a >= b) if '=' in cmp_str else (lambda a, b: a > b) -# elif '<' in cmp_str: -# criteria = (lambda a, b: a <= b) if '=' in cmp_str else (lambda a, b: a < b) -# else: -# criteria = (lambda a, b: a == b) -# inst_ver = get_version(req_name) - -# try: -# module_ver = importlib.import_module(req_name.replace('-', '_')).__version__ -# except ImportError: -# print "ERROR: The {} package, version {} {} needs to be installed".format(*req) -# sys.exit(1) - -# if not criteria(inst_ver, reqd_ver): -# if criteria(module_ver, reqd_ver): -# w = 'Version {0} of {1} package is reported, but actual version in module is {2}'.format(inst_ver, req_name, module_ver) -# warnings.warn(w) -# else: -# w = 'Version {0} of {1} package is installed in environment, {2}{3} required'.format(inst_ver, req_name, cmp_str, reqd_ver) -# warnings.warn(w) - - def initialize_log(config, logfile=None): ''' helper function to initialize a log - done by the application using PyGnome config can be a file containing json or it can be a Python dict :param config: logging configuration as a json file or config dict - it 
needs to be in the dict config format used by ``logging.dictConfig``: + it needs to be in the dict config format used by + ``logging.dictConfig``: https://docs.python.org/2/library/logging.config.html#logging-config-dictschema :param logfile=None: optional name of file to log to @@ -159,12 +112,12 @@ def _valid_units(unit_name): # we have a sort of chicken-egg situation here. The above functions need # to be defined before we can import these modules. check_dependency_versions() + from . import (map, - environment, - model, -# multi_model_broadcast, - spill_container, - spill, - movers, - outputters -) + environment, + model, + # multi_model_broadcast, + spill_container, + spill, + movers, + outputters) diff --git a/py_gnome/gnome/basic_types.py b/py_gnome/gnome/basic_types.py index 447bddee3..011877534 100644 --- a/py_gnome/gnome/basic_types.py +++ b/py_gnome/gnome/basic_types.py @@ -16,6 +16,14 @@ # pull everything from the cython code from cy_gnome.cy_basic_types import * + +# in lib_gnome, the coordinate systems used (r-theta, uv, etc) +# are called ts_format, which is not a very descriptive name. +# the word 'format' can mean a lot of different things depending on +# what we are talking about. So we try to be a bit more specific here. +coord_systems = ts_format + + # Here we customize what a numpy 'long' type is.... # We do this because numpy does different things with a long # that can mismatch what Cython does with the numpy ctypes. @@ -58,7 +66,7 @@ # Define enums that are independent of C++ here so we # don't have to recompile code -wind_datasource = enum(undefined=0, file=1, manual=2, nws=3, buoy=4) +wind_datasources = enum(undefined=0, file=1, manual=2, nws=3, buoy=4) # Define an enum for weathering status. The numpy array will contain np.uint8 # datatype. 
Can still define 2 more flags as 2**6, 2**7 diff --git a/py_gnome/gnome/cy_gnome/cy_basic_types.pyx b/py_gnome/gnome/cy_gnome/cy_basic_types.pyx index dce2d4e94..c19eec287 100644 --- a/py_gnome/gnome/cy_gnome/cy_basic_types.pyx +++ b/py_gnome/gnome/cy_gnome/cy_basic_types.pyx @@ -14,7 +14,6 @@ def enum(**enums): x.a = 1, x.b = 2, x.c = 3 x._attr = ['a','b','c'], x._int = [ 1, 2, 3] - Just found a clever way to do enums in python - Returns a new type called Enum whose attributes are given by the input in 'enums' - also append two more attributes called: @@ -29,6 +28,7 @@ def enum(**enums): return type('Enum', (), enums) + """ LE Status as an enum type """ diff --git a/py_gnome/gnome/environment/environment.py b/py_gnome/gnome/environment/environment.py index fe298ad87..49d5140f0 100644 --- a/py_gnome/gnome/environment/environment.py +++ b/py_gnome/gnome/environment/environment.py @@ -74,13 +74,17 @@ def prepare_for_model_step(self, model_time): """ pass - def get_wind_speed(self, points, model_time, format='r', fill_value=1.0): + def get_wind_speed(self, points, model_time, + coord_sys='r', fill_value=1.0): ''' Wrapper for the weatherers so they can extrapolate ''' -# new_model_time = self.check_time(wind, model_time) - retval = self.wind.at(points, model_time, format=format) - return retval.filled(fill_value) if isinstance(retval, np.ma.MaskedArray) else retval + retval = self.wind.at(points, model_time, coord_sys=coord_sys) + + if isinstance(retval, np.ma.MaskedArray): + return retval.filled(fill_value) + else: + return retval def check_time(self, wind, model_time): """ @@ -218,13 +222,7 @@ def __init__(self, sediment=.005, # kg/m^3 oceanic default wave_height=None, fetch=None, - units={'temperature': 'K', - 'salinity': 'psu', - 'sediment': 'kg/m^3', # do we need SI here? - 'wave_height': 'm', - 'fetch': 'm', - 'density': 'kg/m^3', - 'kinematic_viscosity': 'm^2/s'}, + units=None, name='Water'): ''' Assume units are SI for all properties. 
'units' attribute assumes SI @@ -239,8 +237,11 @@ def __init__(self, self.fetch = fetch self.kinematic_viscosity = 0.000001 self.name = name - self._units = dict(self._si_units) - self.units = units + + self.units = self._si_units + if units is not None: + # self.units is a property, so this is non-destructive + self.units = units def __repr__(self): info = ("{0.__class__.__module__}.{0.__class__.__name__}" @@ -327,10 +328,16 @@ def update_from_dict(self, data): @property def units(self): + if not hasattr(self, '_units'): + self._units = {} + return self._units @units.setter def units(self, u_dict): + if not hasattr(self, '_units'): + self._units = {} + for prop, unit in u_dict.iteritems(): if prop in self._units_type: if unit not in self._units_type[prop][1]: @@ -415,7 +422,10 @@ def attempt_from_netCDF(cls, **klskwargs): grid = PyGrid.from_netCDF(filename=filename, dataset=dg, **kwargs) kwargs['grid'] = grid - scs = copy.copy(Environment._subclasses) if _cls_list is None else _cls_list + if _cls_list is None: + scs = copy.copy(Environment._subclasses) + else: + scs = _cls_list for c in scs: if (issubclass(c, (Variable, VectorVariable)) and @@ -477,7 +487,7 @@ def ice_env_from_netCDF(filename=None, **kwargs): def get_file_analysis(filename): env = env_from_netCDF(filename=filename) - classes = copy.copy(Environment._subclasses) + # classes = copy.copy(Environment._subclasses) if len(env) > 0: report = ['Can create {0} types of environment objects' diff --git a/py_gnome/gnome/environment/environment_objects.py b/py_gnome/gnome/environment/environment_objects.py index 8cdc15cc5..2f0dce1f0 100644 --- a/py_gnome/gnome/environment/environment_objects.py +++ b/py_gnome/gnome/environment/environment_objects.py @@ -13,9 +13,6 @@ from gnome.environment.ts_property import TSVectorProp, TimeSeriesProp from gnome.environment.gridded_objects_base import (Time, - Depth, - Grid_U, - Grid_S, Variable, VectorVariable, VariableSchema, @@ -37,6 +34,7 @@ def __init__(self, if ds is 
None: if data_file is None: data_file = bathymetry.data_file + if data_file is None: raise ValueError('Need data_file or dataset ' 'containing sigma equation terms') @@ -147,8 +145,10 @@ def interpolation_alphas(self, points, data_shape, _hash=None): if ulev == 0: und_alph[within_layer] = -2 else: - a = ((pts[:, 2].take(within_layer) - blev_depths.take(within_layer)) / - (ulev_depths.take(within_layer) - blev_depths.take(within_layer))) + a = ((pts[:, 2].take(within_layer) - + blev_depths.take(within_layer)) / + (ulev_depths.take(within_layer) - + blev_depths.take(within_layer))) und_alph[within_layer] = a blev_depths = ulev_depths @@ -368,11 +368,8 @@ def constant_wind(cls, class TemperatureTS(TimeSeriesProp, Environment): - def __init__(self, - name=None, - units='K', - time=None, - data=None, + def __init__(self, name=None, units='K', + time=None, data=None, **kwargs): if 'timeseries' in kwargs: ts = kwargs['timeseries'] @@ -457,13 +454,13 @@ class IceConcentration(Variable, Environment): def __init__(self, *args, **kwargs): super(IceConcentration, self).__init__(*args, **kwargs) -# def __eq__(self, o): -# t1 = (self.name == o.name and -# self.units == o.units and -# self.time == o.time and -# self.varname == o.varname) -# t2 = self.data == o.data -# return t1 and t2 + # def __eq__(self, o): + # t1 = (self.name == o.name and + # self.units == o.units and + # self.time == o.time and + # self.varname == o.varname) + # t2 = self.data == o.data + # return t1 and t2 class Bathymetry(Variable): @@ -559,24 +556,31 @@ def __init__(self, wet_dry_mask=None, *args, **kwargs): self.wet_dry_mask = wet_dry_mask if self.units is None: - self.units='m/s' + self.units = 'm/s' - def at(self, points, time, units=None, extrapolate=False, format='uv', _auto_align=True, **kwargs): + def at(self, points, time, units=None, extrapolate=False, + coord_sys='uv', _auto_align=True, **kwargs): ''' Find the value of the property at positions P at time T :param points: Coordinates to be 
queried (P) - :param time: The time at which to query these points (T) - :param depth: Specifies the depth level of the variable - :param units: units the values will be returned in (or converted to) - :param extrapolate: if True, extrapolation will be supported - :param format: String describing the data and organization. :type points: Nx2 array of double + + :param time: The time at which to query these points (T) :type time: datetime.datetime object + + :param depth: Specifies the depth level of the variable :type depth: integer + + :param units: units the values will be returned in (or converted to) :type units: string such as ('m/s', 'knots', etc) + + :param extrapolate: if True, extrapolation will be supported :type extrapolate: boolean (True or False) - :type format: string, one of ('uv','u','v','r-theta','r','theta') + + :param coord_sys: String describing the coordinate system to be used. + :type coord_sys: string, one of ('uv','u','v','r-theta','r','theta') + :return: returns a Nx2 array of interpolated values :rtype: double ''' @@ -598,42 +602,57 @@ def at(self, points, time, units=None, extrapolate=False, format='uv', _auto_ali if res is not None: value = res if _auto_align: - value = gridded.utilities._align_results_to_spatial_data(value, points) + value = (gridded.utilities + ._align_results_to_spatial_data(value, points)) return value if value is None: - value = super(GridWind, self).at(pts, time, units, extrapolate=extrapolate, _auto_align=False, **kwargs) + value = super(GridWind, self).at(pts, time, units, + extrapolate=extrapolate, + _auto_align=False, **kwargs) if has_depth: value[pts[:, 2] > 0.0] = 0 # no wind underwater! 
if self.angle is not None: - angs = self.angle.at(pts, time, extrapolate=extrapolate, _auto_align=False, **kwargs).reshape(-1) + angs = (self.angle + .at(pts, time, + extrapolate=extrapolate, _auto_align=False, + **kwargs) + .reshape(-1)) + x = value[:, 0] * np.cos(angs) - value[:, 1] * np.sin(angs) y = value[:, 0] * np.sin(angs) + value[:, 1] * np.cos(angs) + value[:, 0] = x value[:, 1] = y - if format == 'u': - value = value[:,0] - elif format == 'v': - value = value[:,1] - elif format in ('r-theta', 'r', 'theta'): - _mag = np.sqrt(value[:,0]**2 + value[:,1]**2) - _dir = np.arctan2(value[:,1], value[:,0]) * 180./np.pi - if format == 'r': + if coord_sys == 'u': + value = value[:, 0] + elif coord_sys == 'v': + value = value[:, 1] + elif coord_sys in ('r-theta', 'r', 'theta'): + _mag = np.sqrt(value[:, 0] ** 2 + value[:, 1] ** 2) + _dir = np.arctan2(value[:, 1], value[:, 0]) * 180. / np.pi + + if coord_sys == 'r': value = _mag - elif format == 'theta': + elif coord_sys == 'theta': value = _dir else: value = np.column_stack((_mag, _dir)) + if _auto_align: - value = gridded.utilities._align_results_to_spatial_data(value, points) + value = gridded.utilities._align_results_to_spatial_data(value, + points) if mem: - self._memoize_result(pts, time, value, self._result_memo, _hash=_hash) + self._memoize_result(pts, time, value, self._result_memo, + _hash=_hash) + return value def get_start_time(self): return self.time.min_time + def get_end_time(self): return self.time.max_time diff --git a/py_gnome/gnome/environment/gridded_objects_base.py b/py_gnome/gnome/environment/gridded_objects_base.py index 711204543..4c91137a3 100644 --- a/py_gnome/gnome/environment/gridded_objects_base.py +++ b/py_gnome/gnome/environment/gridded_objects_base.py @@ -180,6 +180,11 @@ class Grid_S(gridded.grids.Grid_S, serializable.Serializable): _state.add_field([serializable.Field('filename', save=True, update=True, isdatafile=True)]) + '''hack to avoid problems when registering object in 
webgnome''' + @property + def non_grid_variables(self): + return None + def draw_to_plot(self, ax, features=None, style=None): def_style = {'node': {'color': 'green', 'linestyle': 'dashed', @@ -309,17 +314,80 @@ def from_netCDF(*args, **kwargs): return gridded.grids.Grid.from_netCDF(*args, **kwargs) + @staticmethod + def new_from_dict(dict_): + return PyGrid.from_netCDF(**dict_) + @staticmethod def _get_grid_type(*args, **kwargs): kwargs['_default_types'] = (('ugrid', Grid_U), ('sgrid', Grid_S), ('rgrid', Grid_R)) return gridded.grids.Grid._get_grid_type(*args, **kwargs) +class Depth(gridded.depth.Depth): + @staticmethod + def from_netCDF(*args, **kwargs): + kwargs['_default_types'] = (('level', L_Depth), ('sigma', S_Depth), ('surface', DepthBase)) + + return gridded.depth.Depth.from_netCDF(*args, **kwargs) + + @staticmethod + def _get_depth_type(*args, **kwargs): + kwargs['_default_types'] = (('level', L_Depth), ('sigma', S_Depth), ('surface', DepthBase)) + + return gridded.depth.Depth._get_depth_type(*args, **kwargs) + + +class Variable(gridded.Variable, serializable.Serializable): + _state = copy.deepcopy(serializable.Serializable._state) + _schema = VariableSchema + _state.add_field([serializable.Field('units', save=True, update=True), + serializable.Field('time', save=True, update=True, + save_reference=True), + serializable.Field('grid', update=True, read=True, + save_reference=True), + serializable.Field('varname', save=True, update=True), + serializable.Field('data_file', save=True, update=True, + isdatafile=True), + serializable.Field('grid_file', save=True, update=True, + isdatafile=True)]) + + default_names = [] + cf_names = [] + + _default_component_types = copy.deepcopy(gridded.Variable + ._default_component_types) + _default_component_types.update({'time': Time, + 'grid': PyGrid, + 'depth': Depth}) + + def __init__(self, extrapolate=False, *args, **kwargs): + self.extrapolate = extrapolate + super(Variable, self).__init__(*args, **kwargs) + + def 
at(self, *args, **kwargs): + if ('extrapolate' not in kwargs): + kwargs['extrapolate'] = self.extrapolate + return super(Variable, self).at(*args, **kwargs) + + @classmethod + def new_from_dict(cls, dict_): + if 'data' not in dict_: + return cls.from_netCDF(**dict_) + + return super(Variable, cls).new_from_dict(dict_) + + class DepthBase(gridded.depth.DepthBase): _state = copy.deepcopy(serializable.Serializable._state) _schema = DepthSchema _state.add_field([serializable.Field('filename', save=True, update=True, isdatafile=True)]) + _default_component_types = copy.deepcopy(gridded.depth.DepthBase + ._default_component_types) + _default_component_types.update({'time': Time, + 'grid': PyGrid, + 'variable': Variable}) @classmethod def new_from_dict(cls, dict_): dict_.pop('json_') @@ -331,11 +399,17 @@ def new_from_dict(cls, dict_): rv.__class__._def_count -= 1 return rv + class L_Depth(gridded.depth.L_Depth): _state = copy.deepcopy(serializable.Serializable._state) _schema = DepthSchema _state.add_field([serializable.Field('filename', save=True, update=True, isdatafile=True)]) + _default_component_types = copy.deepcopy(gridded.depth.L_Depth + ._default_component_types) + _default_component_types.update({'time': Time, + 'grid': PyGrid, + 'variable': Variable}) @classmethod def new_from_dict(cls, dict_): dict_.pop('json_') @@ -347,11 +421,17 @@ def new_from_dict(cls, dict_): rv.__class__._def_count -= 1 return rv + class S_Depth(gridded.depth.S_Depth): _state = copy.deepcopy(serializable.Serializable._state) _schema = DepthSchema _state.add_field([serializable.Field('filename', save=True, update=True, isdatafile=True)]) + _default_component_types = copy.deepcopy(gridded.depth.S_Depth + ._default_component_types) + _default_component_types.update({'time': Time, + 'grid': PyGrid, + 'variable': Variable}) @classmethod def new_from_dict(cls, dict_): dict_.pop('json_') @@ -363,50 +443,6 @@ def new_from_dict(cls, dict_): rv.__class__._def_count -= 1 return rv -class 
Depth(gridded.depth.Depth): - @staticmethod - def from_netCDF(*args, **kwargs): - kwargs['_default_types'] = (('level', L_Depth), ('sigma', S_Depth), ('surface', DepthBase)) - - return gridded.depth.Depth.from_netCDF(*args, **kwargs) - - @staticmethod - def _get_depth_type(*args, **kwargs): - kwargs['_default_types'] = (('level', L_Depth), ('sigma', S_Depth), ('surface', DepthBase)) - - return gridded.depth.Depth._get_depth_type(*args, **kwargs) - - -class Variable(gridded.Variable, serializable.Serializable): - _state = copy.deepcopy(serializable.Serializable._state) - _schema = VariableSchema - _state.add_field([serializable.Field('units', save=True, update=True), - serializable.Field('time', save=True, update=True, - save_reference=True), - serializable.Field('grid', save=True, update=True, - save_reference=True), - serializable.Field('varname', save=True, update=True), - serializable.Field('data_file', save=True, update=True, - isdatafile=True), - serializable.Field('grid_file', save=True, update=True, - isdatafile=True)]) - - default_names = [] - cf_names = [] - - _default_component_types = copy.deepcopy(gridded.Variable - ._default_component_types) - _default_component_types.update({'time': Time, - 'grid': PyGrid, - 'depth': Depth}) - - @classmethod - def new_from_dict(cls, dict_): - if 'data' not in dict_: - return cls.from_netCDF(**dict_) - - return super(Variable, cls).new_from_dict(dict_) - class VectorVariable(gridded.VectorVariable, serializable.Serializable): @@ -415,7 +451,7 @@ class VectorVariable(gridded.VectorVariable, serializable.Serializable): _state.add_field([serializable.Field('units', save=True, update=True), serializable.Field('time', save=True, update=True, save_reference=True), - serializable.Field('grid', save=True, update=True, + serializable.Field('grid', update=True, read=True, save_reference=True), serializable.Field('variables', save=True, update=True, read=True, iscollection=True), diff --git a/py_gnome/gnome/environment/waves.py 
b/py_gnome/gnome/environment/waves.py index 53cdb9a8d..f8e0e7632 100644 --- a/py_gnome/gnome/environment/waves.py +++ b/py_gnome/gnome/environment/waves.py @@ -1,5 +1,4 @@ #!/usr/bin/env python - """ The waves environment object. @@ -8,7 +7,6 @@ Uses the same approach as ADIOS 2 (code ported from old MATLAB prototype code) - """ from __future__ import division @@ -102,9 +100,11 @@ def get_value(self, points, time): wave_height = self.water.wave_height if wave_height is None: - U = self.get_wind_speed(points, time, format='r') # only need velocity + # only need velocity + U = self.get_wind_speed(points, time, coord_sys='r') H = self.compute_H(U) - else: # user specified a wave height + else: + # user specified a wave height H = wave_height U = self.pseudo_wind(H) @@ -115,7 +115,6 @@ def get_value(self, points, time): return H, T, Wf, De - def get_emulsification_wind(self, points, time): """ Return the right wind for the wave climate diff --git a/py_gnome/gnome/environment/wind.py b/py_gnome/gnome/environment/wind.py index 84d38f581..c0bdd69bc 100644 --- a/py_gnome/gnome/environment/wind.py +++ b/py_gnome/gnome/environment/wind.py @@ -16,7 +16,11 @@ import unit_conversion as uc -from gnome import basic_types +from gnome.basic_types import datetime_value_2d +from gnome.basic_types import coord_systems +from gnome.basic_types import wind_datasources + +from gnome.cy_gnome.cy_ossm_time import ossm_wind_units from gnome.utilities import serializable from gnome.utilities.time_utils import sec_to_datetime @@ -24,8 +28,6 @@ from gnome.utilities.inf_datetime import InfDateTime from gnome.utilities.distributions import RayleighDistribution as rayleigh -from gnome.cy_gnome.cy_ossm_time import ossm_wind_units - from gnome.persist.extend_colander import (DefaultTupleSchema, LocalDateTime, DatetimeValue2dArraySchema) @@ -92,7 +94,7 @@ class WindSchema(base_schema.ObjType): longitude = SchemaNode(Float(), missing=drop) source_id = SchemaNode(String(), missing=drop) source_type = 
SchemaNode(String(), - validator=OneOf(basic_types.wind_datasource._attr), + validator=OneOf(wind_datasources._attr), default='undefined', missing='undefined') units = SchemaNode(String(), default='m/s') speed_uncertainty_scale = SchemaNode(Float(), missing=drop) @@ -144,7 +146,7 @@ def __init__(self, timeseries=None, units=None, filename=None, - format='r-theta', + coord_sys='r-theta', latitude=None, longitude=None, speed_uncertainty_scale=0.0, @@ -164,7 +166,7 @@ def __init__(self, if filename is not None: self.source_type = kwargs.pop('source_type', 'file') - super(Wind, self).__init__(filename=filename, format=format) + super(Wind, self).__init__(filename=filename, coord_sys=coord_sys) self.name = kwargs.pop('name', os.path.split(self.filename)[1]) # set _user_units attribute to match user_units read from file. @@ -173,14 +175,14 @@ def __init__(self, if units is not None: self.units = units else: - if kwargs.get('source_type') in basic_types.wind_datasource._attr: + if kwargs.get('source_type') in wind_datasources._attr: self.source_type = kwargs.pop('source_type') else: self.source_type = 'undefined' # either timeseries is given or nothing is given # create an empty default object - super(Wind, self).__init__(format=format) + super(Wind, self).__init__(coord_sys=coord_sys) self.units = 'mps' # units for default object @@ -188,7 +190,7 @@ def __init__(self, if units is None: raise TypeError('Units must be provided with timeseries') - self.set_wind_data(timeseries, units, format) + self.set_wind_data(timeseries, units, coord_sys) self.name = kwargs.pop('name', self.__class__.__name__) @@ -213,8 +215,8 @@ def __repr__(self): @property def timeseries(self): ''' - returns entire timeseries in 'r-theta' format in the units in which - the data was entered or as specified by units attribute + returns entire timeseries in 'r-theta' coordinate system in the units + in which the data was entered or as specified by units attribute ''' return 
self.get_wind_data(units=self.units) @@ -265,7 +267,7 @@ def timeseries_to_dict(self): @property def units(self): ''' - define units in which wind data is input/output + define units in which wind data is input/output ''' return self._user_units @@ -282,7 +284,7 @@ def units(self, value): self._check_units(value) self._user_units = value - def _convert_units(self, data, ts_format, from_unit, to_unit): + def _convert_units(self, data, coord_sys, from_unit, to_unit): ''' method to convert units for the 'value' stored in the date/time value pair @@ -290,8 +292,7 @@ def _convert_units(self, data, ts_format, from_unit, to_unit): if from_unit != to_unit: data[:, 0] = uc.convert('Velocity', from_unit, to_unit, data[:, 0]) - if ts_format == basic_types.ts_format.uv: - # TODO: avoid clobbering the 'ts_format' namespace + if coord_sys == coord_systems.uv: data[:, 1] = uc.convert('Velocity', from_unit, to_unit, data[:, 1]) @@ -376,26 +377,29 @@ def update_from_dict(self, data): else: return updated - def get_wind_data(self, datetime=None, units=None, format='r-theta'): + def get_wind_data(self, datetime=None, units=None, coord_sys='r-theta'): """ - Returns the timeseries in the requested format. If datetime=None, - then the original timeseries that was entered is returned. + Returns the timeseries in the requested coordinate system. + If datetime=None, then the original timeseries that was entered is + returned. If datetime is a list containing datetime objects, then the value for each of those date times is determined by the underlying C++ object and the timeseries is returned. - The output format is defined by the strings 'r-theta', 'uv' + The output coordinate system is defined by the strings 'r-theta', 'uv' :param datetime: [optional] datetime object or list of datetime objects for which the value is desired :type datetime: datetime object + :param units: [optional] outputs data in these units. Default is to output data without unit conversion :type units: string. 
Uses the unit_conversion module. - :param format: output format for the times series: - either 'r-theta' or 'uv' - :type format: either string or integer value defined by - basic_types.ts_format.* (see cy_basic_types.pyx) + + :param coord_sys: output coordinate system for the times series: + either 'r-theta' or 'uv' + :type coord_sys: either string or integer value defined by + basic_types.ts_format.* (see cy_basic_types.pyx) :returns: numpy array containing dtype=basic_types.datetime_value_2d. Contains user specified datetime and the corresponding @@ -407,20 +411,20 @@ def get_wind_data(self, datetime=None, units=None, format='r-theta'): todo: return data in appropriate significant digits """ - datetimeval = super(Wind, self).get_timeseries(datetime, format) + datetimeval = super(Wind, self).get_timeseries(datetime, coord_sys) units = (units, self._user_units)[units is None] datetimeval['value'] = self._convert_units(datetimeval['value'], - format, + coord_sys, 'meter per second', units) return datetimeval - def set_wind_data(self, wind_data, units, format='r-theta'): + def set_wind_data(self, wind_data, units, coord_sys='r-theta'): """ Sets the timeseries of the Wind object to the new value given by - a numpy array. The format for the input data defaults to + a numpy array. The coordinate system for the input data defaults to basic_types.format.magnitude_direction but can be changed by the user. Units are also required with the data. @@ -428,12 +432,14 @@ def set_wind_data(self, wind_data, units, format='r-theta'): numpy array :type datetime_value_2d: numpy array of dtype basic_types.datetime_value_2d + :param units: units associated with the data. Valid units defined in Wind.valid_vel_units list - :param format: output format for the times series; as defined by - basic_types.format. 
- :type format: either string or integer value defined by - basic_types.format.* (see cy_basic_types.pyx) + + :param coord_sys: output coordinate system for the times series, + as defined by basic_types.format. + :type coord_sys: either string or integer value defined by + basic_types.format.* (see cy_basic_types.pyx) """ if self._check_timeseries(wind_data): self._check_units(units) @@ -441,10 +447,10 @@ def set_wind_data(self, wind_data, units, format='r-theta'): wind_data = self._xform_input_timeseries(wind_data) wind_data['value'] = self._convert_units(wind_data['value'], - format, units, + coord_sys, units, 'meter per second') - super(Wind, self).set_timeseries(wind_data, format) + super(Wind, self).set_timeseries(wind_data, coord_sys) else: raise ValueError('Bad timeseries as input') @@ -454,7 +460,7 @@ def get_value(self, time): independent of location; however, a gridded datafile may require location so this interface may get refactored if it needs to support different types of wind data. It returns the data in SI units (m/s) - in 'r-theta' format (speed, direction) + in 'r-theta' coordinate system (speed, direction) :param time: the time(s) you want the data for :type time: datetime object or sequence of datetime objects. @@ -465,10 +471,11 @@ def get_value(self, time): return tuple(data[0]['value']) - def at(self, points, time, format='r-theta', extrapolate=True, _auto_align=True): + def at(self, points, time, coord_sys='r-theta', + extrapolate=True, _auto_align=True): ''' Returns the value of the wind at the specified points at the specified - time. Valid format specifications include 'r-theta', 'r', 'theta', + time. Valid coordinate systems include 'r-theta', 'r', 'theta', 'uv', 'u' or 'v'. This function is for API compatibility with the new environment objects. @@ -476,36 +483,42 @@ def at(self, points, time, format='r-theta', extrapolate=True, _auto_align=True) This may not be None. 
To get wind values position-independently, use get_value(time) :param time: Datetime of the time to be queried - :param format: String describing the data and organization. + + :param coord_sys: String describing the coordinate system. + :param extrapolate: extrapolation on/off (ignored for now) ''' if points is None: - points = np.array((0,0)).reshape(-1,2) + points = np.array((0, 0)).reshape(-1, 2) + pts = gridded.utilities._reorganize_spatial_data(points) ret_data = np.zeros_like(pts, dtype='float64') - if format in ('r-theta','uv'): - data = self.get_wind_data(time, 'm/s', format)[0]['value'] - ret_data[:,0] = data[0] - ret_data[:,1] = data[1] - elif format in ('u','v','r','theta'): - f = None - if format in ('u','v'): + + if coord_sys in ('r-theta', 'uv'): + data = self.get_wind_data(time, 'm/s', coord_sys)[0]['value'] + ret_data[:, 0] = data[0] + ret_data[:, 1] = data[1] + elif coord_sys in ('u', 'v', 'r', 'theta'): + if coord_sys in ('u', 'v'): f = 'uv' else: f = 'r-theta' + data = self.get_wind_data(time, 'm/s', f)[0]['value'] - if format in ('u','r'): - ret_data[:,0] = data[0] - ret_data = ret_data[:,0] + if coord_sys in ('u', 'r'): + ret_data[:, 0] = data[0] + ret_data = ret_data[:, 0] else: - ret_data[:,1] = data[1] - ret_data = ret_data[:,1] + ret_data[:, 1] = data[1] + ret_data = ret_data[:, 1] else: - raise ValueError('invalid format {0}'.format(format)) + raise ValueError('invalid coordinate system {0}'.format(coord_sys)) if _auto_align: - ret_data = gridded.utilities._align_results_to_spatial_data(ret_data, points) + ret_data = (gridded.utilities + ._align_results_to_spatial_data(ret_data, points)) + return ret_data def set_speed_uncertainty(self, up_or_down=None): @@ -597,7 +610,7 @@ def constant_wind(speed, direction, units='m/s'): The time for a constant wind timeseries is irrelevant. This function simply sets it to datetime.now() accurate to hours. 
""" - wind_vel = np.zeros((1, ), dtype=basic_types.datetime_value_2d) + wind_vel = np.zeros((1, ), dtype=datetime_value_2d) # just to have a time accurate to minutes wind_vel['time'][0] = datetime.datetime.now().replace(microsecond=0, @@ -605,7 +618,7 @@ def constant_wind(speed, direction, units='m/s'): minute=0) wind_vel['value'][0] = (speed, direction) - return Wind(timeseries=wind_vel, format='r-theta', units=units) + return Wind(timeseries=wind_vel, coord_sys='r-theta', units=units) def wind_from_values(values, units='m/s'): @@ -616,10 +629,10 @@ def wind_from_values(values, units='m/s'): :returns: A Wind timeseries object that can be used for a wind mover, etc. """ - wind_vel = np.zeros((len(values), ), dtype=basic_types.datetime_value_2d) + wind_vel = np.zeros((len(values), ), dtype=datetime_value_2d) for i, record in enumerate(values): wind_vel['time'][i] = record[0] wind_vel['value'][i] = tuple(record[1:3]) - return Wind(timeseries=wind_vel, format='r-theta', units=units) + return Wind(timeseries=wind_vel, coord_sys='r-theta', units=units) diff --git a/py_gnome/gnome/map.py b/py_gnome/gnome/map.py index a4c2ca8f7..7db1934ad 100644 --- a/py_gnome/gnome/map.py +++ b/py_gnome/gnome/map.py @@ -340,6 +340,9 @@ def resurface_airborne_elements(self, spill_container): np.maximum(next_positions[:, 2], 0.0, out=next_positions[:, 2]) return None + def to_geojson(self): + return FeatureCollection([]) + class ParamMap(GnomeMap): _state = copy.deepcopy(GnomeMap._state) diff --git a/py_gnome/gnome/model.py b/py_gnome/gnome/model.py index ef67a695c..0d98d98b7 100644 --- a/py_gnome/gnome/model.py +++ b/py_gnome/gnome/model.py @@ -1615,6 +1615,15 @@ def set_make_default_refs(self, value): for item in oc: item.make_default_refs = value + def list_spill_properties(self): + ''' + Convenience method to list properties of a spill that + can be retrived using get_spill_property + + ''' + + return self.spills.items()[0].data_arrays.keys() + def get_spill_property(self, prop_name, 
ucert=0): ''' Convenience method to allow user to look up properties of a spill. diff --git a/py_gnome/gnome/movers/current_movers.py b/py_gnome/gnome/movers/current_movers.py index 798ff7b58..c9b8d8e78 100644 --- a/py_gnome/gnome/movers/current_movers.py +++ b/py_gnome/gnome/movers/current_movers.py @@ -7,7 +7,7 @@ import numpy as np -from colander import (SchemaNode, Bool, String, Float, drop) +from colander import (SchemaNode, Bool, String, Float, Int, drop) from gnome import basic_types @@ -1051,6 +1051,15 @@ class ComponentMoverSchema(ObjType, ProcessSchema): filename1 = SchemaNode(String(), missing=drop) filename2 = SchemaNode(String(), missing=drop) scale_refpoint = WorldPoint(missing=drop) + pat1_angle = SchemaNode(Float(), missing=drop) + pat1_speed = SchemaNode(Float(), missing=drop) + pat1_speed_units = SchemaNode(Int(), missing=drop) + pat1_scale_to_value = SchemaNode(Float(), missing=drop) + pat2_angle = SchemaNode(Float(), missing=drop) + pat2_speed = SchemaNode(Float(), missing=drop) + pat2_speed_units = SchemaNode(Int(), missing=drop) + pat2_scale_to_value = SchemaNode(Float(), missing=drop) + scale_by = SchemaNode(Int(), missing=drop) class ComponentMover(CurrentMoversBase, Serializable): diff --git a/py_gnome/gnome/movers/py_current_movers.py b/py_gnome/gnome/movers/py_current_movers.py index 95ee9d2e9..6dfad55e0 100644 --- a/py_gnome/gnome/movers/py_current_movers.py +++ b/py_gnome/gnome/movers/py_current_movers.py @@ -67,7 +67,6 @@ class PyCurrentMover(movers.PyMover, serializable.Serializable): def __init__(self, filename=None, current=None, - name=None, extrapolate=False, time_offset=0, current_scale=1, @@ -114,8 +113,8 @@ def __init__(self, self.current = GridCurrent.from_netCDF(filename=self.filename, **kwargs) - if name is None: - name = self.__class__.__name__ + str(self.__class__._def_count) + if 'name' not in kwargs: + kwargs['name'] = self.__class__.__name__ + str(self.__class__._def_count) self.__class__._def_count += 1 
self.extrapolate = extrapolate @@ -137,10 +136,10 @@ def __init__(self, # either a 1, or 2 depending on whether spill is certain or not self.spill_type = 0 - (super(PyCurrentMover, self) .__init__(default_num_method=default_num_method, **kwargs)) + def _attach_default_refs(self, ref_dict): pass return serializable.Serializable._attach_default_refs(self, ref_dict) diff --git a/py_gnome/gnome/movers/py_wind_movers.py b/py_gnome/gnome/movers/py_wind_movers.py index 7c09242fa..6fd2eaa0c 100644 --- a/py_gnome/gnome/movers/py_wind_movers.py +++ b/py_gnome/gnome/movers/py_wind_movers.py @@ -1,6 +1,8 @@ import movers import copy +import numpy as np + from colander import (SchemaNode, Bool, Float, String, Sequence, DateTime, drop) @@ -21,7 +23,7 @@ class PyWindMoverSchema(base_schema.ObjType): filename = SchemaNode(typ=Sequence(accept_scalar=True), children=[SchemaNode(String())], missing=drop) - current_scale = SchemaNode(Float(), missing=drop) + wind_scale = SchemaNode(Float(), missing=drop) extrapolate = SchemaNode(Bool(), missing=drop) time_offset = SchemaNode(Float(), missing=drop) wind = GridWind._schema(missing=drop) @@ -45,7 +47,7 @@ class PyWindMover(movers.PyMover, serializable.Serializable): _state.add_field([serializable.Field('filename', save=True, read=True, isdatafile=True, test_for_eq=False), - serializable.Field('wind', save=True, read=True, + serializable.Field('wind', read=True, save_reference=True), serializable.Field('extrapolate', read=True, save=True)]) _state.add(update=['uncertain_duration', 'uncertain_time_delay'], @@ -60,7 +62,6 @@ class PyWindMover(movers.PyMover, serializable.Serializable): def __init__(self, filename=None, wind=None, - name=None, extrapolate=False, time_offset=0, uncertain_duration=3, @@ -80,13 +81,13 @@ def __init__(self, will attempt to be instantiated from the file :param active_start: datetime when the mover should be active :param active_stop: datetime after which the mover should be inactive - :param current_scale: Value 
to scale current data + :param wind_scale: Value to scale wind data :param uncertain_duration: how often does a given uncertain element get reset :param uncertain_time_delay: when does the uncertainly kick in. :param uncertain_cross: Scale for uncertainty perpendicular to the flow :param uncertain_along: Scale for uncertainty parallel to the flow - :param extrapolate: Allow current data to be extrapolated + :param extrapolate: Allow wind data to be extrapolated before and after file data :param time_offset: Time zone shift if data is in GMT :param num_method: Numerical method for calculating movement delta. @@ -106,8 +107,8 @@ def __init__(self, self.wind = GridWind.from_netCDF(filename=self.filename, **kwargs) - if name is None: - name = self.__class__.__name__ + str(self.__class__._def_count) + if 'name' not in kwargs: + kwargs['name'] = self.__class__.__name__ + str(self.__class__._def_count) self.__class__._def_count += 1 self.extrapolate = extrapolate @@ -130,7 +131,7 @@ def from_netCDF(cls, filename=None, extrapolate=False, time_offset=0, - current_scale=1, + wind_scale=1, uncertain_duration=24 * 3600, uncertain_time_delay=0, uncertain_along=.5, @@ -145,7 +146,7 @@ def from_netCDF(cls, filename=filename, extrapolate=extrapolate, time_offset=time_offset, - current_scale=current_scale, + wind_scale=wind_scale, uncertain_along=uncertain_along, uncertain_across=uncertain_across, uncertain_cross=uncertain_cross, @@ -169,7 +170,7 @@ def real_data_stop(self, value): @property def is_data_on_cells(self): - return self.wind.grid.infer_location(self.current.u.data) != 'node' + return self.wind.grid.infer_location(self.wind.u.data) != 'node' def prepare_for_model_step(self, sc, time_step, model_time_datetime): """ @@ -188,11 +189,12 @@ def prepare_for_model_step(self, sc, time_step, model_time_datetime): if sc.num_released is None or sc.num_released == 0: return - rand.random_with_persistance(sc['windage_range'][:, 0], - sc['windage_range'][:, 1], - sc['windages'], - 
sc['windage_persist'], - time_step) + if self.active: + rand.random_with_persistance(sc['windage_range'][:, 0], + sc['windage_range'][:, 1], + sc['windages'], + sc['windage_persist'], + time_step) def get_move(self, sc, time_step, model_time_datetime, num_method=None): """ @@ -212,21 +214,26 @@ def get_move(self, sc, time_step, model_time_datetime, num_method=None): All movers must implement get_move() since that's what the model calls """ method = None + positions = sc['positions'] - if num_method is None: - method = self.num_methods[self.default_num_method] - else: - method = self.num_method[num_method] + if self.active and len(positions) > 0: + if num_method is None: + method = self.num_methods[self.default_num_method] + else: + method = self.num_method[num_method] - status = sc['status_codes'] != oil_status.in_water - positions = sc['positions'] - pos = positions[:] + status = sc['status_codes'] != oil_status.in_water + #positions = sc['positions'] + pos = positions[:] + + deltas = method(sc, time_step, model_time_datetime, pos, self.wind) + deltas[:, 0] *= sc['windages'] + deltas[:, 1] *= sc['windages'] - deltas = method(sc, time_step, model_time_datetime, pos, self.wind) - deltas[:, 0] *= sc['windages'] - deltas[:, 1] *= sc['windages'] + deltas = FlatEarthProjection.meters_to_lonlat(deltas, positions) + deltas[status] = (0, 0, 0) - deltas = FlatEarthProjection.meters_to_lonlat(deltas, positions) - deltas[status] = (0, 0, 0) + else: + deltas = np.zeros_like(positions) return deltas diff --git a/py_gnome/gnome/movers/ship_drift_mover.py b/py_gnome/gnome/movers/ship_drift_mover.py index 14c576463..b3af04510 100644 --- a/py_gnome/gnome/movers/ship_drift_mover.py +++ b/py_gnome/gnome/movers/ship_drift_mover.py @@ -164,11 +164,12 @@ def prepare_for_model_step(self, sc, time_step, model_time_datetime): self.grid.prepare_for_model_step(model_time_datetime) # here we might put in drift angle stuff ? 
- rand.random_with_persistance(sc['windage_range'][:, 0], - sc['windage_range'][:, 1], - sc['windages'], - sc['windage_persist'], - time_step) + if self.active: + rand.random_with_persistance(sc['windage_range'][:, 0], + sc['windage_range'][:, 1], + sc['windages'], + sc['windage_persist'], + time_step) def prepare_data_for_get_move(self, sc, model_time_datetime): """ diff --git a/py_gnome/gnome/movers/wind_movers.py b/py_gnome/gnome/movers/wind_movers.py index b59a9a823..a6fb4f5af 100644 --- a/py_gnome/gnome/movers/wind_movers.py +++ b/py_gnome/gnome/movers/wind_movers.py @@ -4,18 +4,16 @@ import os import copy -from datetime import datetime import numpy as np from colander import (SchemaNode, Bool, String, Float, drop) -from gnome import basic_types +from gnome.exceptions import ReferencedObjectNotSet + from gnome.basic_types import (world_point, world_point_type, - velocity_rec, - datetime_value_2d) -from gnome.exceptions import ReferencedObjectNotSet + velocity_rec) from gnome.cy_gnome.cy_wind_mover import CyWindMover from gnome.cy_gnome.cy_gridwind_mover import CyGridWindMover @@ -492,7 +490,7 @@ def get_scaled_velocities(self, time): num_cells = num_tri / 2 # will need to update this for regular grids - vels = np.zeros(num_cells, dtype=basic_types.velocity_rec) + vels = np.zeros(num_cells, dtype=velocity_rec) self.mover.get_scaled_velocities(time, vels) return vels diff --git a/py_gnome/gnome/multi_model_broadcast.py b/py_gnome/gnome/multi_model_broadcast.py index 5126b6b2a..b2f8278a1 100644 --- a/py_gnome/gnome/multi_model_broadcast.py +++ b/py_gnome/gnome/multi_model_broadcast.py @@ -3,13 +3,15 @@ import os import psutil import time -import traceback import logging +import traceback +from six import reraise from cPickle import loads, dumps import uuid import multiprocessing as mp +import tblib.pickling_support import zmq @@ -20,6 +22,10 @@ from gnome.outputters import WeatheringOutput +# allows us to pickle exception traceback info 
+tblib.pickling_support.install() + + class ModelConsumer(mp.Process): ''' This is a consumer process that makes the model available @@ -151,17 +157,18 @@ def _set_wind_speed_uncertainty(self, up_or_down): return all(res) + def _set_spill_amount_uncertainty(self, up_or_down): + res = [s.set_amount_uncertainty(up_or_down) for s in self.model.spills] + + return all(res) + def _get_spill_container_uncertainty(self): return self.model.spills.uncertain def _set_spill_container_uncertainty(self, uncertain): self.model.spills.uncertain = uncertain - return self.model.spills.uncertain - def _set_spill_amount_uncertainty(self, up_or_down): - res = [s.set_amount_uncertainty(up_or_down) for s in self.model.spills] - - return all(res) + return self.model.spills.uncertain def _get_cache_dir(self): return self.model._cache._cache_dir @@ -266,11 +273,20 @@ def cmd(self, command, args, if idx is not None: self.tasks[idx].send(request) - return loads(self.tasks[idx].recv()) + out = self.recv_from_task(self.tasks[idx]) + + self.handle_child_exception(out) + + return out elif uncertainty_values is not None: idx = self.lookup[uncertainty_values] + self.tasks[idx].send(request) - return loads(self.tasks[idx].recv()) + out = self.recv_from_task(self.tasks[idx]) + + self.handle_child_exception(out) + + return out else: out = [] @@ -283,22 +299,42 @@ def cmd(self, command, args, [t.send(request) for t in self.tasks] try: - out = [loads(t.recv()) for t in self.tasks] + out = [self.recv_from_task(t) for t in self.tasks] except zmq.Again: self.logger.warning('Broadcaster command has timed out!') self.stop() out = None + except Exception as e: + self.logger.warning('Broadcaster caught exception {}' + .format(e)) + self.stop() + out = None else: for t in self.tasks: t.send(request) - out.append(loads(t.recv())) + out.append(self.recv_from_task(t)) if timeout is not None: [t.setsockopt(zmq.RCVTIMEO, time) for t, time in zip(self.tasks, old_timeouts)] + if out is not None: + for o in out: + 
self.handle_child_exception(o) + return out + def recv_from_task(self, task): + return loads(task.recv()) + + def handle_child_exception(self, response): + if (isinstance(response, tuple) and len(response) == 3 and + isinstance(response[0], type) and + isinstance(response[1], Exception) and + isinstance(response[2], traceback.types.TracebackType)): + self.stop() + reraise(*response) + def stop(self): if len(self.tasks) > 0: try: @@ -312,9 +348,10 @@ def stop(self): for c in self.consumers: c.terminate() c.join() + self.logger.info('joined all consumers!') - self.context.destroy() + self.context.term() self.consumers = [] self.tasks = [] @@ -346,6 +383,7 @@ def _spawn_tasks(self): task.connect('ipc://{0}/Task-{1}'.format(self.ipc_folder, p)) task.setsockopt(zmq.RCVTIMEO, 10 * 1000) + task.setsockopt(zmq.LINGER, 5) self.tasks.append(task) diff --git a/py_gnome/gnome/outputters/json.py b/py_gnome/gnome/outputters/json.py index 496ae90c4..98eeacf9e 100644 --- a/py_gnome/gnome/outputters/json.py +++ b/py_gnome/gnome/outputters/json.py @@ -14,6 +14,8 @@ from gnome.persist import class_from_objtype from .outputter import Outputter, BaseSchema + + class SpillJsonSchema(BaseSchema): pass @@ -54,8 +56,7 @@ class SpillJsonOutput(Outputter, Serializable): def write_output(self, step_num, islast_step=False): 'dump data in geojson format' - super(SpillJsonOutput, self).write_output(step_num, - islast_step) + super(SpillJsonOutput, self).write_output(step_num, islast_step) if not self._write_step: return None @@ -68,9 +69,8 @@ def write_output(self, step_num, islast_step=False): for sc in self.cache.load_timestep(step_num).items(): position = sc['positions'] - longitude = np.around(position[:,0], 4).tolist() - latitude = np.around(position[:,1], 4).tolist() - l = len(longitude) + longitude = np.around(position[:, 0], 4).tolist() + latitude = np.around(position[:, 1], 4).tolist() status = sc['status_codes'].tolist() mass = np.around(sc['mass'], 4).tolist() spill_num = 
sc['spill_num'].tolist() @@ -85,11 +85,11 @@ def write_output(self, step_num, islast_step=False): # to_be_removed : 12 out = {"longitude": longitude, - "latitude":latitude, + "latitude": latitude, "status": status, "mass": mass, - "spill_num":spill_num, - "length":l + "spill_num": spill_num, + "length": len(longitude) } if sc.uncertain: @@ -103,6 +103,7 @@ def write_output(self, step_num, islast_step=False): 'step_num': step_num, 'certain': certain_scs, 'uncertain': uncertain_scs} + if self.output_dir: output_info['output_filename'] = self.output_to_file(certain_scs, step_num) @@ -347,8 +348,17 @@ def deserialize(cls, json_): for i, cm in enumerate(json_['ice_movers']): cm_cls = class_from_objtype(cm['obj_type']) + cm_dict = cm_cls.deserialize(json_['ice_movers'][i]) _to_dict['ice_movers'].append(cm_dict) return _to_dict + + + + + + + + diff --git a/py_gnome/gnome/outputters/netcdf.py b/py_gnome/gnome/outputters/netcdf.py index 27256646a..34bf4083f 100644 --- a/py_gnome/gnome/outputters/netcdf.py +++ b/py_gnome/gnome/outputters/netcdf.py @@ -59,37 +59,97 @@ 'flag_meanings': " ".join(["%i: %s," % pair for pair in sorted(zip(oil_status._int, oil_status._attr))]) - }, + }, 'spill_num': {'long_name': 'spill to which the particle belongs'}, 'id': {'long_name': 'particle ID', }, + 'density': { + 'long_name': 'emulsion density at end of timestep', + 'units': 'kg/m^3'}, + 'viscosity': { + 'long_name': 'emulsion viscosity at end of timestep', + 'units': 'm^2/sec'}, + 'oil_density': { + 'long_name': 'oil density at end of timestep', + 'units': 'kg/m^3'}, + 'oil_viscosity': { + 'long_name': 'oil viscosity at end of timestep', + 'units': 'm^2/sec'}, 'droplet_diameter': {'long_name': 'diameter of oil droplet class', 'units': 'meters' }, 'rise_vel': {'long_name': 'rise velocity of oil droplet class', 'units': 'm s-1'}, + 'windages': {}, + 'windage_range': {}, + 'windage_persist': {}, 'next_positions': {}, 'last_water_positions': {}, + 'bulk_init_volume': {}, + 
'interfacial_area': {}, + 'area': {}, + 'fay_area': {}, + 'frac_coverage': {}, + 'bulltime': {}, + 'evap_decay_constant': {}, + 'partition_coeff': {}, + 'droplet_avg_size': {}, + 'init_mass': {'long_name': 'initial mass', + 'units': 'kilograms', + }, + 'mass_components': {}, + 'fate_status': {}, # weathering data 'floating': { 'long_name': 'total mass floating in water after each time step', 'units': 'kilograms'}, + 'beached': { + 'long_name': 'total mass on the shoreline after each time step', + 'units': 'kilograms'}, + 'off_maps': { + 'long_name': 'total mass that has left the map since the beginning of model run', + 'units': 'kilograms'}, 'evaporated': { 'long_name': 'total mass evaporated since beginning of model run', 'units': 'kilograms'}, - 'dispersed': { + 'natural_dispersion': { 'long_name': 'total mass dispersed since beginning of model run', 'units': 'kilograms'}, + 'sedimentation': { + 'long_name': 'total mass lost due to sedimentation since beginning of model run', + 'units': 'kilograms'}, + 'dissolution': { + 'long_name': 'total mass dissolved since beginning of model run', + 'units': 'kilograms'}, + 'water_content': { + 'long_name': 'fraction of total mass that is water after each time step'}, + 'frac_water': { + 'long_name': 'water fraction after each time step'}, + 'frac_lost': { + 'long_name': 'fraction of total mass that is lost after each time step'}, 'avg_density': { 'long_name': 'average density at end of timestep', 'units': 'kg/m^3'}, 'avg_viscosity': { 'long_name': 'average viscosity at end of timestep', - 'units': 'kg/m^3'}, + 'units': 'm^2/sec'}, 'amount_released': { 'long_name': 'total mass of oil released thus far', 'units': 'kg'}, + 'non_weathering': { + 'long_name': 'total mass of oil that does not weather after each time step', + 'units': 'kg'}, + + 'chem_dispersed': { + 'long_name': 'total mass chemically dispersed since beginning of model run', + 'units': 'kilograms'}, + 'skimmed': { + 'long_name': 'total mass skimmed since 
beginning of model run', + 'units': 'kilograms'}, + 'burned': { + 'long_name': 'total mass burned since beginning of model run', + 'units': 'kilograms'}, } @@ -157,18 +217,35 @@ class NetCDFOutput(Outputter, Serializable): 'id', 'mass', 'age', + # if they are not there, they will be ignored + # if they are there, the user probably wants them + 'density', + 'viscosity', + 'frac_water', ] + # these are being handled specially -- i.e. pulled from the positions array + special_arrays = set(('latitude', + 'longitude', + 'depth', + )) + # the list of arrays that we usually don't want -- i.e. for internal use # these will get skipped if "most" is asked for # "all" will output everything. usually_skipped_arrays = ['next_positions', 'last_water_positions', 'windages', - 'windage_range', - 'windage_persist', 'mass_components', 'half_lives', + 'init_mass', + 'interfacial_area', + 'fay_area', + 'bulk_init_volume', + 'frac_coverage', + 'bulltime', + 'partition_coeff', + 'evap_decay_constant', ] # define _state for serialization @@ -187,7 +264,8 @@ class NetCDFOutput(Outputter, Serializable): def __init__(self, netcdf_filename, - which_data='standard', + which_data='all', + #which_data='standard', compress=True, **kwargs): """ @@ -412,6 +490,13 @@ def _update_arrays_to_output(self, sc): # remove the ones we don't want for var_name in self.usually_skipped_arrays: self.arrays_to_output.discard(var_name) + # make sure they are all there + to_remove = set() + for var_name in self.arrays_to_output: + # fixme: -- is there a way to get the keys as a set so we don't have to loop? 
+ if var_name not in sc and var_name not in self.special_arrays: + to_remove.add(var_name) + self.arrays_to_output -= to_remove def prepare_for_model_run(self, model_start_time, @@ -507,15 +592,18 @@ def prepare_for_model_run(self, # the arrays to get shape and dtype instead of the # array_types since array_type could contain None for # shape - dt = sc[var_name].dtype - - if len(sc[var_name].shape) == 1: - shape = ('data',) - chunksz = (self._chunksize,) + try: + dt = sc[var_name].dtype + except KeyError: # ignore arrays that aren't there + pass else: - y_sz = d_dims[sc[var_name].shape[1]] - shape = ('data', y_sz) - chunksz = (self._chunksize, sc[var_name].shape[1]) + if len(sc[var_name].shape) == 1: + shape = ('data',) + chunksz = (self._chunksize,) + else: + y_sz = d_dims[sc[var_name].shape[1]] + shape = ('data', y_sz) + chunksz = (self._chunksize, sc[var_name].shape[1]) self._create_nc_var(rootgrp, var_name, dt, shape, chunksz) diff --git a/py_gnome/gnome/outputters/outputter.py b/py_gnome/gnome/outputters/outputter.py index c194bb565..4712f11da 100644 --- a/py_gnome/gnome/outputters/outputter.py +++ b/py_gnome/gnome/outputters/outputter.py @@ -102,7 +102,7 @@ def __init__(self, self.name = name - # make sure the output_dir exits: + # make sure the output_dir exists: if output_dir is not None: try: os.mkdir(output_dir) diff --git a/py_gnome/gnome/outputters/shape.py b/py_gnome/gnome/outputters/shape.py index 34e3bd56e..7508792ff 100644 --- a/py_gnome/gnome/outputters/shape.py +++ b/py_gnome/gnome/outputters/shape.py @@ -5,7 +5,7 @@ import os import zipfile -from colander import SchemaNode, String, drop +from colander import SchemaNode, String, Boolean, drop import shapefile as shp from gnome.utilities.serializable import Serializable, Field @@ -14,11 +14,8 @@ class ShapeSchema(BaseSchema): - ''' - Nothing is required for initialization - ''' - filename = SchemaNode(String(), missing=drop) + zip_output = SchemaNode(Boolean(), missing=drop) class 
ShapeOutput(Outputter, Serializable): @@ -31,11 +28,12 @@ class that outputs GNOME results (particles) in a shapefile format. # need a schema and also need to override save so output_dir # is saved correctly - maybe point it to saveloc _state += [Field('filename', update=True, save=True), ] + _state += [Field('zip_output', update=True, save=True), ] _schema = ShapeSchema time_formatter = '%m/%d/%Y %H:%M' - def __init__(self, filename, **kwargs): + def __init__(self, filename, zip_output=True, **kwargs): ''' :param str output_dir=None: output directory for shape files uses super to pass optional \*\*kwargs to base class __init__ method @@ -48,6 +46,8 @@ def __init__(self, filename, **kwargs): self.filename = filename self.filedir = os.path.dirname(filename) + self.zip_output = zip_output + super(ShapeOutput, self).__init__(**kwargs) def prepare_for_model_run(self, @@ -108,10 +108,7 @@ def prepare_for_model_run(self, w = shp.Writer(shp.POINT) w.autobalance = 1 - w.field('Year', 'C') - w.field('Month', 'C') - w.field('Day', 'C') - w.field('Hour', 'C') + w.field('Time', 'C') w.field('LE id', 'N') w.field('Depth', 'N') w.field('Mass', 'N') @@ -141,10 +138,7 @@ def write_output(self, step_num, islast_step=False): for k, p in enumerate(sc['positions']): self.w_u.point(p[0], p[1]) - self.w_u.record(curr_time.year, - curr_time.month, - curr_time.day, - curr_time.hour, + self.w_u.record(curr_time.strftime('%Y-%m-%dT%H:%M:%S'), sc['id'][k], p[2], sc['mass'][k], @@ -153,10 +147,7 @@ def write_output(self, step_num, islast_step=False): else: for k, p in enumerate(sc['positions']): self.w.point(p[0], p[1]) - self.w.record(curr_time.year, - curr_time.month, - curr_time.day, - curr_time.hour, + self.w.record(curr_time.strftime('%Y-%m-%dT%H:%M:%S'), sc['id'][k], p[2], sc['mass'][k], @@ -175,23 +166,30 @@ def write_output(self, step_num, islast_step=False): else: self.w.save(fn) - zfilename = fn + '.zip' + print 'ShapefileOutputter.zip_output: ', self.zip_output + if self.zip_output 
is True: + zfilename = fn + '.zip' + + prj_file = open("%s.prj" % fn, "w") + prj_file.write(self.epsg) + prj_file.close() - prj_file = open("%s.prj" % fn, "w") - prj_file.write(self.epsg) - prj_file.close() + zipf = zipfile.ZipFile(zfilename, 'w') - zipf = zipfile.ZipFile(zfilename, 'w') + for suf in ['shp', 'prj', 'dbf', 'shx']: + f = os.path.split(fn)[-1] + '.' + suf + zipf.write(os.path.join(self.filedir, f), arcname=f) + os.remove(fn + '.' + suf) - for suf in ['shp', 'prj', 'dbf', 'shx']: - f = os.path.split(fn)[-1] + '.' + suf - zipf.write(os.path.join(self.filedir, f), arcname=f) - os.remove(fn + '.' + suf) + zipf.close() - zipf.close() + if self.zip_output is True: + output_filename = self.filename + '.zip' + else: + output_filename = self.filename output_info = {'time_stamp': sc.current_time_stamp.isoformat(), - 'output_filename': self.filename + '.zip'} + 'output_filename': output_filename} return output_info diff --git a/py_gnome/gnome/outputters/weathering.py b/py_gnome/gnome/outputters/weathering.py index bcd8bde86..90d4e1a06 100644 --- a/py_gnome/gnome/outputters/weathering.py +++ b/py_gnome/gnome/outputters/weathering.py @@ -63,7 +63,7 @@ def __init__(self, 'avg_density': 'kg/m^3', 'avg_viscosity': 'm^2/s'} - super(WeatheringOutput, self).__init__(**kwargs) + super(WeatheringOutput, self).__init__(output_dir=self.output_dir, **kwargs) def write_output(self, step_num, islast_step=False): ''' diff --git a/py_gnome/gnome/persist/save_load.py b/py_gnome/gnome/persist/save_load.py index 30ff63aeb..f7eca2e44 100644 --- a/py_gnome/gnome/persist/save_load.py +++ b/py_gnome/gnome/persist/save_load.py @@ -318,8 +318,8 @@ def save(self, saveloc, references=None, name=None): json_ = self.serialize('save') c_fields = self._state.get_field_by_attribute('iscollection') - #JAH: Added this from the model save function. If any bugs pop up - #in the references system this may be the cause + # JAH: Added this from the model save function. 
If any bugs pop up + # in the references system this may be the cause references = (references, References())[references is None] for field in c_fields: diff --git a/py_gnome/gnome/spill/spill.py b/py_gnome/gnome/spill/spill.py index 981eba458..a98d63b07 100644 --- a/py_gnome/gnome/spill/spill.py +++ b/py_gnome/gnome/spill/spill.py @@ -8,10 +8,10 @@ Element_types -- what the types of the elements are. """ - +from datetime import timedelta import copy from inspect import getmembers, ismethod -from datetime import timedelta + import unit_conversion as uc from colander import (SchemaNode, Bool, String, Float, drop) @@ -21,7 +21,10 @@ from gnome.persist.base_schema import ObjType from . import elements -from .release import PointLineRelease, ContinuousRelease, GridRelease +from .release import (PointLineRelease, + ContinuousRelease, + GridRelease, + SpatialRelease) from .. import _valid_units @@ -31,9 +34,7 @@ class BaseSpill(Serializable, object): and as a spec for the API. """ - def __init__(self, - release_time=None, - name=""): + def __init__(self, release_time=None, name=""): """ initialize -- sub-classes will probably have a lot more to do """ @@ -681,10 +682,11 @@ def get_mass(self, units=None): elif self.units in self.valid_vol_units: # need to convert to mass # DO NOT change this back! - # for the UI to be consistent, the conversion needs to use standard - # density -- not the current water temp. + # for the UI to be consistent, the conversion needs to use + # standard density -- not the current water temp. # water_temp = self.water.get('temperature') - # ideally substance would have a "standard_density" attribute for this. + # ideally substance would have a "standard_density" attribute + # for this. 
std_rho = self.element_type.standard_density vol = uc.convert('Volume', self.units, 'm^3', self.amount) @@ -851,7 +853,8 @@ def deserialize(cls, json_): deserialized, created and added to this dict by load method ''' etcls = class_from_objtype(json_['element_type']['obj_type']) - dict_['element_type'] = etcls.deserialize(json_['element_type']) + dict_['element_type'] = etcls.deserialize(json_['element_type'] + ) if 'water' in json_: w_cls = class_from_objtype(json_['water']['obj_type']) @@ -871,6 +874,7 @@ def deserialize(cls, json_): else: return json_ + """ Helper functions """ @@ -928,6 +932,7 @@ def surface_point_line_spill(num_elements, units=units, name=name) + def grid_spill(bounds, resolution, release_time, @@ -945,7 +950,8 @@ def grid_spill(bounds, (max_lon, max_lat)) :type bounds: 2x2 numpy array or equivalent - :param resolution: resolution of grid -- it will be a resoluiton X resolution grid + :param resolution: resolution of grid -- it will be a resoluiton X + resolution grid :type resolution: integer :param release_time: time the LEs are released (datetime object) @@ -988,7 +994,6 @@ def grid_spill(bounds, name=name) - def subsurface_plume_spill(num_elements, start_position, release_time, @@ -1141,6 +1146,7 @@ def point_line_release_spill(num_elements, amount=amount, units=units, name=name) + return spill @@ -1169,5 +1175,5 @@ def spatial_release_spill(start_positions, amount=amount, units=units, name=name) - return spill + return spill diff --git a/py_gnome/gnome/spill_container.py b/py_gnome/gnome/spill_container.py index 3ba20c486..9c2121e66 100644 --- a/py_gnome/gnome/spill_container.py +++ b/py_gnome/gnome/spill_container.py @@ -99,7 +99,7 @@ def _set_data(self, sc, array_types, fate_mask, fate): dict_to_update = getattr(self, fate) for at in array_types: array = sc._array_name(at) - #dict_to_update[array] = sc[array][fate_mask] + if array not in dict_to_update: dict_to_update[array] = sc[array][fate_mask] @@ -488,8 +488,9 @@ def 
_set_substancespills(self): # 'data' will be updated when weatherers ask for arrays they need # define the substances list and the list of spills for each substance - self._substances_spills = \ - substances_spills(substances=subs, s_id=s_id, spills=spills) + self._substances_spills = substances_spills(substances=subs, + s_id=s_id, + spills=spills) if len(self.get_substances()) > 1: # add an arraytype for substance if more than one substance @@ -856,13 +857,13 @@ def initialize_data_arrays(self): """ for name, atype in self._array_types.iteritems(): # Initialize data_arrays with 0 elements - # fixme: is every array type with None shape neccesarily oil components?? + # fixme: is every array type with None shape neccesarily + # oil components?? # but it is more than just mass_components # maybe some other flag?? if atype.shape is None: - # if name == "mass_components": num_comp = self._oil_comp_array_len - self._data_arrays[name] = atype.initialize_null(shape=(num_comp, )) + self._data_arrays[name] = atype.initialize_null(shape=(num_comp,)) else: self._data_arrays[name] = atype.initialize_null() @@ -1314,7 +1315,7 @@ def index(self, spill): ''' try: return self._spill_container.spills.index(spill) - except: + except Exception: return self._u_spill_container.spills.index(spill) @property diff --git a/py_gnome/gnome/tamoc/tamoc_spill.py b/py_gnome/gnome/tamoc/tamoc_spill.py index 6f8a42a2c..1598f7dd6 100644 --- a/py_gnome/gnome/tamoc/tamoc_spill.py +++ b/py_gnome/gnome/tamoc/tamoc_spill.py @@ -27,127 +27,81 @@ __all__ = [] -# def tamoc_spill(release_time, -# start_position, -# num_elements=None, -# end_release_time=None, -# name='TAMOC plume'): -# ''' -# Helper function returns a Spill object for a spill from the TAMOC model - -# This version is essentially a template -- it needs to be filled in with -# access to the parameters from the "real" TAMOC model. - -# Also, this version is for inert particles only a size and density. 
-# They will not change once released into gnome. - -# Future work: create a "proper" weatherable oil object. - -# :param release_time: start of plume release -# :type release_time: datetime.datetime - -# :param start_position: location of initial release -# :type start_position: 3-tuple of floats (long, lat, depth) - -# :param num_elements: total number of elements to be released -# :type num_elements: integer - -# :param end_release_time=None: End release time for a time varying release. -# If None, then release runs for tehmodel duration -# :type end_release_time: datetime.datetime - -# :param float flow_rate=None: rate of release mass or volume per time. -# :param str units=None: must provide units for amount spilled. -# :param tuple windage_range=(.01, .04): Percentage range for windage. -# Active only for surface particles -# when a mind mover is added -# :param windage_persist=900: Persistence for windage values in seconds. -# Use -1 for inifinite, otherwise it is -# randomly reset on this time scale. -# :param str name='TAMOC spill': a name for the spill. -# ''' - -# release = PointLineRelease(release_time=release_time, -# start_position=start_position, -# num_elements=num_elements, -# end_release_time=end_release_time) - -# # This helper function is just passing parameters thru to the plume -# # helper function which will do the work. -# # But this way user can just specify all parameters for release and -# # element_type in one go... 
-# element_type = elements.plume(distribution_type=distribution_type, -# distribution=distribution, -# substance_name=substance, -# windage_range=windage_range, -# windage_persist=windage_persist, -# density=density, -# density_units=density_units) - -# return Spill(release, -# element_type=element_type, -# amount=amount, -# units=units, -# name=name) - class TamocDroplet(): """ Dummy class to show what we need from the TAMOC output + + :param mass_flux=1.0: Measured in kg/s + :param radius=1e-6: Measured in meters + :param density=900.0: Measured in kg/m^3 at 15degC + :param position=(10, 20, 100): (x, y, z) in meters + :param flag_phase_insitu='Mixture': Flag for the phase of the particle + at plumetermination + :param flag_phase_surface='Mixture': Flag for the phase of the particle + at 1 atm and 15 degC """ def __init__(self, - mass_flux=1.0, # kg/s - radius=1e-6, # meters - density=900.0, # kg/m^3 at 15degC - position=(10, 20, 100), # (x, y, z) in meters - flag_phase_insitu = 'Mixture', # flag for the phase of the particle at plumetermination - flag_phase_surface = 'Mixture' # flag for the phase of the particle at 1 atm and 15 degC - ): + mass_flux=1.0, + radius=1e-6, + density=900.0, + position=(10, 20, 100), + flag_phase_insitu='Mixture', + flag_phase_surface='Mixture'): self.mass_flux = mass_flux self.radius = radius self.density = density self.position = np.asanyarray(position) + self.flag_phase_insitu = flag_phase_insitu + self.flag_phase_surface = flag_phase_surface def __repr__(self): - return '[flux = {0}, radius = {1}, density = {2}, position = {3}]'.format(self.mass_flux, self.radius, self.density, self.position) + return ('[flux = {0}, radius = {1}, density = {2}, position = {3}]' + .format(self.mass_flux, + self.radius, + self.density, + self.position)) class TamocDissMasses(): """ Dummy class to show what we need from the TAMOC output - """ + + :param mass_flux=1.0: Measured in kg/s + :param position=(10, 20, 100): (x, y, z) in meters + :param 
chem_name='x': The name of the chemical + """ def __init__(self, - mass_flux=1.0, # kg/s - position=(10, 20, 100), # (x, y, z) in meters - chem_name='x' - ): + mass_flux=1.0, + position=(10, 20, 100), + chem_name='x'): self.mass_flux = mass_flux self.position = np.asanyarray(position) self.chem_name = chem_name def __repr__(self): - return '[flux = {0}, position = {1}, chem_name = {2}]'.format(self.mass_flux, self.position, self.chem_name) + return ('[flux = {0}, position = {1}, chem_name = {2}]' + .format(self.mass_flux, self.position, self.chem_name)) def log_normal_pdf(x, mean, std): """ - Utility to compute the log normal CDF - - used to get "realistic" distributin of droplet sizes - + Utility to compute the log normal CDF + - used to get a "realistic" distribution of droplet sizes """ - sigma = np.sqrt(np.log(1 + std ** 2 / mean ** 2)) mu = np.log(mean) + sigma ** 2 / 2 - return ((1 / (x * sigma * np.sqrt(2 * np.pi))) * np.exp(-((np.log(x) - mu) ** 2 / (2 * sigma ** 2)))) + + return ((1 / (x * sigma * np.sqrt(2 * np.pi))) * + np.exp(-((np.log(x) - mu) ** 2 / (2 * sigma ** 2)))) def fake_tamoc_results(num_droplets=10): """ - utility for providing a tamoc result set + utility for providing a tamoc result set - a simple list of TamocDroplet objects + Returns a simple list of TamocDroplet objects """ # sizes from 10 to 1000 microns @@ -177,60 +131,10 @@ def fake_tamoc_results(num_droplets=10): class TamocSpill(gnome.spill.spill.BaseSpill): """ Models a spill - """ - # _update = ['on', 'release', - # 'amount', 'units', 'amount_uncertainty_scale'] - - # _create = ['frac_coverage'] - # _create.extend(_update) - - # _state = copy.deepcopy(serializable.Serializable._state) - # _state.add(save=_create, update=_update) - # _state += serializable.Field('element_type', - # save=True, - # save_reference=True, - # update=True) - # _schema = SpillSchema - - # valid_vol_units = _valid_units('Volume') - # valid_mass_units = _valid_units('Mass') -# # Release depth (m) -# z0 = 
2000 -# # Release diameter (m) -# D = 0.30 -# # Release temperature (K) -# T0 = 273.15 + 150. -# # Release angles of the plume (radians) -# phi_0 = -np.pi / 2. -# theta_0 = 0. -# # Salinity of the continuous phase fluid in the discharge (psu) -# S0 = 0. -# # Concentration of passive tracers in the discharge (user-defined) -# c0 = 1. -# # List of passive tracers in the discharge -# chem_name = 'tracer' -# # Presence or abscence of hydrates in the particles -# hydrate = True -# # Prescence or abscence of dispersant -# dispersant = True -# # Reduction in interfacial tension due to dispersant -# sigma_fac = np.array([[1.], [1. / 200.]]) # sigma_fac[0] - for gas; sigma_fac[1] - for liquid -# # Define liquid phase as inert -# inert_drop = 'False' -# # d_50 of gas particles (m) -# d50_gas = 0.008 -# # d_50 of oil particles (m) -# d50_oil = 0.0038 -# # number of bins in the particle size distribution -# nbins = 10 -# # Create the ambient profile needed for TAMOC -# # name of the nc file -# nc_file = './Input/case_01' -# # Define and input the ambient ctd profiles -# fname_ctd = './Input/ctd_api.txt' -# # Define and input the ambient velocity profile -# ua = 0.05 + TODO: we should not be using complex multidemensional values as + parameter defaults such as the one used for 'tamoc_parameters' + """ def __init__(self, release_time, start_position, @@ -264,9 +168,6 @@ def __init__(self, 'salinity': None, 'temperature': None} ): - """ - - """ super(TamocSpill, self).__init__(release_time=release_time, name=name) @@ -277,7 +178,11 @@ def __init__(self, self.num_released = 0 self.amount_released = 0.0 - self.tamoc_interval = timedelta(hours=TAMOC_interval) if TAMOC_interval is not None else None + if TAMOC_interval is not None: + self.tamoc_interval = timedelta(hours=TAMOC_interval) + else: + self.tamoc_interval = None + self.last_tamoc_time = release_time self.droplets = None self.on = on # spill is active or not @@ -291,35 +196,38 @@ def update_environment_conditions(self, 
current_time): currents = ds['currents'] u_data = currents.variables[0].data v_data = currents.variables[1].data - source_idx=None + source_idx = None + try: source_idx = currents.grid.locate_faces(np.array(self.start_position)[0:2], 'node') except TypeError: source_idx = currents.grid.locate_faces(np.array(self.start_position)[0:2]) + if currents.grid.node_lon.shape[0] == u_data.shape[-1]: # lon/lat are inverted in data so idx must be reversed source_idx = source_idx[::-1] + print source_idx time_idx = currents.time.index_of(current_time, False) print time_idx u_conditions = u_data[time_idx, :, source_idx[0], source_idx[1]] max_depth_ind = np.where(u_conditions.mask)[0].min() u_conditions = u_conditions[0:max_depth_ind] - v_conditions = v_data[time_idx, 0:max_depth_ind, source_idx[0], source_idx[1]] -# for d in range(0, max_depth_ind): -# uv[d] = currents.at(np.array(self.start_position)[0:2], current_time, depth=d, memoize=False) -# print d -# print uv[d] + v_conditions = v_data[time_idx, 0:max_depth_ind, + source_idx[0], source_idx[1]] + self.tamoc_parameters['ua'] = u_conditions self.tamoc_parameters['va'] = v_conditions print 'getdepths' -# depth_var = u_data._grp[currents.variables[0].data.dimensions[1]] + try: self.tamoc_parameters['depths'] = u_data._grp['depth_levels'][0:max_depth_ind] except IndexError: self.tamoc_parameters['depths'] = u_data._grp['depth'][0:max_depth_ind] + if ds['salinity'] is not None: pass + if ds['temperature'] is not None: pass @@ -335,10 +243,13 @@ def run_tamoc(self, current_time, time_step): self.droplets, self.diss_components = self._run_tamoc() return self.droplets - if (current_time >= self.release_time and (self.last_tamoc_time is None or self.droplets is None) or - current_time >= self.last_tamoc_time + self.tamoc_interval and current_time < self.end_release_time): + if (current_time >= self.release_time and + (self.last_tamoc_time is None or self.droplets is None) or + current_time >= self.last_tamoc_time + 
self.tamoc_interval and + current_time < self.end_release_time): self.last_tamoc_time = current_time self.droplets, self.diss_components = self._run_tamoc() + return self.droplets def _run_tamoc(self): @@ -350,6 +261,7 @@ def _run_tamoc(self): # Release conditions tp = self.tamoc_parameters + # Release depth (m) z0 = tp['depth'] # Release diameter (m) @@ -365,14 +277,17 @@ def _run_tamoc(self): S0 = tp['discharge_salinity'] # Concentration of passive tracers in the discharge (user-defined) c0 = tp['tracer_concentration'] + # List of passive tracers in the discharge chem_name = 'tracer' # Presence or abscence of hydrates in the particles + hydrate = tp['hydrate'] - # Prescence or abscence of dispersant + # Presence or absence of dispersant dispersant = tp['dispersant'] # Reduction in interfacial tension due to dispersant - sigma_fac = tp['sigma_fac'] # sigma_fac[0] - for gas; sigma_fac[1] - for liquid + # sigma_fac[0] - for gas; sigma_fac[1] - for liquid + sigma_fac = tp['sigma_fac'] # Define liquid phase as inert inert_drop = tp['inert_drop'] # d_50 of gas particles (m) @@ -381,11 +296,14 @@ def _run_tamoc(self): d50_oil = tp['d50_oil'] # number of bins in the particle size distribution nbins = tp['nbins'] + # Create the ambient profile needed for TAMOC # name of the nc file nc_file = tp['nc_file'] + # Define and input the ambient ctd profiles fname_ctd = tp['fname_ctd'] + # Define and input the ambient velocity profile ua = tp['ua'] va = tp['va'] @@ -401,31 +319,37 @@ def _run_tamoc(self): # Read in the user-specified properties for the chemical data data, units = chem.load_data('./Input/API_ChemData.csv') oil = dbm.FluidMixture(composition, user_data=data) - #oil.delta = self.load_delta('./Input/API_Delta.csv',oil.nc) + + # oil.delta = self.load_delta('./Input/API_Delta.csv',oil.nc) # if np.sum(oil.delta==0.): # print 'Binary interaction parameters are zero, estimating them.' 
# # Estimate the values of the binary interaction parameters # oil.delta = self.estimate_binary_interaction_parameters(oil) - # Get the release rates of gas and liquid phase md_gas, md_oil = self.release_flux(oil, mass_frac, profile, T0, z0, Q) print 'md_gas, md_oil', np.sum(md_gas), np.sum(md_oil) # Get the particle list for this composition - particles = self.get_particles(composition, data, md_gas, md_oil, profile, d50_gas, d50_oil, - nbins, T0, z0, dispersant, sigma_fac, oil, mass_frac, hydrate, inert_drop) + particles = self.get_particles(composition, data, + md_gas, md_oil, profile, + d50_gas, d50_oil, + nbins, T0, z0, + dispersant, sigma_fac, oil, mass_frac, + hydrate, inert_drop) print len(particles) print particles # Run the simulation jlm = bpm.Model(profile) - jlm.simulate(np.array([0., 0., z0]), D, None, phi_0, theta_0, - S0, T0, c0, chem_name, particles, track=False, dt_max=60., - sd_max=6000.) + jlm.simulate(np.array([0., 0., z0]), + D, None, phi_0, theta_0, S0, T0, c0, + chem_name, particles, + track=False, dt_max=60., sd_max=6000.) # Update the plume object with the nearfiled terminal level answer - jlm.q_local.update(jlm.t[-1], jlm.q[-1], jlm.profile, jlm.p, jlm.particles) + jlm.q_local.update(jlm.t[-1], jlm.q[-1], + jlm.profile, jlm.p, jlm.particles) Mp = np.zeros((len(jlm.particles), len(jlm.q_local.M_p[0]))) gnome_particles = [] @@ -435,52 +359,90 @@ def _run_tamoc(self): for i in range(len(jlm.particles)): nb0 = jlm.particles[i].nb0 Tp = jlm.particles[i].T - Mp[i, 0:len(jlm.q_local.M_p[i])] = jlm.q_local.M_p[i][:] / jlm.particles[i].nbe + Mp[i, 0:len(jlm.q_local.M_p[i])] = (jlm.q_local.M_p[i][:] / + jlm.particles[i].nbe) + mass_flux = np.sum(Mp[i, :] * jlm.particles[i].nb0) density = jlm.particles[i].rho_p + radius = (jlm.particles[i].diameter(Mp[i, 0:len(jlm.particles[i].m)], Tp, - jlm.q_local.Pa, jlm.q_local.S, jlm.q_local.T)) / 2. 
- position = np.array([jlm.particles[i].x, jlm.particles[i].y, jlm.particles[i].z]) + jlm.q_local.Pa, + jlm.q_local.S, + jlm.q_local.T)) / 2. + + position = np.array([jlm.particles[i].x, + jlm.particles[i].y, + jlm.particles[i].z]) + # Calculate the equlibrium and get the particle phase Eq_parti = dbm.FluidMixture(composition=jlm.particles[i].composition[:], user_data=data) + # Get the particle equilibrium at the plume termination conditions print 'Insitu' - flag_phase_insitu = self.get_phase(jlm.profile, Eq_parti, Mp[i, :]/np.sum(Mp[i, :]), Tp, jlm.particles[i].z) + flag_phase_insitu = self.get_phase(jlm.profile, + Eq_parti, + Mp[i, :] / np.sum(Mp[i, :]), + Tp, + jlm.particles[i].z) + # Get the particle equilibrium at the 15 C and 1 atm print 'Surface' - flag_phase_surface = self.get_phase(jlm.profile, Eq_parti, Mp[i, :]/np.sum(Mp[i, :]), 273.15 + 15. , 0.) - gnome_particles.append(TamocDroplet(mass_flux, radius, density, position)) + flag_phase_surface = self.get_phase(jlm.profile, + Eq_parti, + Mp[i, :] / np.sum(Mp[i, :]), + 273.15 + 15., + 0.) + gnome_particles.append(TamocDroplet(mass_flux, radius, density, + position)) for p in gnome_particles: print p + m_tot_diss = 0. 
+ # Calculate the dissolved particle flux for j in range(len(jlm.chem_names)): - diss_mass_flux = jlm.q_local.c_chems[j] * np.pi * jlm.q_local.b**2 * jlm.q_local.V + diss_mass_flux = (jlm.q_local.c_chems[j] * + np.pi * + jlm.q_local.b ** 2 * + jlm.q_local.V) m_tot_diss += diss_mass_flux -# print diss_mass_flux + position = np.array([jlm.q_local.x, jlm.q_local.y, jlm.q_local.z]) -# print position chem_name = jlm.q_local.chem_names[j] -# print chem_name - gnome_diss_components.append(TamocDissMasses(diss_mass_flux, position,chem_name)) - - print 'total dissolved mass flux at plume termination' ,m_tot_diss - print 'total non ddissolved mass flux at plume termination', m_tot_nondiss - print 'total mass flux tracked at plume termination',m_tot_diss+m_tot_nondiss - print 'total mass flux released at the orifice',np.sum(md_gas)+ np.sum(md_oil) - print 'perccentsge_error', (np.sum(md_gas)+ np.sum(md_oil)-m_tot_diss-m_tot_nondiss)/(np.sum(md_gas)+ np.sum(md_oil))*100. + gnome_diss_components.append(TamocDissMasses(diss_mass_flux, + position, + chem_name)) + + print ('total dissolved mass flux at plume termination {}\n' + 'total non-dissolved mass flux at plume termination {}\n' + 'total mass flux tracked at plume termination {}\n' + 'total mass flux released at the orifice {}\n' + 'percentage_error {}' + .format(m_tot_diss, + m_tot_nondiss, + m_tot_diss + m_tot_nondiss, + np.sum(md_gas) + np.sum(md_oil), + ((np.sum(md_gas) + np.sum(md_oil) - + m_tot_diss - m_tot_nondiss) / + (np.sum(md_gas) + np.sum(md_oil)) * 100.))) # Now, we will generate the GNOME properties for a weatherable particle # For now, computed at the release location: # The pressure at release: - P0 = profile.get_values(z0,['pressure']) - (K_ow, json_oil) = self.translate_properties_gnome_to_tamoc(md_oil, composition, oil, P0, S0, T=288.15) + P0 = profile.get_values(z0, ['pressure']) + K_ow, json_oil = self.translate_properties_gnome_to_tamoc(md_oil, + composition, + oil, + P0, S0, + T=288.15) return 
gnome_particles, gnome_diss_components + def __repr__(self): - return ('{0.__class__.__module__}.{0.__class__.__name__}()'.format(self)) + return ('{0.__class__.__module__}.{0.__class__.__name__}()' + .format(self)) def _get_mass_distribution(self, mass_fluxes, time_step): ts = time_step @@ -519,10 +481,11 @@ def _check_units(self, units): return True else: msg = ('Units for amount spilled must be in volume or mass units. ' - 'Valid units for volume: {0}, for mass: {1} ').format( - self.valid_vol_units, self.valid_mass_units) + 'Valid units for volume: {0}, for mass: {1}' + .format(self.valid_vol_units, self.valid_mass_units)) ex = uc.InvalidUnitError(msg) self.logger.exception(ex, exc_info=True) + raise ex # this should be raised since run will fail otherwise # what is this for?? @@ -572,7 +535,8 @@ def rewind(self): """ self.num_released = 0 self.amount_released = 0 - # don't want to run tamoc on every rewind! self.droplets = self.run_tamoc() + + # don't want to run tamoc on every rewind! 
self.last_tamoc_time = None def num_elements_to_release(self, current_time, time_step): @@ -594,10 +558,11 @@ def num_elements_to_release(self, current_time, time_step): if not self.on: return 0 - if current_time < self.release_time or current_time > self.end_release_time: + if (current_time < self.release_time or + current_time > self.end_release_time): return 0 - self.droplets= self.run_tamoc(current_time, time_step) + self.droplets = self.run_tamoc(current_time, time_step) duration = (self.end_release_time - self.release_time).total_seconds() if duration is 0: @@ -640,27 +605,36 @@ def set_newparticle_values(self, num_new_particles, current_time, mass_fluxes = [tam_drop.mass_flux for tam_drop in self.droplets] delta_masses, proportions, total_mass = self._get_mass_distribution(mass_fluxes, time_step) - # set up LE distribution, the number of particles in each 'release point' + # set up LE distribution, + # the number of particles in each 'release point' LE_distribution = [int(num_new_particles * p) for p in proportions] diff = num_new_particles - sum(LE_distribution) for i in range(0, diff): LE_distribution[i % len(LE_distribution)] += 1 # compute release point location for each droplet - positions = [self.start_position + FlatEarthProjection.meters_to_lonlat(d.position, self.start_position) for d in self.droplets] + positions = [self.start_position + + FlatEarthProjection.meters_to_lonlat(d.position, + self.start_position) + for d in self.droplets] + for p in positions: p[0][2] -= self.start_position[2] - # for each release location, set the position and mass of the elements released at that location + # for each release location, set the position and mass + # of the elements released at that location total_rel = 0 - for mass_dist, n_LEs, pos, droplet in zip(delta_masses, LE_distribution, positions, self.droplets): + for mass_dist, n_LEs, pos, droplet in zip(delta_masses, + LE_distribution, + positions, self.droplets): start_idx = -num_new_particles + total_rel 
+ if start_idx == 0: break end_idx = start_idx + n_LEs if end_idx == 0: end_idx = None -# print '{0} to {1}'.format(start_idx, end_idx) + if start_idx == end_idx: continue @@ -668,7 +642,11 @@ def set_newparticle_values(self, num_new_particles, current_time, data_arrays['mass'][start_idx:end_idx] = mass_dist / n_LEs data_arrays['init_mass'][start_idx:end_idx] = mass_dist / n_LEs data_arrays['density'][start_idx:end_idx] = droplet.density - data_arrays['droplet_diameter'][start_idx:end_idx] = np.random.normal(droplet.radius * 2, droplet.radius * 0.15, (n_LEs)) + data_arrays['droplet_diameter'][start_idx:end_idx] = \ + np.random.normal(droplet.radius * 2, + droplet.radius * 0.15, + (n_LEs)) + v = data_arrays['rise_vel'][start_idx:end_idx] rise_velocity_from_drop_size(v, data_arrays['density'][start_idx:end_idx], @@ -680,28 +658,6 @@ def set_newparticle_values(self, num_new_particles, current_time, self.num_released += num_new_particles self.amount_released += total_mass - # def get(self, prop=None): - # print "in get:", prop - # try: - # return getattr(self, prop) - # except AttributeError: - # super(TamocSpill, self).get(prop) - - # if self.element_type is not None: - # self.element_type.set_newparticle_values(num_new_particles, self, - # data_arrays) - - # self.release.set_newparticle_positions(num_new_particles, current_time, - # time_step, data_arrays) - - # data_arrays['mass'][-num_new_particles:] = \ - # self._elem_mass(num_new_particles, current_time, time_step) - - # # set arrays that are spill specific - 'frac_coverage' - # if 'frac_coverage' in data_arrays: - # data_arrays['frac_coverage'][-num_new_particles:] = \ - # self.frac_coverage - def get_profile(self, nc_name, fname, u_a, v_a, w_a, depths): """ Read in the ambient CTD data @@ -741,8 +697,8 @@ def get_profile(self, nc_name, fname, u_a, v_a, w_a, depths): p_lat = 0. p_lon = 0. 
p_time = date2num(datetime(1998, 1, 1, 1, 0, 0), - units='seconds since 1970-01-01 00:00:00 0:00', - calendar='julian') + units='seconds since 1970-01-01 00:00:00 0:00', + calendar='julian') nc = ambient.create_nc_db(nc_name, summary, source, sea_name, p_lat, p_lon, p_time) @@ -753,14 +709,16 @@ def get_profile(self, nc_name, fname, u_a, v_a, w_a, depths): # Compute the pressure and insert into the netCDF dataset P = ambient.compute_pressure(data[:, 0], data[:, 1], data[:, 2], 0) P_data = np.vstack((data[:, 0], P)).transpose() - nc = ambient.fill_nc_db(nc, P_data, ['z', 'pressure'], ['m', 'Pa'], - ['average', 'computed'], 0) + nc = ambient.fill_nc_db(nc, P_data, + ['z', 'pressure'], + ['m', 'Pa'], + ['average', 'computed'], 0) # Create an ambient.Profile object from this dataset profile = ambient.Profile(nc, chem_names='all') # Force the max depth to model -# depths[-1] = profile.z_max + # depths[-1] = profile.z_max # Add the crossflow velocity @@ -770,10 +728,12 @@ def get_profile(self, nc_name, fname, u_a, v_a, w_a, depths): u_crossflow = np.zeros((len(depths), 2)) u_crossflow[:, 0] = depths + if u_a.shape != depths.shape: u_crossflow[:, 1] = np.linspace(u_a[0], u_a[-1], len(depths)) else: u_crossflow[:, 1] = u_a + symbols = ['z', 'ua'] units = ['m', 'm/s'] comments = ['provided', 'provided'] @@ -781,10 +741,12 @@ def get_profile(self, nc_name, fname, u_a, v_a, w_a, depths): v_crossflow = np.zeros((len(depths), 2)) v_crossflow[:, 0] = depths + if v_a.shape != depths.shape: v_crossflow[:, 1] = np.linspace(v_a[0], v_a[-1], len(depths)) else: v_crossflow[:, 1] = v_a + symbols = ['z', 'va'] units = ['m', 'm/s'] comments = ['provided', 'provided'] @@ -792,10 +754,12 @@ def get_profile(self, nc_name, fname, u_a, v_a, w_a, depths): w_crossflow = np.zeros((len(depths), 2)) w_crossflow[:, 0] = depths + if w_a.shape != depths.shape: w_crossflow[:, 1] = np.linspace(w_a[0], w_a[-1], len(depths)) else: w_crossflow[:, 1] = w_a + symbols = ['z', 'wa'] units = ['m', 'm/s'] 
comments = ['provided', 'provided'] @@ -814,7 +778,6 @@ def get_composition(self, fname): with open(fname) as datfile: for line in datfile: - # Get a line of data entries = line.strip().split(',') print entries @@ -868,9 +831,11 @@ def release_flux(self, oil, mass_frac, profile, T0, z0, Q): # Return the total mass flux of gas and oil at the release return (md_gas, md_oil) - - def get_particles(self, composition, data, md_gas0, md_oil0, profile, d50_gas, d50_oil, nbins, - T0, z0, dispersant, sigma_fac, oil, mass_frac, hydrate, inert_drop): + def get_particles(self, composition, data, + md_gas0, md_oil0, profile, + d50_gas, d50_oil, nbins, + T0, z0, dispersant, sigma_fac, + oil, mass_frac, hydrate, inert_drop): """ docstring for get_particles @@ -883,11 +848,14 @@ def get_particles(self, composition, data, md_gas0, md_oil0, profile, d50_gas, d sigma = np.array([[1.], [1.]]) # Create DBM objects for the bubbles and droplets - bubl = dbm.FluidParticle(composition, fp_type=0, sigma_correction=sigma[0], user_data=data) - drop = dbm.FluidParticle(composition, fp_type=1, sigma_correction=sigma[1], user_data=data) + bubl = dbm.FluidParticle(composition, fp_type=0, + sigma_correction=sigma[0], user_data=data) + drop = dbm.FluidParticle(composition, fp_type=1, + sigma_correction=sigma[1], user_data=data) # Get the local ocean conditions - T, S, P = profile.get_values(z0, ['temperature', 'salinity', 'pressure']) + T, S, P = profile.get_values(z0, + ['temperature', 'salinity', 'pressure']) rho = seawater.density(T, S, P) # Get the mole fractions of the released fluids @@ -898,12 +866,20 @@ def get_particles(self, composition, data, md_gas0, md_oil0, profile, d50_gas, d # Use the Rosin-Rammler distribution to get the mass flux in each # size class -# de_gas, md_gas = sintef.rosin_rammler(nbins, d50_gas, np.sum(md_gas0), -# bubl.interface_tension(md_gas0, T0, S, P), -# bubl.density(md_gas0, T0, P), rho) -# de_oil, md_oil = sintef.rosin_rammler(nbins, d50_oil, np.sum(md_oil0), -# 
drop.interface_tension(md_oil0, T0, S, P), -# drop.density(md_oil0, T0, P), rho) + # de_gas, md_gas = sintef.rosin_rammler(nbins, d50_gas, + # np.sum(md_gas0), + # bubl.interface_tension(md_gas0, + # T0, + # S, P), + # bubl.density(md_gas0, T0, P), + # rho) + # de_oil, md_oil = sintef.rosin_rammler(nbins, d50_oil, + # np.sum(md_oil0), + # drop.interface_tension(md_oil0, + # T0, + # S, P), + # drop.density(md_oil0, T0, P), + # rho) # Get the user defined particle size distibution de_oil, vf_oil, de_gas, vf_gas = self.userdefined_de() @@ -916,8 +892,9 @@ def get_particles(self, composition, data, md_gas0, md_oil0, profile, d50_gas, d isfluid = True iscompressible = True rho_o = drop.density(md_oil0, T0, P) - inert = dbm.InsolubleParticle(isfluid, iscompressible, rho_p=rho_o, gamma=40., - beta=0.0007, co=2.90075e-9) + inert = dbm.InsolubleParticle(isfluid, iscompressible, + rho_p=rho_o, gamma=40., beta=0.0007, + co=2.90075e-9) # Create the particle objects particles = [] @@ -926,42 +903,58 @@ def get_particles(self, composition, data, md_gas0, md_oil0, profile, d50_gas, d # Bubbles for i in range(nbins): if md_gas[i] > 0.: - (m0, T0, nb0, P, Sa, Ta) = dispersed_phases.initial_conditions( - profile, z0, bubl, molf_gas, md_gas[i], 2, de_gas[i], T0) + m0, T0, nb0, P, Sa, Ta = dispersed_phases.initial_conditions( + profile, z0, bubl, molf_gas, md_gas[i], 2, de_gas[i], T0) # Get the hydrate formation time for bubbles if hydrate is True and dispersant is False: - t_hyd = dispersed_phases.hydrate_formation_time(bubl, z0, m0, T0, profile) + t_hyd = dispersed_phases.hydrate_formation_time(bubl, + z0, m0, T0, + profile) if np.isinf(t_hyd): t_hyd = 0. else: t_hyd = 0. 
- particles.append(bpm.Particle(0., 0., z0, bubl, m0, T0, nb0, - 1.0, P, Sa, Ta, K=1., K_T=1., fdis=1.e-6, t_hyd=t_hyd)) + + particles.append(bpm.Particle(0., 0., z0, bubl, + m0, T0, nb0, + 1.0, P, Sa, Ta, + K=1., K_T=1., fdis=1.e-6, + t_hyd=t_hyd)) # Droplets for i in range(len(de_oil)): # Add the live droplets to the particle list if md_oil[i] > 0. and not inert_drop: - (m0, T0, nb0, P, Sa, Ta) = dispersed_phases.initial_conditions( - profile, z0, drop, molf_oil, md_oil[i], 2, de_oil[i], T0) + m0, T0, nb0, P, Sa, Ta = dispersed_phases.initial_conditions( + profile, z0, drop, molf_oil, md_oil[i], 2, de_oil[i], T0) # Get the hydrate formation time for bubbles if hydrate is True and dispersant is False: - t_hyd = dispersed_phases.hydrate_formation_time(drop, z0, m0, T0, profile) + t_hyd = dispersed_phases.hydrate_formation_time(drop, + z0, m0, T0, + profile) if np.isinf(t_hyd): t_hyd = 0. else: t_hyd = 0. - particles.append(bpm.Particle(0., 0., z0, drop, m0, T0, nb0, - 1.0, P, Sa, Ta, K=1., K_T=1., fdis=1.e-6, t_hyd=t_hyd)) + + particles.append(bpm.Particle(0., 0., z0, drop, + m0, T0, nb0, 1.0, P, Sa, Ta, + K=1., K_T=1., fdis=1.e-6, + t_hyd=t_hyd)) + # Add the inert droplets to the particle list if md_oil[i] > 0. 
and inert_drop is True: - (m0, T0, nb0, P, Sa, Ta) = dispersed_phases.initial_conditions( - profile, z0, inert, molf_oil, md_oil[i], 2, de_oil[i], T0) - particles.append(bpm.Particle(0., 0., z0, inert, m0, T0, nb0, - 1.0, P, Sa, Ta, K=1., K_T=1., fdis=1.e-6, t_hyd=0.)) + m0, T0, nb0, P, Sa, Ta = dispersed_phases.initial_conditions( + profile, z0, inert, molf_oil, md_oil[i], 2, de_oil[i], T0) + + particles.append(bpm.Particle(0., 0., z0, inert, + m0, T0, nb0, 1.0, P, Sa, Ta, + K=1., K_T=1., fdis=1.e-6, + t_hyd=0.)) # Define the lambda for particles model = params.Scales(profile, particles) + for j in range(len(particles)): particles[j].lambda_1 = model.lambda_1(z0, j) @@ -990,13 +983,15 @@ def userdefined_de(self): vf_oil = np.zeros([100, 1]) vf_gas = np.zeros([100, 1]) - de_oil = de_details[:, 0] / 1000. de_gas = de_details[:, 2] / 1000. vf_oil = de_details[:, 1] vf_gas = de_details[:, 3] - return (de_oil[de_oil > 0.], vf_oil[vf_oil > 0.], de_gas[de_gas > 0.], vf_gas[vf_gas > 0.]) + return (de_oil[de_oil > 0.], + vf_oil[vf_oil > 0.], + de_gas[de_gas > 0.], + vf_gas[vf_gas > 0.]) def get_phase(self, profile, particle, Mp, T, z): """ @@ -1011,20 +1006,19 @@ def get_phase(self, profile, particle, Mp, T, z): # Get the equilibrium composition m0, xi, K = particle.equilibrium(Mp, T, P) - print 'liquid fraction' , np.sum(m0[1,:]) - print 'gas fraction', np.sum(m0[0,:]) + print 'liquid fraction', np.sum(m0[1, :]) + print 'gas fraction', np.sum(m0[0, :]) - if np.sum(m0[1,:]) == 1.0: + if np.sum(m0[1, :]) == 1.0: print ' Particle is complete liquid' flag_phase = 'Liquid' - elif np.sum(m0[0,:]) == 1.0: + elif np.sum(m0[0, :]) == 1.0: print 'particle is complete gas' flag_phase = 'Gas' else: print 'particle is a mixture of gas and liquid' flag_phase = 'Mixture' - return (flag_phase) def estimate_binary_interaction_parameters(self, oil): @@ -1054,15 +1048,17 @@ def estimate_binary_interaction_parameters(self, oil): ''' # Initialize the matrix - delta = 
np.zeros((len(oil.M),len(oil.M))) + delta = np.zeros((len(oil.M), len(oil.M))) # Populate the matrix with the estimates: for yy in range(len(oil.M)): for tt in range(len(oil.M)): - if not (tt==yy): - delta[yy,tt] = 0.00145*np.max( (oil.M[tt]/oil.M[yy],oil.M[yy]/oil.M[tt]) ) + if not (tt == yy): + delta[yy, tt] = 0.00145 * np.max((oil.M[tt]/oil.M[yy], + oil.M[yy]/oil.M[tt])) + return delta - def load_delta(self,file_name, nc): + def load_delta(self, file_name, nc): """ Loads the binary interaction parameters. @@ -1078,7 +1074,7 @@ def load_delta(self,file_name, nc): delta : ndarray, size (nc,nc) a matrix containing the loaded binary interaction parameters """ - delta = np.zeros([nc,nc]) + delta = np.zeros([nc, nc]) k = 0 with open(file_name, 'r') as datfile: for row in datfile: @@ -1089,7 +1085,8 @@ def load_delta(self,file_name, nc): return (delta) - def translate_properties_gnome_to_tamoc(self, md_oil, composition, oil, P, Sa, T=288.15): + def translate_properties_gnome_to_tamoc(self, md_oil, composition, oil, + P, Sa, T=288.15): ''' Translates properties from TAMOC components to GNOME components. @@ -1137,65 +1134,86 @@ def translate_properties_gnome_to_tamoc(self, md_oil, composition, oil, P, Sa, T # Let's get the partial densities in liquid for each component: # (Initialize the array:) densities = np.zeros(len(composition)) + # We will compute component densities at 288.15 K_T, except if the # user has input a lower T. A higher T is not allowed. # (In deep waters, droplets should cool very fast, it is not a # reasonable assumption to compute at a high T.) T_rho = np.min([288.15, T]) + # Check that we have no gas phase at this conditions: m_, xi, K = oil.equilibrium(md_oil, T_rho, P) - if np.sum(m_,1)[0]>0.: + + if np.sum(m_, 1)[0] > 0.: # The mixture would separate in a gas and a liquid phase at # equilibrium. 
Let's use the composition of the liquid phase: md_oil = m_[1] + # density of the bulk oil at release conditions: rho_0 = oil.density(md_oil, T_rho, P)[1] + # Now, we will remove/add a little mass of a component, and get its # partial density as the ratio of the change of mass divided by # change of oil volume. - for ii in range(len(densities)): # (We do a loop over each component) + for ii in range(len(densities)): # (We do a loop over each component) # We will either remove 1% or add 1% mass (and we choose the one # that keeps the mixture as a liquid): - add_or_remove = np.array([.99,1.01]) + add_or_remove = np.array([.99, 1.01]) for tt in range(len(add_or_remove)): # Factor used to remove/add mass of just component i: m_multiplication_factors = np.ones(len(densities)) + # We remove or add 1% of the mass of component i: m_multiplication_factors[ii] = add_or_remove[tt] m_i = md_oil * m_multiplication_factors - # Make an equilibrium calculation to check that we did not generate a gas phase: + + # Make an equilibrium calculation to check that we did not + # generate a gas phase: m_ii, xi, K = oil.equilibrium(m_i, T_rho, P) print T_rho, P + # If we did not generate a gas phase, stop here. 
Else we will # do the for loop a second time using the second value in # 'add_or_remove' - if np.sum(m_ii,1)[0]==0.: - + if np.sum(m_ii, 1)[0] == 0.: break + # We compute the density of the new mixture: rho_i = oil.density(m_i, T_rho, P)[1] # we get the partial density of each component as: # (DELTA(Mass) / DELTA(Volume)): - densities[ii] = (np.sum(md_oil) - np.sum(m_i)) / (np.sum(md_oil)/rho_0 - np.sum(m_i)/rho_i) + densities[ii] = ((np.sum(md_oil) - np.sum(m_i)) / + (np.sum(md_oil) / rho_0 - np.sum(m_i) / rho_i)) + + print ('TAMOC density: {} ' + 'and estimated from component densities: {}' + .format(rho_0, + (np.sum(md_oil) / np.sum(md_oil / densities)))) - print 'TAMOC density: ',rho_0,' and estimated from component densities: ',(np.sum(md_oil)/np.sum(md_oil/densities)) # Note: the (np.sum(md_oil)/np.sum(md_oil/densities)) makes sense # physically: density = SUM(MASSES) / SUM(VOLUMES) (Assuming volume # of mixing is zero, which is a very good assumption for petroleum # liquids) - print 'However GNOME would somehow estimate the density as m_i * rho_i: ',np.sum(md_oil*densities/np.sum(md_oil)) # This is the GNOME-way, though less physically-grounded. - print 'densities: ',densities + # This is the GNOME-way, though less physically-grounded. 
+ print ('However GNOME would somehow estimate the density as ' + 'm_i * rho_i: {}' + .format(np.sum(md_oil * densities / np.sum(md_oil)))) + print 'densities: ', densities + # Normalize densities so that the GNOME-way to compute density gives # the TAMOC density for the whole oil: - densities = densities * rho_0 / (np.sum(md_oil*densities/np.sum(md_oil))) - print 'GNOME value after normalizing densities: ',np.sum(md_oil*densities/np.sum(md_oil)) + densities = (densities * + rho_0 / + np.sum(md_oil * densities / np.sum(md_oil))) + print ('GNOME value after normalizing densities: {}' + .format(np.sum(md_oil * densities / np.sum(md_oil)))) print composition - print 'densities: ',densities - print 'MW: ',oil.M - print 'Tb: ',oil.Tb - print 'delta: ',oil.delta + print 'densities: ', densities + print 'MW: ', oil.M + print 'Tb: ', oil.Tb + print 'delta: ', oil.delta # Now oil properties: oil_viscosity = oil.viscosity(md_oil, T_rho, P)[1] @@ -1205,9 +1223,10 @@ def translate_properties_gnome_to_tamoc(self, md_oil, composition, oil, P, Sa, T # Compute the oil-water partition coefficients, K_ow: C_oil = md_oil / (np.sum(md_oil) / oil.density(md_oil, T_rho, P)[1]) C_water = oil.solubility(md_oil, T, P, Sa)[1] + K_ow = C_oil / C_water - print 'K_ow :' - print K_ow + print 'K_ow : {}'.format(K_ow) + # Below, we will assume that any component having a K_ow that is not # inf is a 'Aromatics' (it may not be a component corresponding to # aromatics compounds. But it contains soluble compounds. 
Labeling it @@ -1218,34 +1237,47 @@ def translate_properties_gnome_to_tamoc(self, md_oil, composition, oil, P, Sa, T # We need to create a list of dictionaries containing the molecular # weights: molecular_weights_dict_list = [] + for i in range(len(oil.M)): # This is the dictionary for the current component: current_dict = dict() + # Populate the keys of the dictionary with corresponding values: if not np.isinf(K_ow[i]): current_dict['sara_type'] = 'Aromatics' else: - current_dict['sara_type'] = 'Saturatess' - current_dict['g_mol'] = oil.M[i] * 1000. # BEWARE: GNOME wants g/mol and TAMOC has kg/mol. + current_dict['sara_type'] = 'Saturatess' + + # BEWARE: GNOME wants g/mol and TAMOC has kg/mol. + current_dict['g_mol'] = oil.M[i] * 1000. current_dict['ref_temp_k'] = oil.Tb[i] + # append each dictionary to the list of dictionarries: molecular_weights_dict_list.append(current_dict) + json_object['molecular_weights'] = molecular_weights_dict_list + # Now do the same for the cuts: cuts_dict_list = [] + for i in range(len(oil.M)): # This is the dictionary for the current component: current_dict = dict() + # Populate the keys of the dictionary with corresponding values: current_dict['vapor_temp_k'] = oil.Tb[i] current_dict['fraction'] = md_oil[i] + # append each dictionary to the list of dictionarries: cuts_dict_list.append(current_dict) + json_object['cuts'] = cuts_dict_list json_object['oil_seawater_interfacial_tension_ref_temp_k'] = T_rho json_object['oil_seawater_interfacial_tension_n_m'] = oil_interface_tension[0] + # Now do the same for the densities: densities_dict_list = [] + for i in range(len(oil.M)): # This is the dictionary for the current component: current_dict = dict() @@ -1254,77 +1286,98 @@ def translate_properties_gnome_to_tamoc(self, md_oil, composition, oil, P, Sa, T if not np.isinf(K_ow[i]): current_dict['sara_type'] = 'Aromatics' else: - current_dict['sara_type'] = 'Saturatess' + current_dict['sara_type'] = 'Saturatess' + current_dict['ref_temp_k'] 
= oil.Tb[i] # append each dictionary to the list of dictionarries: densities_dict_list.append(current_dict) + json_object['sara_densities'] = densities_dict_list + # This one is for the density of the oil as a whole: oil_density_dict = dict() - oil_density_dict['ref_temp_k'] = T_rho # a priori 288.15 + oil_density_dict['ref_temp_k'] = T_rho # a priori 288.15 oil_density_dict['kg_m_3'] = oil_density[0] oil_density_dict['weathering'] = 0. json_object['densities'] = [oil_density_dict] # This one is for the viscosity of the oil as a whole: - oil_viscosity_dict = dict() # Note: 'dvis' in GNOME is the dynamic viscosity called 'viscosity' in TAMOC - oil_viscosity_dict['ref_temp_k'] = T_rho # a priori 288.15 + # Note: 'dvis' in GNOME is the dynamic viscosity + # called 'viscosity' in TAMOC + oil_viscosity_dict = dict() + oil_viscosity_dict['ref_temp_k'] = T_rho # a priori 288.15 oil_viscosity_dict['kg_ms'] = oil_viscosity[0] oil_viscosity_dict['weathering'] = 0. + json_object['dvis'] = [oil_viscosity_dict] json_object['name'] = 'test TAMOC oil' + # Now do the same for the sara dractions: SARA_dict_list = [] + for i in range(len(oil.M)): # This is the dictionary for the current component: current_dict = dict() + # Populate the keys of the dictionary with corresponding values: if not np.isinf(K_ow[i]): current_dict['sara_type'] = 'Aromatics' else: - current_dict['sara_type'] = 'Saturatess' + current_dict['sara_type'] = 'Saturatess' + current_dict['ref_temp_k'] = oil.Tb[i] current_dict['fraction'] = md_oil[i] + # append each dictionary to the list of dictionarries: SARA_dict_list.append(current_dict) + json_object['sara_fractions'] = SARA_dict_list + # print json_object + from oil_library.models import Oil - #print json_object json_oil = Oil.from_json(json_object) + print json_oil.densities - #print json_oil.dvis # Hum. Oil has no attribute 'dvis', but 'kvis' is empty. Is that a bug? 
- print 'interfacial tension: ', json_oil.oil_seawater_interfacial_tension_n_m, oil_interface_tension + + # print json_oil.dvis + # Hum. Oil has no attribute 'dvis', but 'kvis' is empty. Is that a bug? + print ('interfacial tension: ({}, {})' + .format(json_oil.oil_seawater_interfacial_tension_n_m, + oil_interface_tension)) print json_oil.molecular_weights print json_oil.sara_fractions print json_oil.cuts print json_oil.densities - # # # TO ELUCIDATE: IS IT NORMAL THAT THE FIELDS OF json_oil ARE NOT - # # # THE SAME AS WHEN AN OIL IS IMPORTED FROM THE OIL DATABASE USING get_oil?? - - # # I CANNOT DO THIS BELOW, THIS IS ONLY FOR OILS IN THE DATABASE: - #from oil_library import get_oil, get_oil_props - #uuu = get_oil_props(json_oil.name) - #print 'oil density from our new created substance: ',np.sum(uuu.mass_fraction * uuu.component_density), ' or same: ',uuu.density_at_temp() - #print 'component densities: ',uuu.component_density - #print 'component mass fractions: ',uuu.mass_fraction - #print 'component molecular weights: ',uuu.molecular_weight - #print 'component boiling points: ',uuu.boiling_point - #print 'API: ',uuu.api - #print 'KINEMATIC viscosity: ',uuu.kvis_at_temp() - - - -# oil = dbm.FluidMixture(['benzene','toluene','ethylbenzene']) # tested the K_ow with benzene and toluene and ethylbenzene -# md_oil = np.array([1.,1.,1.]) -# C_oil = md_oil / (np.sum(md_oil) / oil.density(md_oil, T_rho, P)[1]) -# C_water = oil.solubility(md_oil, T_rho, P, Sa)[1] -# K_ow = C_oil / C_water -# from gnome.utilities.weathering import BanerjeeHuibers -# K_ow2 = BanerjeeHuibers.partition_coeff(oil.M*1000., oil.density(md_oil, T_rho, P)[1]) -# print 'K_ow :' -# print K_ow -# print K_ow2 + # TO ELUCIDATE: IS IT NORMAL THAT THE FIELDS OF json_oil ARE NOT + # THE SAME AS WHEN AN OIL IS IMPORTED FROM THE + # OIL DATABASE USING get_oil?? + + # NOTE: I CANNOT DO THIS BELOW, THIS IS ONLY FOR OILS IN THE DATABASE. 
+ # from oil_library import get_oil, get_oil_props + # uuu = get_oil_props(json_oil.name) + # print ('oil density from our new created substance: {} or same: {}' + # .format(np.sum(uuu.mass_fraction * uuu.component_density), + # uuu.density_at_temp())) + # print 'component densities: ',uuu.component_density + # print 'component mass fractions: ',uuu.mass_fraction + # print 'component molecular weights: ',uuu.molecular_weight + # print 'component boiling points: ',uuu.boiling_point + # print 'API: ',uuu.api + # print 'KINEMATIC viscosity: ',uuu.kvis_at_temp() + + # tested the K_ow with benzene and toluene and ethylbenzene + # oil = dbm.FluidMixture(['benzene','toluene','ethylbenzene']) + # md_oil = np.array([1.,1.,1.]) + # C_oil = md_oil / (np.sum(md_oil) / oil.density(md_oil, T_rho, P)[1]) + # C_water = oil.solubility(md_oil, T_rho, P, Sa)[1] + # K_ow = C_oil / C_water + # from gnome.utilities.weathering import BanerjeeHuibers + # K_ow2 = BanerjeeHuibers.partition_coeff(oil.M * 1000., + # oil.density(md_oil, + # T_rho, P)[1]) + # print 'K_ow :' + # print K_ow + # print K_ow2 return (K_ow, json_oil) - diff --git a/py_gnome/gnome/utilities/projections.py b/py_gnome/gnome/utilities/projections.py index 51403b330..1f15adeb4 100644 --- a/py_gnome/gnome/utilities/projections.py +++ b/py_gnome/gnome/utilities/projections.py @@ -310,7 +310,7 @@ def to_lonlat(self, coords): """ coords = np.asarray(coords) - if np.issubdtype(coords.dtype, int): + if np.issubdtype(coords.dtype, np.integer): # convert to float64: coords = coords.astype(np.float64) @@ -679,7 +679,7 @@ def to_lonlat(self, coords): """ coords = to_2d_coords(coords) - if np.issubdtype(coords.dtype, int): + if np.issubdtype(coords.dtype, np.integer): # convert to float64: coords = coords.astype(np.float64) diff --git a/py_gnome/gnome/utilities/serializable.py b/py_gnome/gnome/utilities/serializable.py index 71945f5dd..e90ae69b3 100644 --- a/py_gnome/gnome/utilities/serializable.py +++ 
b/py_gnome/gnome/utilities/serializable.py @@ -577,28 +577,38 @@ def to_dict(self): NOTE: add the json_='webapi' key to be serialized so we know what the serialization is for """ - + data = {} list_ = self._state.get_names('all') - data = {} for key in list_: value = self.attr_to_dict(key) + if hasattr(value, 'to_dict'): value = value.to_dict() # recursive call - elif (key in [f.name for f in self._state.get_field_by_attribute('iscollection')]): - #if self.key is a list, this needs special attention. It does - #not have a to_dict like OrderedCollection does! + elif (key in [f.name for f in + self._state.get_field_by_attribute('iscollection')]): + # if self.key is a list, this needs special attention. It does + # not have a to_dict like OrderedCollection does! vals = [] + for obj in value: - try: - obj_type = '{0.__module__}.{0.__class__.__name__}'.format(obj) - except AttributeError: - obj_type = '{0.__class__.__name__}'.format(obj) - _id=None + if isinstance(obj, dict) and 'obj_type' in obj: + obj_type = obj['obj_type'] + else: + try: + obj_type = ('{0.__module__}.{0.__class__.__name__}' + .format(obj)) + except AttributeError: + obj_type = '{0.__class__.__name__}'.format(obj) + + _id = None if hasattr(obj, 'id'): - _id= str(obj.id) + _id = str(obj.id) + elif 'id' in obj: + _id = obj['id'] else: - _id= str(id(obj)) + _id = str(id(obj)) + val = {'obj_type': obj_type, 'id': _id} vals.append(val) @@ -850,6 +860,7 @@ def to_serialize(self, json_='webapi'): If json_='webapi', it subselects Fields with (update=True, read=True) ''' dict_ = self.to_dict() + if json_ == 'webapi': attrlist = self._attrlist() elif json_ == 'save': diff --git a/py_gnome/gnome/utilities/timeseries.py b/py_gnome/gnome/utilities/timeseries.py index e115bc483..07b7787f2 100644 --- a/py_gnome/gnome/utilities/timeseries.py +++ b/py_gnome/gnome/utilities/timeseries.py @@ -21,7 +21,7 @@ class TimeseriesError(Exception): class Timeseries(GnomeId): - def __init__(self, timeseries=None, filename=None, 
format='uv'): + def __init__(self, timeseries=None, filename=None, coord_sys='uv'): """ Initializes a timeseries object from either a timeseries or datafile containing the timeseries. If both timeseries and file are given, @@ -31,19 +31,20 @@ def __init__(self, timeseries=None, filename=None, format='uv'): timeseries = np.zeros((1,), dtype=basic_types.datetime_value_2d) - If user provides timeseries, the default format is 'uv'. The C++ - stores the data in 'uv' format - transformations are done in this - Python code (set_timeseries(), get_timeseries()). + If user provides timeseries, the default coordinate system is 'uv'. + The C++ stores the data in 'uv' coordinates - transformations are done + in this Python code (set_timeseries(), get_timeseries()). - C++ code only transforms the data from 'r-theta' to 'uv' format if - data is read from file. And this happens during initialization because - C++ stores data in 'uv' format internally. + C++ code only transforms the data from 'r-theta' to 'uv' coordinates + if data is read from file. And this happens during initialization + because C++ stores data in 'uv' coordinates internally. Units option are not included - let derived classes manage units since the units for CyTimeseries (OSSMTimeValue_c) are limited. No unit conversion is performed when get_timeseries, set_timeseries is invoked. - It does, however convert between 'uv' and 'r-theta' depending on format - specified. Choose format='uv' if no transformation is desired. + It does, however convert between 'uv' and 'r-theta' depending on the + coordinate system specified. Choose coord_sys='uv' if no transformation + is desired. .. note:: For the Wind datafiles, the units will get read from the file. These are stored in ossm.user_units. 
It would be ideal to remove @@ -55,28 +56,30 @@ def __init__(self, timeseries=None, filename=None, format='uv'): :param timeseries: numpy array containing time_value_pair :type timeseries: numpy.ndarray containing - basic_types.datetime_value_2d or basic_types.datetime_value_1d. It - gets converted to an array containging basic_types.time_value_pair - datatype since that's what the C++ code expects + basic_types.datetime_value_2d or + basic_types.datetime_value_1d. + It gets converted to an array containing + basic_types.time_value_pair datatype since that's + what the C++ code expects :param filename: path to a timeseries file from which to read data. - Datafile must contain either a 3 line or a 5 line header with - following info: + Datafile must contain either a 3 line or a 5 line + header with following info: - 1. Station Name: name of the station as a string - 2. (long, lat, z): station location as tuple containing floats - 3. units: for wind this is knots, meteres per second - or miles per hour. For datafile containing something other than - velocity, this should be 'undefined' + 1. Station Name: name of the station as a string + 2. (long, lat, z): station location as tuple + containing floats + 3. units: for wind this is knots, meteres per second + or miles per hour. For datafile containing + something other than velocity, this should be + 'undefined' Optional parameters (kwargs): - :param format: (Optional) default timeseries format is - magnitude direction: 'r-theta' - :type format: string 'r-theta' or 'uv'. Default is 'r-theta'. + :param coord_sys: (Optional) default timeseries coordinate system is + magnitude direction: 'r-theta' + :type coord_sys: string 'r-theta' or 'uv'. Default is 'r-theta'. Converts string to integer defined by gnome.basic_types.ts_format.* - TODO: 'format' is a python builtin keyword. 
We should - not use it as an argument name """ if (timeseries is None and filename is None): timeseries = np.array([(sec_to_date(zero_time()), [0.0, 0.0])], @@ -89,11 +92,11 @@ def __init__(self, timeseries=None, filename=None, format='uv'): self._check_timeseries(timeseries) datetime_value_2d = self._xform_input_timeseries(timeseries) - time_value_pair = to_time_value_pair(datetime_value_2d, format) + time_value_pair = to_time_value_pair(datetime_value_2d, coord_sys) self.ossm = CyTimeseries(timeseries=time_value_pair) else: - ts_format = tsformat(format) + ts_format = tsformat(coord_sys) self.ossm = CyTimeseries(filename=self._filename, file_format=ts_format) @@ -175,19 +178,6 @@ def _timeseries_is_ascending(self, timeseries): else: return True - # not needed -- _timeseries_is_ascending should catch this - # def _timeseries_has_duplicates(self, timeseries): - # # we need to have a valid shape to sort - # if timeseries.shape == (): - # timeseries = np.asarray([timeseries], - # dtype=basic_types.datetime_value_2d) - - # unique = np.unique(timeseries['time']) - # if len(unique) != len(timeseries['time']): - # return True - # else: - # return False - def _xform_input_timeseries(self, timeseries): ''' Ensure input data is numpy array with correct dtype and check @@ -213,60 +203,59 @@ def __str__(self): def filename(self): return self._filename - def get_timeseries(self, datetime=None, format='uv'): + def get_timeseries(self, datetime=None, coord_sys='uv'): """ - Returns the timeseries in requested format. If datetime=None, - then the original timeseries that was entered is returned. + Returns the timeseries in requested coordinate system. + If datetime=None, then the original timeseries that was entered is + returned. If datetime is a list containing datetime objects, then the value for each of those date times is determined by the underlying C++ object and the timeseries is returned. 
- The output format is defined by the strings 'r-theta', 'uv' + The output coordinate system is defined by the strings 'r-theta', 'uv' :param datetime: [optional] datetime object or list of datetime objects for which the value is desired :type datetime: datetime object - :param format: output format for the times series: - either 'r-theta' or 'uv' - :type format: either string or integer value defined by - basic_types.ts_format.* (see cy_basic_types.pyx) + + :param coord_sys: output coordinate system for the times series: + either 'r-theta' or 'uv' + :type coord_sys: either string or integer value defined by + basic_types.ts_format.* (see cy_basic_types.pyx) :returns: numpy array containing dtype=basic_types.datetime_value_2d. Contains user specified datetime and the corresponding values in user specified ts_format """ if datetime is None: - datetimeval = to_datetime_value_2d(self.ossm.timeseries, format) + datetimeval = to_datetime_value_2d(self.ossm.timeseries, coord_sys) else: datetime = np.asarray(datetime, dtype='datetime64[s]').reshape(-1) timeval = np.zeros((len(datetime), ), dtype=basic_types.time_value_pair) timeval['time'] = date_to_sec(datetime) (timeval['value'], err) = self.ossm.get_time_value(timeval['time']) + if err != 0: - msg = ('No available data in the time interval ' - 'that is being modeled\n' + msg = ('No available data in the time interval that is being ' + 'modeled\n' '\tModel time: {}\n' '\tMover: {} of type {}\n' - #'\tData available from {} to {}' - #.format(model_time_datetime, - #self.name, self.__class__, - #self.real_data_start, self.real_data_stop)) - .format(datetime, - self.name, self.__class__)) - #self.real_data_start, self.real_data_stop)) + .format(datetime, self.name, self.__class__)) self.logger.error(msg) raise RuntimeError(msg) - datetimeval = to_datetime_value_2d(timeval, format) + + datetimeval = to_datetime_value_2d(timeval, coord_sys) return datetimeval - def set_timeseries(self, datetime_value_2d, format='uv'): + def 
set_timeseries(self, datetime_value_2d, coord_sys='uv'): """ Sets the timeseries to the new value given by a numpy array. The - format for the input data defaults to - basic_types.format.magnitude_direction but can be changed by the user + coordinate system for the input data defaults to + basic_types.format.magnitude_direction but can be changed by the user. + Assumes timeseries is valid so _check_timeseries has been invoked and any unit conversions are done. This function simply converts datetime_value_2d to time_value_pair and updates the data in underlying @@ -276,13 +265,14 @@ def set_timeseries(self, datetime_value_2d, format='uv'): numpy array :type datetime_value_2d: numpy array of dtype basic_types.datetime_value_2d - :param format: output format for the times series; as defined by - basic_types.format. - :type format: either string or integer value defined by - basic_types.format.* (see cy_basic_types.pyx) + + :param coord_sys: output coordinate system for the times series, + as defined by basic_types.ts_format. 
+ :type coord_sys: either string or integer value defined by + basic_types.ts_format.* (see cy_basic_types.pyx) """ datetime_value_2d = self._xform_input_timeseries(datetime_value_2d) - timeval = to_time_value_pair(datetime_value_2d, format) + timeval = to_time_value_pair(datetime_value_2d, coord_sys) self.ossm.timeseries = timeval @@ -296,6 +286,7 @@ def __eq__(self, other): ''' self_ts = self.get_timeseries() other_ts = other.get_timeseries() + if not np.all(self_ts['time'] == other_ts['time']): return False diff --git a/py_gnome/gnome/weatherers/core.py b/py_gnome/gnome/weatherers/core.py index 04e9469de..2b4744033 100644 --- a/py_gnome/gnome/weatherers/core.py +++ b/py_gnome/gnome/weatherers/core.py @@ -106,28 +106,36 @@ def _exp_decay(self, M_0, lambda_, time): mass_remain = M_0 * np.exp(lambda_ * time) return mass_remain - def get_wind_speed(self, points, model_time, format='r', fill_value=1.0): + def get_wind_speed(self, points, model_time, + coord_sys='r', fill_value=1.0): ''' Wrapper for the weatherers so they can extrapolate ''' -# new_model_time = self.check_time(wind, model_time) - retval = self.wind.at(points, model_time, format=format) - return retval.filled(fill_value) if isinstance(retval, np.ma.MaskedArray) else retval + retval = self.wind.at(points, model_time, coord_sys=coord_sys) + + if isinstance(retval, np.ma.MaskedArray): + return retval.filled(fill_value) + else: + return retval def check_time(self, wind, model_time): """ Should have an option to extrapolate but for now we do by default """ new_model_time = model_time + if wind is not None: if model_time is not None: timeval = date_to_sec(model_time) start_time = wind.get_start_time() end_time = wind.get_end_time() + if end_time == start_time: return model_time + if timeval < start_time: new_model_time = sec_to_datetime(start_time) + if timeval > end_time: new_model_time = sec_to_datetime(end_time) else: @@ -146,8 +154,10 @@ def serialize(self, json_='webapi'): if json_ == 'webapi': if 
hasattr(self, 'wind') and self.wind: serial['wind'] = self.wind.serialize(json_) + if hasattr(self, 'waves') and self.waves: serial['waves'] = self.waves.serialize(json_) + if hasattr(self, 'water') and self.water: serial['water'] = self.water.serialize(json_) @@ -161,11 +171,13 @@ def deserialize(cls, json_): if not cls.is_sparse(json_): schema = cls._schema() - for w in ['wind','water','waves']: + for w in ['wind', 'water', 'waves']: if w in json_: obj = json_[w]['obj_type'] schema.add(eval(obj)._schema(name=w, missing=drop)) + dict_ = schema.deserialize(json_) + return dict_ else: return json_ @@ -217,6 +229,7 @@ def weather_elements(self, sc, time_step, model_time): ''' if not self.active: return + if sc.num_released == 0: return diff --git a/py_gnome/gnome/weatherers/emulsification.py b/py_gnome/gnome/weatherers/emulsification.py index 4ff16fcdc..c0586933d 100644 --- a/py_gnome/gnome/weatherers/emulsification.py +++ b/py_gnome/gnome/weatherers/emulsification.py @@ -82,7 +82,7 @@ def prepare_for_model_step(self, sc, time_step, model_time): return # eventually switch this in - def new_weather_elements(self, sc, time_step, model_time): + def weather_elements_lehr(self, sc, time_step, model_time): ''' weather elements over time_step - sets 'water_content' in sc.mass_balance @@ -253,7 +253,7 @@ def new_weather_elements(self, sc, time_step, model_time): sc.update_from_fatedataview() - def weather_elements(self, sc, time_step, model_time): + def weather_elements_adios2(self, sc, time_step, model_time): ''' weather elements over time_step - sets 'water_content' in sc.mass_balance @@ -317,6 +317,35 @@ def weather_elements(self, sc, time_step, model_time): sc.update_from_fatedataview() + def weather_elements(self, sc, time_step, model_time): + ''' + weather elements over time_step + - sets 'water_content' in sc.mass_balance + ''' + + if not self.active: + return + if sc.num_released == 0: + return + + use_new_algorithm = False + # only use new algorithm if all 
substances have measured SARA totals + for substance in sc.get_substances(): + if substance.record.imported is not None: + sat = substance.record.imported.saturates + arom = substance.record.imported.aromatics + if sat is not None and arom is not None: + use_new_algorithm = True + else: + use_new_algorithm = False + break + else: + use_new_algorithm = False #use old algorithm + break + + #self.weather_elements_lehr(sc, time_step, model_time) + self.weather_elements_adios2(sc, time_step, model_time) + def _H_log(self, k, x): ''' logistic function for turning on emulsification diff --git a/py_gnome/gnome/weatherers/roc.py b/py_gnome/gnome/weatherers/roc.py index e0452f60d..4fed0489e 100644 --- a/py_gnome/gnome/weatherers/roc.py +++ b/py_gnome/gnome/weatherers/roc.py @@ -4,31 +4,35 @@ ''' from __future__ import division -import pytest -import datetime +import os import copy -import unit_conversion as uc import json -import os -import logging -import numpy as np -import math +from datetime import datetime, timedelta from collections import OrderedDict -from colander import (drop, SchemaNode, MappingSchema, Integer, Float, String, OneOf, Mapping, SequenceSchema, TupleSchema, DateTime) +import numpy as np + +import unit_conversion as uc + +from colander import (drop, SchemaNode, MappingSchema, OneOf, + SequenceSchema, TupleSchema, + Integer, Float, String, DateTime, Mapping) -from gnome.weatherers import Weatherer -from gnome.utilities.serializable import Serializable, Field -from gnome.persist.extend_colander import LocalDateTime, DefaultTupleSchema, NumpyArray, TimeDelta -from gnome.persist import validators, base_schema -from gnome.weatherers.core import WeathererSchema from gnome import _valid_units from gnome.basic_types import oil_status, fate as bt_fate +from gnome.array_types import mass, density, fay_area, frac_water + +from gnome.utilities.serializable import Serializable, Field + +from gnome.weatherers import Weatherer +from gnome.weatherers.core import 
WeathererSchema + +from gnome.persist import validators, base_schema +from gnome.persist.extend_colander import (LocalDateTime, + DefaultTupleSchema, + NumpyArray, + TimeDelta) -from gnome.array_types import (mass, - density, - fay_area, - frac_water) # define valid units at module scope because the Schema and Object both use it _valid_dist_units = _valid_units('Length') @@ -44,6 +48,7 @@ class OnSceneTupleSchema(TupleSchema): start = SchemaNode(DateTime(default_tzinfo=None)) end = SchemaNode(DateTime(default_tzinfo=None)) + class OnSceneTimeSeriesSchema(SequenceSchema): value = OnSceneTupleSchema() @@ -54,23 +59,19 @@ class OnSceneTimeSeriesSchema(SequenceSchema): # validators.no_duplicate_datetime(node, cstruct) # validators.ascending_datetime(node, cstruct) + class ResponseSchema(WeathererSchema): timeseries = OnSceneTimeSeriesSchema() -class Response(Weatherer, Serializable): - _schema = ResponseSchema +class Response(Weatherer, Serializable): _state = copy.deepcopy(Weatherer._state) + _state += [Field('timeseries', save=True, update=True)] _oc_list = ['timeseries'] _schema = ResponseSchema - _state = copy.deepcopy(Weatherer._state) - - _state += [Field('timeseries', save=True, update=True)] - - def __init__(self, - timeseries=None, + def __init__(self, timeseries=None, **kwargs): super(Response, self).__init__(**kwargs) self.timeseries = timeseries @@ -80,7 +81,8 @@ def _get_thickness(self, sc): oil_thickness = 0.0 substance = self._get_substance(sc) if sc['area'].any() > 0: - volume_emul = (sc['mass'].mean() / substance.density_at_temp()) / (1.0 - sc['frac_water'].mean()) + volume_emul = ((sc['mass'].mean() / substance.density_at_temp()) / + (1.0 - sc['frac_water'].mean())) oil_thickness = volume_emul / sc['area'].mean() return uc.convert('Length', 'meters', 'inches', oil_thickness) @@ -92,12 +94,13 @@ def units(self): @units.setter def units(self, u_dict): for prop, unit in u_dict.iteritems(): - if prop in self._units_type: - if unit not in 
self._units_type[prop][1]: - msg = ("{0} are invalid units for {1}." - "Ignore it".format(unit, prop)) - self.logger.error(msg) - raise uc.InvalidUnitError(msg) + if (prop in self._units_type and + unit not in self._units_type[prop][1]): + msg = ('{0} are invalid units for {1}. Ignore it' + .format(unit, prop)) + + self.logger.error(msg) + raise uc.InvalidUnitError(msg) self._units[prop] = unit @@ -112,7 +115,7 @@ def get(self, attr, unit=None): if unit in self._units_type[attr][1]: return uc.convert(self._units_type[attr][0], self.units[attr], - unit, val) + unit, val) else: ex = uc.InvalidUnitError((unit, self._units_type[attr][0])) self.logger.error(str(ex)) @@ -127,7 +130,8 @@ def set(self, attr, value, unit): def _is_active(self, model_time, time_step): for t in self.timeseries: - if model_time >= t[0] and model_time + datetime.timedelta(seconds=time_step / 2) <= t[1]: + if (model_time >= t[0] and + model_time + timedelta(seconds=time_step / 2) <= t[1]): return True return False @@ -146,6 +150,7 @@ def _get_substance(self, sc): for now, just log an error if more than one substance is present ''' substance = sc.get_substances(complete=False) + if len(substance) > 1: self.logger.error('Found more than one type of oil ' '- not supported. 
Results with be incorrect') @@ -155,29 +160,35 @@ def _get_substance(self, sc): def _remove_mass_simple(self, data, amount): total_mass = data['mass'].sum() rm_mass_frac = min(amount / total_mass, 1.0) - data['mass_components'] = \ - (1 - rm_mass_frac) * data['mass_components'] + + data['mass_components'] = (1 - rm_mass_frac) * data['mass_components'] data['mass'] = data['mass_components'].sum(1) + return total_mass - data['mass'].sum() def _remove_mass_indices(self, data, amounts, indices): - #removes mass from the mass components specified by an indices array + # removes mass from the mass components specified by an indices array masses = data['mass'][indices] rm_mass_frac = np.clip(amounts / masses, 0, 1) + old_mass = data['mass_components'][indices].sum(1) + data['mass_components'][indices] = (1 - rm_mass_frac)[:, np.newaxis] * data['mass_components'][indices] - new_mass = data['mass_components'][indices].sum(1) data['mass'][indices] = data['mass_components'][indices].sum(1) + + new_mass = data['mass_components'][indices].sum(1) + return old_mass - new_mass def index_of(self, time): ''' - Returns the index of the timeseries entry that the time specified is within. - If it is not in one of the intervals, -1 will be returned + Returns the index of the timeseries entry that the time specified + is within. 
If it is not in one of the intervals, -1 will be returned ''' for i, t in enumerate(self.timeseries): if time >= t[0] and time < t[-1]: return i + return -1 def next_interval_index(self, time): @@ -186,19 +197,23 @@ def next_interval_index(self, time): returns None if there is no next interval ''' if time >= self.timeseries[-1][-1]: - #off end + # off end return None + if time < self.timeseries[0][0]: - #before start + # before start return 0 + idx = self.index_of(time) if idx > -1: - #inside valid interval + # inside valid interval return idx + 1 if idx + 1 != len(self.timeseries) else None + if idx == -1: - #outside timeseries intervals - for i, t in enumerate(self.timeseries[0:-1]): - if time >= self.timeseries[i][-1] and time < self.timeseries[i+1][0]: + # outside timeseries intervals + for i, _t in enumerate(self.timeseries[0:-1]): + if (time >= self.timeseries[i][-1] and + time < self.timeseries[i+1][0]): return i+1 def time_to_next_interval(self, time): @@ -221,25 +236,30 @@ def is_operating(self, time): return self.index_of(time) > -1 def _no_op_step(self): - self._time_remaining = 0; + self._time_remaining = 0 + class PlatformUnitsSchema(MappingSchema): def __init__(self, *args, **kwargs): for k, v in Platform._attr.items(): - self.add(SchemaNode(String(), missing=drop, name=k, validator=OneOf(v[2]))) + self.add(SchemaNode(String(), missing=drop, name=k, + validator=OneOf(v[2]))) + super(PlatformUnitsSchema, self).__init__() class PlatformSchema(base_schema.ObjType): - def __init__(self, *args, **kwargs): for k in Platform._attr.keys(): self.add(SchemaNode(Float(), missing=drop, name=k)) + units = PlatformUnitsSchema() units.missing = drop units.name = 'units' + self.add(units) self.add(SchemaNode(String(), name="type", missing=drop)) + super(PlatformSchema, self).__init__() @@ -248,16 +268,16 @@ class Platform(Serializable): _attr = {"swath_width_max": ('ft', 'length', _valid_dist_units), "swath_width": ('ft', 'length', _valid_dist_units), 
"swath_width_min": ('ft', 'length', _valid_dist_units), - "reposition_speed": ('kts', 'velocity', _valid_vel_units), #non-boat + "reposition_speed": ('kts', 'velocity', _valid_vel_units), # non-boat "application_speed_min": ('kts', 'velocity', _valid_vel_units), "application_speed": ('kts', 'velocity', _valid_vel_units), "application_speed_max": ('kts', 'velocity', _valid_vel_units), - "cascade_transit_speed_max_without_payload": ('kts', 'velocity', _valid_vel_units), #non-boat - "cascade_transit_speed_without_payload": ('kts', 'velocity', _valid_vel_units), #non-boat - "cascade_transit_speed_min_without_payload": ('kts', 'velocity', _valid_vel_units), #non-boat - "cascade_transit_speed_with_payload": ('kts', 'velocity', _valid_vel_units), #non-boat - "cascade_transit_speed_max_with_payload": ('kts', 'velocity', _valid_vel_units), #non-boat - "cascade_transit_speed_min_with_payload": ('kts', 'velocity', _valid_vel_units), #non-boat + "cascade_transit_speed_max_without_payload": ('kts', 'velocity', _valid_vel_units), # non-boat + "cascade_transit_speed_without_payload": ('kts', 'velocity', _valid_vel_units), # non-boat + "cascade_transit_speed_min_without_payload": ('kts', 'velocity', _valid_vel_units), # non-boat + "cascade_transit_speed_with_payload": ('kts', 'velocity', _valid_vel_units), # non-boat + "cascade_transit_speed_max_with_payload": ('kts', 'velocity', _valid_vel_units), # non-boat + "cascade_transit_speed_min_with_payload": ('kts', 'velocity', _valid_vel_units), # non-boat "transit_speed_max": ('kts', 'velocity', _valid_vel_units), "transit_speed_min": ('kts', 'velocity', _valid_vel_units), "transit_speed": ('kts', 'velocity', _valid_vel_units), @@ -282,10 +302,13 @@ class Platform(Serializable): _units_type = dict([(k, (v[1], v[2])) for k, v in _attr.items()]) base_dir = os.path.dirname(__file__) + with open(os.path.join(base_dir, 'platforms.json'), 'r') as f: js = json.load(f) - plat_types = dict(zip([t['name'] for t in js['vessel']], js['vessel'])) 
- plat_types.update(dict(zip([t['name'] for t in js['aircraft']], js['aircraft']))) + plat_types = dict(zip([t['name'] for t in js['vessel']], + js['vessel'])) + plat_types.update(dict(zip([t['name'] for t in js['aircraft']], + js['aircraft']))) _schema = PlatformSchema @@ -295,22 +318,23 @@ class Platform(Serializable): _state += [Field('units', save=True, update=True)] _state += [Field('type', save=True, update=True)] - def __init__(self, - units=None, - type='Platform', + def __init__(self, units=None, type='Platform', **kwargs): - - if '_name' in kwargs.keys(): + if '_name' in kwargs: kwargs = self.plat_types[kwargs.pop('_name')] + if units is None: units = dict([(k, v[0]) for k, v in self._attr.items()]) + self.units = units self.type = type + for k in Platform._attr.keys(): setattr(self, k, kwargs.get(k, None)) self.disp_remaining = 0 self.cur_pump_rate = 0 + if self.approach is None or self.departure is None: self.is_boat = True else: @@ -322,6 +346,7 @@ def __init__(self, def get(self, attr, unit=None): val = getattr(self, attr) + if unit is None: if (attr not in self._si_units or self._si_units[attr] == self.units[attr]): @@ -348,6 +373,7 @@ def release_rate(self, dosage, unit='gal/acre'): '''return unit = gal/min''' if unit != 'gal/acre': dosage = uc.Convert('oilconcentration', 'unit', 'gal/acre', dosage) + a_s = self.get('application_speed', 'ft/min') s_w = self.get('swadth_width', 'ft') @@ -363,10 +389,17 @@ def new_from_dict(cls, dict_): def one_way_transit_time(self, dist, unit='nm', payload=False): '''return unit = sec''' t_s = self.get('transit_speed', 'kts') - t_l_d = self.get('taxi_land_depart', 'sec') if self.taxi_land_depart is not None else None + + if self.taxi_land_depart is not None: + t_l_d = self.get('taxi_land_depart', 'sec') + else: + t_l_d = None + raw = dist / t_s * 3600 + if t_l_d is not None: raw += t_l_d + return raw def max_dosage(self): @@ -374,9 +407,11 @@ def max_dosage(self): p_r_m = self.get('pump_rate_max', 'm^3/s') a_s = 
self.get('application_speed', 'm/s') s_w_m = self.get('swath_width_min', 'm') + dos = (p_r_m) / (a_s * s_w_m) dos = uc.convert('length', 'm', 'micron', dos) dos = uc.convert('oilconcentration', 'micron', 'gal/acre', dos) + return dos def min_dosage(self): @@ -384,33 +419,46 @@ def min_dosage(self): p_r_m = self.get('pump_rate_min', 'm^3/s') a_s = self.get('application_speed', 'm/s') s_w_m = self.get('swath_width_max', 'm') + dos = (p_r_m) / (a_s * s_w_m) dos = uc.convert('length', 'm', 'micron', dos) dos = uc.convert('oilconcentration', 'micron', 'gal/acre', dos) + return dos def cascade_time(self, dist, unit='nm', payload=False): '''return unit = sec''' - dist = dist if unit == 'nm' else uc.convert('length', unit, 'nm', dist) - max_range = self.get('max_rage_with_payload', 'nm') if payload else self.get('max_range_no_payload', 'nm') - speed = self.get('cascade_transit_speed_with_payload', 'kts') if payload else self.get('cascade_transit_speed_without_payload', 'kts') + dist = uc.convert('length', unit, 'nm', dist) + + if payload: + max_range = self.get('max_rage_with_payload', 'nm') + speed = self.get('cascade_transit_speed_with_payload', 'kts') + else: + max_range = self.get('max_range_no_payload', 'nm') + speed = self.get('cascade_transit_speed_without_payload', 'kts') + taxi_land_depart = self.get('taxi_land_depart', 'hr') fuel_load = self.get('refuel', 'hr') cascade_time = 0 + if dist > max_range: num_legs = dist / max_range frac_leg = (num_legs * 1000) % 1000 num_legs = int(num_legs) + cascade_time += taxi_land_depart cascade_time += (num_legs * max_range) + inter_stop = (taxi_land_depart * 2 + fuel_load) + cascade_time += num_legs * inter_stop cascade_time += frac_leg * (max_range / speed) cascade_time += taxi_land_depart else: cascade_time += taxi_land_depart * 2 cascade_time += dist / speed + return cascade_time * 3600 def max_onsite_time(self, dist, simul=False): @@ -420,7 +468,9 @@ def max_onsite_time(self, dist, simul=False): m_o_t = 
self.get('max_op_time', 'sec') o_w_t_t = self.one_way_transit_time(dist) r_r = self.refuel_reload(simul=simul) + rv = m_o_t - o_w_t_t * 2 - r_r + return rv def num_passes_possible(self, time, pass_len, pass_type): @@ -430,13 +480,14 @@ def num_passes_possible(self, time, pass_len, pass_type): A pass consists of an approach, spray, u-turn, and reposition. ''' - - return int(time.total_seconds() / int(self.pass_duration(pass_len, pass_type))) + return int(time.total_seconds() / + int(self.pass_duration(pass_len, pass_type))) def refuel_reload(self, simul=False): '''return unit = sec''' rl = self.get('dispersant_load', 'sec') rf = self.get('fuel_load', 'sec') + return max(rl, rf) if simul else rf + rl def pass_duration(self, pass_len, pass_type, units='nm'): @@ -445,54 +496,87 @@ def pass_duration(self, pass_len, pass_type, units='nm'): return in sec ''' times = self.pass_duration_tuple(pass_len, pass_type, units='nm') + + # TODO: why have the conditional if the return type is the same? if pass_type == 'bidirectional': return sum(times) else: return sum(times) def pass_duration_tuple(self, pass_len, pass_type, units='nm'): - appr_dist = self.get('approach', 'm') if self.approach is not None else 0 - dep_dist = self.get('departure', 'm') if self.departure is not None else 0 - rep_speed = self.get('reposition_speed', 'm/s') if self.reposition_speed is not None else 1 + if self.approach is not None: + appr_dist = self.get('approach', 'm') + else: + appr_dist = 0 + + if self.departure is not None: + dep_dist = self.get('departure', 'm') + else: + dep_dist = 0 + + if self.reposition_speed is not None: + rep_speed = self.get('reposition_speed', 'm/s') + else: + rep_speed = 1 + appr_time = appr_dist / rep_speed dep_time = dep_dist / rep_speed - u_turn = self.get('u_turn_time', 'sec') if self.u_turn_time is not None else 0 + + if self.u_turn_time is not None: + u_turn = self.get('u_turn_time', 'sec') + else: + u_turn = 0 pass_len = uc.convert('length', units, 'm', pass_len) 
app_speed = self.get('application_speed', 'm/s') spray_time = pass_len / app_speed + if pass_type == 'bidirectional': self._ts_spray_time += spray_time * 2 + return (appr_time, spray_time, u_turn, spray_time, dep_time) else: self._ts_spray_time += spray_time + return (appr_time, spray_time, u_turn, dep_time) def sortie_possible(self, time_avail, transit, pass_len): # assume already refueled/reloaded - # possible if able to complete transit, at least one pass, and transit back within time available + # possible if able to complete transit, at least one pass, + # and transit back within time available min_spray_time = self.pass_duration(pass_len, 'bidirectional') - tot_mission_time = self.one_way_transit_time(transit) * 2 + min_spray_time - return time_avail > datetime.timedelta(seconds=tot_mission_time) + tot_mission_time = (self.one_way_transit_time(transit) * 2 + + min_spray_time) + + return time_avail > timedelta(seconds=tot_mission_time) def eff_pump_rate(self, dosage, unit='gal/acre'): ''' - given a dosage, determine the pump rate necessary given the airspeed and area covered in a pass + given a dosage, determine the pump rate necessary given the airspeed + and area covered in a pass + return value = m^3/s ''' dosage = uc.convert('oilconcentration', unit, 'micron', dosage) dosage = uc.convert('length', 'micron', 'm', dosage) + app_speed = self.get('application_speed', 'm/s') swath_width = self.get('swath_width', 'm') + eff_pr = dosage * app_speed * swath_width max_pr = self.get('pump_rate_max', 'm^3/s') min_pr = self.get('pump_rate_min', 'm^3/s') + if eff_pr > max_pr: - #log warning? - print 'computed pump rate is too high for this platform. using max instead' + # log warning? + print ('Computed pump rate is too high for this platform. ' + 'Using max instead') + return max_pr elif eff_pr < min_pr: - print 'computed pump rate is too low for this platform. using min instead' + print ('Computed pump rate is too low for this platform. 
' + 'Using min instead') + return min_pr else: return eff_pr @@ -500,8 +584,10 @@ def eff_pump_rate(self, dosage, unit='gal/acre'): def spray_time_fraction(self, pass_len, pass_type, units='nm'): pass_len = uc.convert('length', units, 'm', pass_len) app_speed = self.get('application_speed', 'm/s') + pass_dur = self.pass_duration(pass_len, pass_type, units) spray_time = pass_len / app_speed + if pass_type == 'bidirectional': return (spray_time * 2) / pass_dur else: @@ -511,41 +597,46 @@ def spray_time_fraction(self, pass_len, pass_type, units='nm'): class DisperseUnitsSchema(MappingSchema): def __init__(self, *args, **kwargs): for k, v in Disperse._attr.items(): - self.add(SchemaNode(String(), missing=drop, name=k, validator=OneOf(v[2]))) + self.add(SchemaNode(String(), missing=drop, name=k, + validator=OneOf(v[2]))) + super(DisperseUnitsSchema, self).__init__() class DisperseSchema(ResponseSchema): - loading_type = SchemaNode(String(), validator=OneOf(['simultaneous', 'separate'])) - dosage_type = SchemaNode(String(), missing=drop, validator=OneOf(['auto', 'custom'])) + loading_type = SchemaNode(String(), + validator=OneOf(['simultaneous', 'separate'])) + dosage_type = SchemaNode(String(), missing=drop, + validator=OneOf(['auto', 'custom'])) disp_oil_ratio = SchemaNode(Float(), missing=drop) disp_eff = SchemaNode(Float(), missing=drop) platform = PlatformSchema() def __init__(self, *args, **kwargs): - for k, v in Disperse._attr.items(): + for k, _v in Disperse._attr.items(): self.add(SchemaNode(Float(), missing=drop, name=k)) + units = DisperseUnitsSchema() units.missing = drop units.name = 'units' + self.add(units) + super(DisperseSchema, self).__init__() + class Disperse(Response): _attr = {'transit': ('nm', 'length', _valid_dist_units), 'pass_length': ('nm', 'length', _valid_dist_units), 'cascade_distance': ('nm', 'length', _valid_dist_units), - 'dosage': ('gal/acre', 'oilconcentration', _valid_oil_concentration_units)} + 'dosage': ('gal/acre', 
'oilconcentration', + _valid_oil_concentration_units)} _si_units = dict([(k, v[0]) for k, v in _attr.items()]) - _units_type = dict([(k, (v[1], v[2])) for k, v in _attr.items()]) - _schema = DisperseSchema - _state = copy.deepcopy(Response._state) - _state += [Field(k, save=True, update=True) for k in _attr.keys()] _state += [Field('units', save=True, update=True), Field('disp_oil_ratio', save=True, update=True), @@ -556,8 +647,39 @@ class Disperse(Response): Field('report', save=False, update=False), Field('wind', save=True, update=True, save_reference=True)] - wind_eff_list = [15, 30, 45, 60, 70, 78, 80, 82, 83, 84, 84, 84, 84, 84, 83, 83, 82, 80, 79, 78, 77, 75, 73, 71, 69, 67, 65, 63, 60, 58, 55, 53, 50, 47, 44, 41, 38] - visc_eff_table = OrderedDict([(1, 68), (2, 71), (3, 72.5), (4, 74), (5, 75), (7, 77), (10, 78), (20, 80), (40, 83.5), (70, 85.5), (100, 87), (300, 89.5), (500, 90.5), (700, 91), (1000, 92), (2000, 91), (3000, 83), (5000, 52), (7000, 32), (10000, 17), (20000, 11), (30000, 8.5), (40000, 7), (50000, 6.5), (100000, 6), (1000000, 0)]) + _schema = DisperseSchema + + wind_eff_list = [15, 30, 45, 60, 70, 78, 80, 82, + 83, 84, 84, 84, 84, 84, 83, 83, + 82, 80, 79, 78, 77, 75, 73, 71, + 69, 67, 65, 63, 60, 58, 55, 53, + 50, 47, 44, 41, 38] + visc_eff_table = OrderedDict([(1, 68), + (2, 71), + (3, 72.5), + (4, 74), + (5, 75), + (7, 77), + (10, 78), + (20, 80), + (40, 83.5), + (70, 85.5), + (100, 87), + (300, 89.5), + (500, 90.5), + (700, 91), + (1000, 92), + (2000, 91), + (3000, 83), + (5000, 52), + (7000, 32), + (10000, 17), + (20000, 11), + (30000, 8.5), + (40000, 7), + (50000, 6.5), + (100000, 6), + (1000000, 0)]) def __init__(self, name=None, @@ -577,6 +699,7 @@ def __init__(self, onsite_reload_refuel=False, **kwargs): super(Disperse, self).__init__(**kwargs) + self.name = name self.transit = transit self.pass_length = pass_length @@ -587,25 +710,29 @@ def __init__(self, self.loading_type = loading_type self.pass_type = pass_type self.disp_oil_ratio 
= 20 if disp_oil_ratio is None else disp_oil_ratio - self.disp_eff = disp_eff self.onsite_reload_refuel = onsite_reload_refuel + self.disp_eff = disp_eff + if self.disp_eff is not None: self._disp_eff_type = 'fixed' else: self._disp_eff_type = 'auto' + # time to next state if platform is not None: if isinstance(platform, basestring): - #find platform name + # find platform name self.platform = Platform(_name=platform) else: - #platform is defined as a dict + # platform is defined as a dict self.platform = Platform(**platform) else: self.platform = platform + if units is None: units = dict([(k, v[0]) for k, v in self._attr.items()]) self._units = units + self.wind = wind self.cur_state = None self.oil_treated_this_timestep = 0 @@ -618,12 +745,20 @@ def __init__(self, self._area_this_sortie = 0 self._disp_sprayed_this_timestep = 0 self._remaining_dispersant = None - self._pass_time_tuple = self.platform.pass_duration_tuple(self.pass_length, self.pass_type) + + self._pass_time_tuple = (self.platform + .pass_duration_tuple(self.pass_length, + self.pass_type)) if dosage is not None: - self._dosage_m = uc.convert('oilconcentration', self.units['dosage'], 'micron', self.dosage) - self._dosage_m = uc.convert('length', 'micron', 'meters', self._dosage_m) - self.report=[] + self._dosage_m = uc.convert('oilconcentration', + self.units['dosage'], 'micron', + self.dosage) + self._dosage_m = uc.convert('length', + 'micron', 'meters', + self._dosage_m) + + self.report = [] self.array_types.update({'area', 'density', 'viscosity'}) def get_mission_data(self, @@ -633,10 +768,13 @@ def get_mission_data(self, efficiency=None, units=None): ''' - Given a dosage and an area to spray, will return a tuple of information as follows: - Minimize number of passes by using high swath_width. If pump rate cannot get to the dosage necessary - reduce the swath width until it can. 
+ Given a dosage and an area to spray, will return a tuple of information + as follows: + Minimize number of passes by using high swath_width. + If pump rate cannot get to the dosage necessary, reduce the swath width + until it can. Default units are ('gal/acre', 'm^3, 'nm', percent) + Return tuple is as below (num_passes, disp/pass, oil/pass) (number, gal, ft, gal/min) @@ -648,70 +786,93 @@ def get_mission_data(self, 'efficiency': 'percent'} # Efficiency determines how much of the pass length is - pass_area = self.get('swath_width', 'm') * uc.convert('length', units['pass_len'], 'm', pass_len) + pass_area = (self.get('swath_width', 'm') * + uc.convert('length', units['pass_len'], 'm', pass_len)) pass_len = uc.convert('length', units['pass_len'], 'm', pass_len) + app_speed = self.get('application_speed', 'm/s') + spray_time = pass_len / app_speed + max_dos = (self.get('pump_rate_max', 'm^3/s') * spray_time / pass_area) max_dos = uc.convert('length', 'm', 'micron', max_dos) max_dos = uc.convert('oilconcentration', 'micron', 'gal/acre', max_dos) def prepare_for_model_run(self, sc): self._setup_report(sc) + if self.on: sc.mass_balance['chem_dispersed'] = 0.0 + if self.cascade_on: self.cur_state = 'cascade' else: self.cur_state = 'retired' + self._remaining_dispersant = self.platform.get('payload', 'm^3') self.oil_treated_this_timestep = 0 + if 'systems' not in sc.mass_balance: sc.mass_balance['systems'] = {} - sc.mass_balance['systems'][self.id] = { - 'time_spraying': 0.0, - 'dispersed': 0.0, - 'payloads_delivered': 0, - 'dispersant_applied': 0.0, - 'oil_treated': 0.0, - 'area_covered': 0.0, - 'state': [] - } + sc.mass_balance['systems'][self.id] = {'time_spraying': 0.0, + 'dispersed': 0.0, + 'payloads_delivered': 0, + 'dispersant_applied': 0.0, + 'oil_treated': 0.0, + 'area_covered': 0.0, + 'state': []} def dosage_from_thickness(self, sc): - thickness = self._get_thickness(sc) # inches - self._dosage_m = uc.convert('length', 'inches', 'm', thickness) / 
self.disp_oil_ratio + thickness = self._get_thickness(sc) # inches + + self._dosage_m = (uc.convert('length', 'inches', 'm', thickness) / + self.disp_oil_ratio) self.dosage = uc.convert('length', 'inches', 'micron', thickness) - self.dosage = uc.convert('oilconcentration', 'micron', 'gal/acre', self.dosage) / self.disp_oil_ratio + self.dosage = (uc.convert('oilconcentration', + 'micron', 'gal/acre', self.dosage) / + self.disp_oil_ratio) def get_disp_eff_avg(self, sc, model_time): wind_eff_list = Disperse.wind_eff_list visc_eff_table = Disperse.visc_eff_table + vel = self.wind.get_value(model_time) spd = vel[0] + wind_eff = wind_eff_list[int(spd)] / 100. idxs = self.dispersable_oil_idxs(sc) - avg_visc = np.mean(sc['viscosity'][idxs] * 1000000) if len(idxs) > 0 else 1000000 + + if len(idxs) > 0: + avg_visc = np.mean(sc['viscosity'][idxs] * 1000000) + else: + avg_visc = 1000000 + visc_eff = visc_eff_table[visc_eff_table.keys()[np.searchsorted(visc_eff_table.keys(), avg_visc)]] / 100 + return wind_eff * visc_eff def get_disp_eff(self, sc, model_time): wind_eff_list = Disperse.wind_eff_list visc_eff_table = Disperse.visc_eff_table + vel = self.wind.get_value(model_time) spd = vel[0] + wind_eff = wind_eff_list[int(spd)] / 100. 
idxs = self.dispersable_oil_idxs(sc) visc = sc['viscosity'][idxs] * 1000000 - visc_idxs = np.array([np.searchsorted(visc_eff_table.keys(), v) for v in visc]) - visc_eff = np.array([visc_eff_table[visc_eff_table.keys()[v]] for v in visc_idxs]) / 100 + + visc_idxs = np.array([np.searchsorted(visc_eff_table.keys(), v) + for v in visc]) + visc_eff = np.array([visc_eff_table[visc_eff_table.keys()[v]] + for v in visc_idxs]) / 100 + return wind_eff * visc_eff def prepare_for_model_step(self, sc, time_step, model_time): ''' ''' - self.state = [] if self._is_active(model_time, time_step): @@ -722,24 +883,25 @@ def prepare_for_model_step(self, sc, time_step, model_time): if not self.active: return - if self._disp_eff_type != 'fixed': self.disp_eff = self.get_disp_eff_avg(sc, model_time) - slick_area = 'WHAT??' + + _slick_area = 'WHAT??' self.platform._ts_spray_time = 0 self._ts_payloads_delivered = 0 + if not isinstance(time_step, timedelta): + time_step = timedelta(seconds=time_step) - if not isinstance(time_step, datetime.timedelta): - time_step = datetime.timedelta(seconds=time_step) + self._time_remaining = timedelta(seconds=time_step.total_seconds()) + _zero = timedelta(seconds=0) - self._time_remaining = datetime.timedelta(seconds=time_step.total_seconds()) - zero = datetime.timedelta(seconds=0) if self.cur_state is None: # This is first step., setup inactivity if necessary if self.next_interval_index(model_time) != 0: - raise ValueError('disperse time series begins before time of first step!') + raise ValueError('disperse time series begins before time ' + 'of first step!') else: self.cur_state = 'retired' @@ -753,128 +915,221 @@ def prepare_for_model_step(self, sc, time_step, model_time): self.simulate_plane(sc, time_step, model_time) def simulate_boat(self, sc, time_step, model_time): - zero = datetime.timedelta(seconds=0) + zero = timedelta(seconds=0) ttni = self.time_to_next_interval(model_time) + tte = self.timeseries[-1][-1] - model_time if tte < zero: return - 
while self._time_remaining > zero: + while self._time_remaining > zero: if self.cur_state == 'retired': if model_time < self.timeseries[0][0]: tts = self.timeseries[0][0] - model_time self._time_remaining -= min(self._time_remaining, tts) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) + if self.time_remaining > 0: - #must just have started. Get ready + # must just have started. Get ready self.cur_state = 'ready' - self.report.append((model_time, 'Begin new operational period')) + self.report.append((model_time, + 'Begin new operational period')) else: self.cur_state = 'ready' - self.report.append((model_time, 'Begin new operational period')) + self.report.append((model_time, + 'Begin new operational period')) elif self.cur_state == 'ready': - if self.platform.sortie_possible(tte, self.transit, self.pass_length): + if self.platform.sortie_possible(tte, self.transit, + self.pass_length): # sortie is possible, so start immediately self.report.append((model_time, 'Starting sortie')) - self._next_state_time = model_time + datetime.timedelta(seconds=self.platform.one_way_transit_time(self.transit)) + self._next_state_time = (model_time + timedelta(seconds=self.platform.one_way_transit_time(self.transit))) self.cur_state = 'en_route' self._area_sprayed_this_sortie = 0 self._area_sprayed_this_ts = 0 else: # cannot sortie, so retire until next interval self.cur_state = 'deactivated' - self.report.append((model_time, 'Deactivating due to insufficient time remaining to conduct sortie')) + self.report.append((model_time, + 'Deactivating due to insufficient ' + 'time remaining to conduct sortie')) print self.report[-1] self._time_remaining -= min(self._time_remaining, ttni) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + model_time, time_step = self.update_time(self._time_remaining, + model_time, + 
time_step) elif self.cur_state == 'en_route': time_left = self._next_state_time - model_time - self.state.append(['transit', min(self._time_remaining, time_left).total_seconds()]) + self.state.append(['transit', + min(self._time_remaining,time_left) + .total_seconds()]) self._time_remaining -= min(self._time_remaining, time_left) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) + if self._time_remaining > zero: self.report.append((model_time, 'Reached slick')) + self._op_start = model_time - self._op_end = (self.timeseries[-1][-1] - datetime.timedelta(seconds=self.platform.one_way_transit_time(self.transit))) + self._op_end = (self.timeseries[-1][-1] - + timedelta(seconds=self.platform + .one_way_transit_time(self.transit))) + self._cur_pass_num = 1 self.cur_state = 'onsite' - dur = datetime.timedelta(hours=self.platform.get('max_op_time', 'hrs')) + dur = timedelta(hours=self.platform.get('max_op_time', + 'hrs')) + self._next_state_time = model_time + dur elif self.cur_state == 'onsite': remaining_op = self._op_end - model_time + if self.is_operating(model_time): interval_remaining = self.time_to_next_interval(model_time) - spray_time = min(self._time_remaining, remaining_op, interval_remaining) + spray_time = min(self._time_remaining, + remaining_op, + interval_remaining) + if self.dosage_type == 'auto': self.dosage_from_thickness(sc) + dosage = self.dosage - disp_possible = spray_time.total_seconds() * self.platform.eff_pump_rate(dosage) - disp_actual = min(self._remaining_dispersant, disp_possible) + disp_possible = (spray_time.total_seconds() * + self.platform.eff_pump_rate(dosage)) + disp_actual = min(self._remaining_dispersant, + disp_possible) + if disp_actual != disp_possible: - spray_time = datetime.timedelta(seconds=disp_actual / self.platform.eff_pump_rate(dosage)) + spray_time = timedelta(seconds=disp_actual / + 
self.platform.eff_pump_rate(dosage)) + treated_possible = disp_actual * self.disp_oil_ratio mass_treatable = np.mean(sc['density'][self.dispersable_oil_idxs(sc)]) * treated_possible oil_avail = self.dispersable_oil_amount(sc, 'kg') - self.report.append((model_time, 'Oil available: ' + str(oil_avail) + ' Treatable mass: ' + str(mass_treatable) + ' Dispersant Sprayed: ' + str(disp_actual))) - self.report.append((model_time, 'Sprayed ' + str(disp_actual) + 'm^3 dispersant in ' + str(spray_time) + ' on ' + str(oil_avail) + ' kg of oil')) + + self.report.append((model_time, + 'Oil available: {} ' + 'Treatable mass: {} ' + 'Dispersant Sprayed: {}' + .format(oil_avail, mass_treatable, + disp_actual))) + + self.report.append((model_time, + 'Sprayed {} m^3 dispersant ' + 'in {} ' + 'on {} kg of oil' + .format(disp_actual, spray_time, + oil_avail))) print self.report[-1] + self.state.append(['onsite', spray_time.total_seconds()]) + self._time_remaining -= spray_time self._disp_sprayed_this_timestep += disp_actual self._remaining_dispersant -= disp_actual - self._ts_payloads_delivered += (disp_actual / self.platform.get('payload', 'm^3')) + self._ts_payloads_delivered += (disp_actual / + self.platform.get('payload', 'm^3')) self.oil_treated_this_timestep += min(mass_treatable, oil_avail) model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) - if self._time_remaining > zero: #end of interval, end of operation, or out of dispersant/fuel + + if self._time_remaining > zero: + # end of interval, end of operation, or out of + # dispersant/fuel if self._remaining_dispersant == 0: - #go to reload + # go to reload if self.onsite_reload_refuel: self.cur_state = 'refuel_reload' - refuel_reload = datetime.timedelta(seconds=self.platform.refuel_reload(simul=self.loading_type)) - self._next_state_time = model_time + refuel_reload - self.report.append((model_time, 'Reloading/refueling')) + + refuel_reload = timedelta(seconds=self.platform + 
.refuel_reload(simul=self.loading_type)) + + self._next_state_time = (model_time + + refuel_reload) + self.report.append((model_time, + 'Reloading/refueling')) else: - #need to return to base + # need to return to base self.cur_state = 'rtb' - self._next_state_time = model_time + datetime.timedelta(seconds=self.platform.one_way_transit_time(self.transit)) - self.report.append((model_time, 'Out of dispersant, returning to base')) + self._next_state_time = model_time + timedelta(seconds=self.platform.one_way_transit_time(self.transit)) + + self.report.append((model_time, + 'Out of dispersant, ' + 'returning to base')) elif model_time == self._op_end: - self.report.append((model_time, 'Operation complete, returning to base')) + self.report.append((model_time, + 'Operation complete, ' + 'returning to base')) self.cur_state = 'rtb' - self._next_state_time = model_time + datetime.timedelta(seconds=self.platform.one_way_transit_time(self.transit)) + self._next_state_time = (model_time + + timedelta(seconds=self + .platform + .one_way_transit_time(self.transit))) else: - self._time_remaining -= min(self._time_remaining, remaining_op) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + self._time_remaining -= min(self._time_remaining, + remaining_op) + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) if self._time_remaining > zero: self.cur_state = 'rtb' - self.report.append((model_time, 'Operation complete, returning to base')) - self._next_state_time = model_time + datetime.timedelta(seconds=self.platform.one_way_transit_time(self.transit)) + self.report.append((model_time, + 'Operation complete, ' + 'returning to base')) + self._next_state_time = (model_time + + timedelta(seconds=self + .platform + .one_way_transit_time(self.transit))) elif self.cur_state == 'rtb': time_left = self._next_state_time - model_time - self.state.append(['transit', min(self._time_remaining, 
time_left).total_seconds()]) + + self.state.append(['transit', + min(self._time_remaining, + time_left).total_seconds()]) + self._time_remaining -= min(self._time_remaining, time_left) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) + if self._time_remaining > zero: self.report.append((model_time, 'Returned to base')) print self.report[-1] - refuel_reload = datetime.timedelta(seconds=self.platform.refuel_reload(simul=self.loading_type)) + + refuel_reload = timedelta(seconds=self.platform + .refuel_reload(simul=self + .loading_type)) + self._next_state_time = model_time + refuel_reload self.cur_state = 'refuel_reload' elif self.cur_state == 'refuel_reload': time_left = self._next_state_time - model_time - self.state.append(['reload', min(self._time_remaining, time_left).total_seconds()]) + + self.state.append(['reload', + min(self._time_remaining, + time_left).total_seconds()]) + self._time_remaining -= min(self._time_remaining, time_left) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) if self._time_remaining > zero: self.report.append((model_time, 'Refuel/reload complete')) print self.report[-1] - self._remaining_dispersant = self.platform.get('payload', 'm^3') + + self._remaining_dispersant = self.platform.get('payload', + 'm^3') + if self.onsite_reload_refuel: self.cur_state = 'onsite' else: @@ -882,244 +1137,449 @@ def simulate_boat(self, sc, time_step, model_time): def simulate_plane(self, sc, time_step, model_time): ttni = self.time_to_next_interval(model_time) - zero = datetime.timedelta(seconds=0) + zero = timedelta(seconds=0) + while self._time_remaining > zero: if ttni is None: if self.cur_state not in ['retired', 'reload', 'ready']: - raise ValueError('Operation is being deactivated while 
platform is active!') + raise ValueError('Operation is being deactivated ' + 'while platform is active!') + self.cur_state = 'deactivated' - self.report.append((model_time, 'Disperse operation has ended and is deactivated')) + + self.report.append((model_time, + 'Disperse operation has ended and is ' + 'deactivated')) print self.report[-1] + break if self.cur_state == 'retired': - if self.index_of(model_time) > -1 and self.timeseries[self.index_of(model_time)][0] == model_time: - #landed right on interval start, so ready immediately + if (self.index_of(model_time) > -1 and + self.timeseries[self.index_of(model_time)][0] == model_time): + # landed right on interval start, so ready immediately self.cur_state = 'ready' - self.report.append((model_time, 'Begin new operational period')) + + self.report.append((model_time, + 'Begin new operational period')) print self.report[-1] + continue + self._time_remaining -= min(self._time_remaining, ttni) + if self._time_remaining > zero: - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) + # hit interval boundary before ending timestep. 
- # If ending current interval or no remaining time, do nothing + # If ending current interval or no remaining time, + # do nothing # if start of next interval, set state to 'ready' - # entering new operational interval - # ending current interval + # entering new operational interval + # ending current interval if self.index_of(model_time) > -1: self.cur_state = 'ready' - self.report.append((model_time, 'Begin new operational period')) + + self.report.append((model_time, + 'Begin new operational period')) print self.report[-1] else: - interval_idx = self.index_of(model_time - time_step + self._time_remaining) - self.report.append((model_time, 'Ending current operational period')) + interval_idx = self.index_of(model_time - + time_step + + self._time_remaining) + + self.report.append((model_time, + 'Ending current operational ' + 'period')) print self.report[-1] elif self.cur_state == 'ready': - if self.platform.sortie_possible(ttni, self.transit, self.pass_length): + if self.platform.sortie_possible(ttni, self.transit, + self.pass_length): # sortie is possible, so start immediately + self.report.append((model_time, 'Starting sortie')) print self.report[-1] - self._next_state_time = model_time + datetime.timedelta(seconds=self.platform.one_way_transit_time(self.transit)) + + self._next_state_time = (model_time + + timedelta(seconds=self.platform + .one_way_transit_time(self.transit))) self.cur_state = 'en_route' self._area_sprayed_this_sortie = 0 self._area_sprayed_this_ts = 0 else: # cannot sortie, so retire until next interval self.cur_state = 'retired' - self.report.append((model_time, 'Retiring due to insufficient time remaining to conduct sortie')) + + self.report.append((model_time, + 'Retiring due to insufficient ' + 'time remaining to conduct sortie')) print self.report[-1] + self._time_remaining -= min(self._time_remaining, ttni) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + model_time, time_step = 
self.update_time(self._time_remaining, + model_time, + time_step) elif self.cur_state == 'en_route': time_left = self._next_state_time - model_time - self.state.append(['transit', min(self._time_remaining, time_left).total_seconds()]) + + self.state.append(['transit', + min(self._time_remaining, + time_left).total_seconds()]) + self._time_remaining -= min(self._time_remaining, time_left) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) + if self._time_remaining > zero: self.report.append((model_time, 'Reached slick')) print self.report[-1] + self._op_start = model_time - self._op_end = model_time + datetime.timedelta(seconds=self.platform.max_onsite_time(self.transit, self.loading_type)) + self._op_end = (model_time + + timedelta(seconds=self.platform + .max_onsite_time(self.transit, + self.loading_type))) + self._cur_pass_num = 1 self.cur_state = 'approach' - dur = datetime.timedelta(seconds=self.platform.pass_duration_tuple(self.pass_length, self.pass_type)[0]) + + dur = timedelta(seconds=self.platform + .pass_duration_tuple(self.pass_length, + self.pass_type)[0]) + self._next_state_time = model_time + dur - self.report.append((model_time, 'Starting approach for pass ' + str(self._cur_pass_num))) + + self.report.append((model_time, + 'Starting approach for pass {}' + .format(self._cur_pass_num))) print self.report[-1] elif self.cur_state == 'approach': time_left = self._next_state_time - model_time - self.state.append(['onsite', min(self._time_remaining, time_left).total_seconds()]) + + self.state.append(['onsite', + min(self._time_remaining, + time_left).total_seconds()]) + self._time_remaining -= min(self._time_remaining, time_left) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) + if 
self._time_remaining > zero: - spray_time = self.platform.pass_duration_tuple(self.pass_length, self.pass_type)[1] - self._next_state_time = model_time + datetime.timedelta(seconds=spray_time) + spray_time = (self.platform + .pass_duration_tuple(self.pass_length, + self.pass_type)[1]) + + self._next_state_time = (model_time + + timedelta(seconds=spray_time)) self.cur_state = 'disperse_' + str(self._cur_pass_num) - self.report.append((model_time, 'Starting pass ' + str(self._cur_pass_num))) + + self.report.append((model_time, + 'Starting pass {}' + .format(self._cur_pass_num))) elif self.cur_state == 'u-turn': if self.pass_type != 'bidirectional': - raise ValueError('u-turns should not happen in uni-directional passes') + raise ValueError('u-turns should not happen ' + 'in uni-directional passes') + time_left = self._next_state_time - model_time - self.state.append(['onsite', min(self._time_remaining, time_left).total_seconds()]) + + self.state.append(['onsite', + min(self._time_remaining, + time_left).total_seconds()]) + self._time_remaining -= min(self._time_remaining, time_left) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) + if self._time_remaining > zero: - spray_time = self.platform.pass_duration_tuple(self.pass_length, self.pass_type)[1] - self._next_state_time = model_time + datetime.timedelta(seconds=spray_time) - self.cur_state = 'disperse_' + str(self._cur_pass_num) + 'u' - self.report.append((model_time, 'Begin return pass of pass ' + str(self._cur_pass_num))) + spray_time = (self.platform + .pass_duration_tuple(self.pass_length, + self.pass_type)[1]) + + self._next_state_time = (model_time + + timedelta(seconds=spray_time)) + self.cur_state = 'disperse_{}u'.format(self._cur_pass_num) + + self.report.append((model_time, + 'Begin return pass of pass {}' + .format(self._cur_pass_num))) elif self.cur_state == 'departure': 
time_left = self._next_state_time - model_time - self.state.append(['onsite', min(self._time_remaining, time_left).total_seconds()]) + + self.state.append(['onsite', + min(self._time_remaining, + time_left).total_seconds()]) + self._time_remaining -= min(self._time_remaining, time_left) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) + if self._time_remaining > zero: - self.report.append((model_time, 'Disperse pass ' + str(self._cur_pass_num) + ' completed')) - passes_possible = self.platform.num_passes_possible(self._op_end - model_time, self.pass_length, self.pass_type) - passes_possible_after_holding = self.platform.num_passes_possible(self._op_end - model_time + time_step, self.pass_length, self.pass_type) - o_w_t_t = datetime.timedelta(seconds=self.platform.one_way_transit_time(self.transit, payload=False)) + self.report.append((model_time, + 'Disperse pass {} completed' + .format(self._cur_pass_num))) + + passes_possible = (self.platform + .num_passes_possible(self._op_end - model_time, + self.pass_length, + self.pass_type)) + passes_possible_after_holding = (self.platform + .num_passes_possible(self._op_end - model_time + time_step, + self.pass_length, + self.pass_type)) + + o_w_t_t = timedelta(seconds=self.platform + .one_way_transit_time(self.transit, + payload=False)) self._cur_pass_num += 1 + if self._remaining_dispersant == 0: # no dispersant, so return to base - self.reset_for_return_to_base(model_time, 'No dispersant remaining, returning to base') + self.reset_for_return_to_base(model_time, + 'No dispersant ' + 'remaining, ' + 'returning to base') elif np.isclose(self.dispersable_oil_amount(sc, 'kg'), 0): if passes_possible_after_holding > 0: - # no oil left, but can still do a pass after holding for one timestep + # no oil left, but can still do a pass after + # holding for one timestep self.cur_state = 'holding' 
self._next_state_time = model_time + time_step else: - self.reset_for_return_to_base(model_time, 'No oil, no time for holding pattern, returning to base') + self.reset_for_return_to_base(model_time, + 'No oil, no time ' + 'for holding ' + 'pattern, returning ' + 'to base') elif passes_possible == 0: # no passes possible, so RTB - self.reset_for_return_to_base(model_time, 'No time for further passes, returning to base') + self.reset_for_return_to_base(model_time, + 'No time for further ' + 'passes, returning to ' + 'base') else: # oil and payload still remaining. Spray again. - self.report.append((model_time, 'Starting disperse pass ' + str(self._cur_pass_num))) + self.report.append((model_time, + 'Starting disperse pass {}' + .format(self._cur_pass_num))) print self.report[-1] + self.cur_state = 'disperse_' + str(self._cur_pass_num) - self._next_state_time = model_time + datetime.timedelta(seconds=self._pass_time_tuple[1]) + self._next_state_time = (model_time + + timedelta(seconds=self._pass_time_tuple[1])) elif self.cur_state == 'holding': time_left = self._next_state_time - model_time - self.state.append(['onsite', min(self._time_remaining, time_left).total_seconds()]) + + self.state.append(['onsite', + min(self._time_remaining, + time_left).total_seconds()]) + self._time_remaining -= min(self._time_remaining, time_left) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) self.cur_state = 'approach' elif 'disperse' in self.cur_state: - pass_dur = datetime.timedelta(seconds=self.platform.pass_duration_tuple(self.pass_length, self.pass_type)[1]) + pass_dur = timedelta(seconds=self.platform + .pass_duration_tuple(self.pass_length, + self.pass_type)[1]) self._time_spraying = pass_dur.seconds + time_left_in_pass = self._next_state_time - model_time spray_time = min(self._time_remaining, time_left_in_pass) + if self.dosage_type == 'auto': 
self.dosage_from_thickness(sc) + dosage = self.dosage - disp_possible = spray_time.total_seconds() * self.platform.eff_pump_rate(dosage) + disp_possible = (spray_time.total_seconds() * + self.platform.eff_pump_rate(dosage)) + + disp_actual = min(self._remaining_dispersant, disp_possible) treated_possible = disp_actual * self.disp_oil_ratio mass_treatable = None - if (np.isnan(np.mean(sc['density'][self.dispersable_oil_idxs(sc)]))): + + if np.isnan(np.mean(sc['density'][self.dispersable_oil_idxs(sc) + ])): mass_treatable = 0 else: mass_treatable = np.mean(sc['density'][self.dispersable_oil_idxs(sc)]) * treated_possible + oil_avail = self.dispersable_oil_amount(sc, 'kg') - self.report.append((model_time, 'Oil available: ' + str(oil_avail) + ' Treatable mass: ' + str(mass_treatable) + ' Dispersant Sprayed: ' + str(disp_actual))) - self.report.append((model_time, 'Sprayed ' + str(disp_actual) + 'm^3 dispersant in ' + str(spray_time) + ' seconds on ' + str(oil_avail) + ' kg of oil')) + + self.report.append((model_time, + 'Oil available: {} ' + 'Treatable mass: {} ' + 'Dispersant Sprayed: {}' + .format(oil_avail, + mass_treatable, + disp_actual))) + + self.report.append((model_time, + 'Sprayed {} m^3 dispersant ' + 'in {} ' + 'on {} kg of oil' + .format(disp_actual, + spray_time, + oil_avail))) + self.state.append(['onsite', spray_time.total_seconds()]) + self._time_remaining -= spray_time self._disp_sprayed_this_timestep += disp_actual self._remaining_dispersant -= disp_actual - self._ts_payloads_delivered += (disp_actual / self.platform.get('payload', 'm^3')) - self.oil_treated_this_timestep += min(mass_treatable, oil_avail) + self._ts_payloads_delivered += (disp_actual / + self.platform.get('payload', + 'm^3')) + self.oil_treated_this_timestep += min(mass_treatable, + oil_avail) + + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) if 
self._time_remaining > zero: # completed a spray. - if self.pass_type == 'bidirectional' and self._remaining_dispersant > 0 and self.cur_state[-1] != 'u': + if (self.pass_type == 'bidirectional' and + self._remaining_dispersant > 0 and + self.cur_state[-1] != 'u'): self.cur_state = 'u-turn' self.report.append((model_time, 'Doing u-turn')) - self._next_state_time = model_time + datetime.timedelta(seconds=self._pass_time_tuple[2]) + self._next_state_time = (model_time + + timedelta(seconds=self + ._pass_time_tuple[2] + )) else: self.cur_state = 'departure' - self._next_state_time = model_time + datetime.timedelta(seconds=self._pass_time_tuple[-1]) - + self._next_state_time = (model_time + + timedelta(seconds=self + ._pass_time_tuple[-1] + )) elif self.cur_state == 'rtb': time_left = self._next_state_time - model_time - self.state.append(['transit', min(self._time_remaining, time_left).total_seconds()]) + + self.state.append(['transit', + min(self._time_remaining, + time_left).total_seconds()]) + self._time_remaining -= min(self._time_remaining, time_left) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) + if self._time_remaining > zero: self.report.append((model_time, 'Returned to base')) - refuel_reload = datetime.timedelta(seconds=self.platform.refuel_reload(simul=self.loading_type)) + + refuel_reload = timedelta(seconds=self.platform + .refuel_reload(simul=self + .loading_type)) + self._next_state_time = model_time + refuel_reload self.cur_state = 'refuel_reload' elif self.cur_state == 'refuel_reload': time_left = self._next_state_time - model_time - self.state.append(['reload', min(self._time_remaining, time_left).total_seconds()]) + + self.state.append(['reload', + min(self._time_remaining, + time_left).total_seconds()]) + self._time_remaining -= min(self._time_remaining, time_left) - model_time, time_step = 
self.update_time(self._time_remaining, model_time, time_step) + + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) + if self._time_remaining > zero: self.report.append((model_time, 'Refuel/reload complete')) print self.report[-1] - self._remaining_dispersant = self.platform.get('payload', 'm^3') + + self._remaining_dispersant = self.platform.get('payload', + 'm^3') self.cur_state = 'ready' elif self.cur_state == 'cascade': if self._next_state_time is None: - self._next_state_time = model_time + datetime.timedelta(seconds=self.platform.cascade_time(self.cascade_distance, payload=False)) + self._next_state_time = (model_time + + timedelta(seconds=self.platform + .cascade_time(self.cascade_distance, + payload=False))) + time_left = self._next_state_time - model_time self._time_remaining -= min(self._time_remaining, time_left) - model_time, time_step = self.update_time(self._time_remaining, model_time, time_step) + + model_time, time_step = self.update_time(self._time_remaining, + model_time, + time_step) + if self._time_remaining > zero: self.report.append((model_time, 'Cascade complete')) print self.report[-1] self.cur_state = 'ready' else: - raise ValueError('current state is not recognized: ' + self.cur_state) + raise ValueError('current state is not recognized: {}' + .format(self.cur_state)) def reset_for_return_to_base(self, model_time, message): self.report.append((model_time, message)) print self.report[-1] - o_w_t_t = datetime.timedelta(seconds=self.platform.one_way_transit_time(self.transit, payload=False)) + + o_w_t_t = timedelta(seconds=self.platform + .one_way_transit_time(self.transit, payload=False)) + self._next_state_time = model_time + o_w_t_t self._op_start = self._op_end = None self._cur_pass_num = 1 self.cur_state = 'rtb' def update_time(self, time_remaining, model_time, time_step): - if time_remaining > datetime.timedelta(seconds=0): + if time_remaining > timedelta(seconds=0): return model_time + time_step 
- time_remaining, time_remaining else: return model_time, time_step def dispersable_oil_idxs(self, sc): - # LEs must have a low viscosity, have not been fully chem dispersed, and must have a mass > 0 + # LEs must have a low viscosity, have not been fully chem dispersed, + # and must have a mass > 0 idxs = np.where(sc['viscosity'] * 1000000 < 1000000)[0] codes = sc['fate_status'][idxs] != bt_fate.disperse idxs = idxs[codes] nonzero_mass = sc['mass'][idxs] > 0 + idxs = idxs[nonzero_mass] + return idxs def dispersable_oil_amount(self, sc, units='gal'): idxs = self.dispersable_oil_idxs(sc) + if units in _valid_vol_units: tot_vol = np.sum(sc['mass'][idxs] / sc['density'][idxs]) return max(0, uc.convert('m^3', units, tot_vol)) else: tot_mass = np.sum(sc['mass'][idxs]) - return max(0, tot_mass - self.oil_treated_this_timestep / np.mean(sc['density'][idxs])) + return max(0, + (tot_mass - + self.oil_treated_this_timestep / + np.mean(sc['density'][idxs]))) def weather_elements(self, sc, time_step, model_time): - if not self.active or len(sc) == 0: sc.mass_balance['systems'][self.id]['state'] = [] return @@ -1127,36 +1587,53 @@ def weather_elements(self, sc, time_step, model_time): sc.mass_balance['systems'][self.id]['state'] = self.state idxs = self.dispersable_oil_idxs(sc) + if self.oil_treated_this_timestep != 0: - visc_eff_table = Disperse.visc_eff_table - wind_eff_list = Disperse.wind_eff_list + # visc_eff_table = Disperse.visc_eff_table + # wind_eff_list = Disperse.wind_eff_list + mass_proportions = sc['mass'][idxs] / np.sum(sc['mass'][idxs]) eff_reductions = self.get_disp_eff(sc, model_time) - mass_to_remove = self.oil_treated_this_timestep * mass_proportions * eff_reductions + mass_to_remove = (self.oil_treated_this_timestep * + mass_proportions * + eff_reductions) + + # org_mass = sc['mass'][idxs] - org_mass = sc['mass'][idxs] removed = self._remove_mass_indices(sc, mass_to_remove, idxs) print 'index, original mass, removed mass, final mass' - masstab = 
np.column_stack((idxs, org_mass, mass_to_remove, sc['mass'][idxs])) + + # masstab = np.column_stack((idxs, + # org_mass, + # mass_to_remove, + # sc['mass'][idxs])) + sc.mass_balance['chem_dispersed'] += sum(removed) - self.logger.warning('spray time: ' + str(type(self.platform._ts_spray_time))) - self.logger.warning('spray time out: ' + str(type(sc.mass_balance['systems'][self.id]['time_spraying']))) + + self.logger.warning('spray time: {}' + .format(type(self.platform._ts_spray_time))) + self.logger.warning('spray time out: {}' + .format(type(sc.mass_balance['systems'][self.id]['time_spraying']))) + sc.mass_balance['systems'][self.id]['time_spraying'] += self.platform._ts_spray_time sc.mass_balance['systems'][self.id]['dispersed'] += sum(removed) sc.mass_balance['systems'][self.id]['area_covered'] += self._area_sprayed_this_ts sc.mass_balance['systems'][self.id]['dispersant_applied'] += self._disp_sprayed_this_timestep sc.mass_balance['systems'][self.id]['oil_treated'] += self.oil_treated_this_timestep sc.mass_balance['systems'][self.id]['payloads_delivered'] += self._ts_payloads_delivered + sc.mass_balance['floating'] -= sum(removed) + zero_or_disp = np.isclose(sc['mass'][idxs], 0) new_status = sc['fate_status'][idxs] new_status[zero_or_disp] = bt_fate.disperse + sc['fate_status'][idxs] = new_status + self.oil_treated_this_timestep = 0 self.disp_sprayed_this_timestep = 0 - class BurnUnitsSchema(MappingSchema): offset = SchemaNode(String(), description='SI units for distance', @@ -1174,6 +1651,7 @@ class BurnUnitsSchema(MappingSchema): description='SI units for speed', validator=OneOf(_valid_vel_units)) + class BurnSchema(ResponseSchema): offset = SchemaNode(Integer()) boom_length = SchemaNode(Integer()) @@ -1183,18 +1661,8 @@ class BurnSchema(ResponseSchema): burn_efficiency_type = SchemaNode(String()) units = BurnUnitsSchema() -class Burn(Response): - _state = copy.deepcopy(Response._state) - _state += [Field('offset', save=True, update=True), - 
Field('boom_length', save=True, update=True), - Field('boom_draft', save=True, update=True), - Field('speed', save=True, update=True), - Field('throughput', save=True, update=True), - Field('burn_efficiency_type', save=True, update=True), - Field('units', save=True, update=True)] - - _schema = BurnSchema +class Burn(Response): _si_units = {'offset': 'ft', 'boom_length': 'ft', 'boom_draft': 'in', @@ -1207,6 +1675,17 @@ class Burn(Response): 'speed': ('velocity', _valid_vel_units), '_boom_capacity_max': ('volume', _valid_vol_units)} + _state = copy.deepcopy(Response._state) + _state += [Field('offset', save=True, update=True), + Field('boom_length', save=True, update=True), + Field('boom_draft', save=True, update=True), + Field('speed', save=True, update=True), + Field('throughput', save=True, update=True), + Field('burn_efficiency_type', save=True, update=True), + Field('units', save=True, update=True)] + + _schema = BurnSchema + def __init__(self, offset, boom_length, @@ -1218,18 +1697,21 @@ def __init__(self, **kwargs): super(Burn, self).__init__(**kwargs) + self.array_types.update({'mass': mass, 'density': density, 'frac_water': frac_water}) - self.offset = offset self._units = dict(self._si_units) self.units = units + + self.offset = offset self.boom_length = boom_length self.boom_draft = boom_draft self.speed = speed self.throughput = throughput self.burn_efficiency_type = burn_efficiency_type + self._swath_width = None self._area = None self._boom_capacity_max = 0 @@ -1252,23 +1734,35 @@ def __init__(self, def prepare_for_model_run(self, sc): self._setup_report(sc) + self._swath_width = 0.3 * self.get('boom_length') - self._area = self._swath_width * (0.4125 * self.get('boom_length') / 3) * 2 / 3 - self.set('_boom_capacity_max', self.get('boom_draft') / 36 * self._area, 'ft^3') + + self._area = (self._swath_width * + (0.4125 * self.get('boom_length') / 3) * + 2 / 3) + + self.set('_boom_capacity_max', + self.get('boom_draft') / 36 * self._area, + 'ft^3') + 
self._boom_capacity = self.get('_boom_capacity_max') self._offset_time = (self.offset * 0.00987 / self.get('speed')) * 60 self._area_coverage_rate = self._swath_width * self.get('speed') / 430 if self._swath_width > 1000: - self.report.append('Swaths > 1000 feet may not be achievable in the field') + self.report.append('Swaths > 1000 feet may not be achievable ' + 'in the field') if self.get('speed') > 1.2: - self.report.append('Excessive entrainment of oil likely to occur at speeds greater than 1.2 knots.') + self.report.append('Excessive entrainment of oil likely to occur ' + 'at speeds greater than 1.2 knots.') if self.on: sc.mass_balance['burned'] = 0.0 + if 'systems' not in sc.mass_balance: sc.mass_balance['systems'] = {} + sc.mass_balance['systems'][self.id] = {'boomed': 0.0, 'burned': 0.0, 'time_burning': 0.0, @@ -1299,7 +1793,9 @@ def prepare_for_model_step(self, sc, time_step, model_time): self._ts_area_covered = 0. self._state_list = [] - if self._is_active(model_time, time_step) or self._is_burning or self._is_cleaning: + if (self._is_active(model_time, time_step) or + self._is_burning or + self._is_cleaning): self._active = True else: self._active = False @@ -1308,10 +1804,13 @@ def prepare_for_model_step(self, sc, time_step, model_time): return self._time_remaining = time_step + while self._time_remaining > 0.: - if self._is_collecting == False and self._is_transiting == False \ - and self._is_burning == False and self._is_cleaning == False \ - and self._is_active(model_time, time_step): + if (self._is_collecting is False and + self._is_transiting is False and + self._is_burning is False and + self._is_cleaning is False and + self._is_active(model_time, time_step)): self._is_collecting = True if self._is_collecting: @@ -1332,39 +1831,52 @@ def _collect(self, sc, time_step, model_time): self._burn_rate = 0.14 * (1 - sc['frac_water'].mean()) oil_thickness = self._get_thickness(sc) - encounter_rate = 63.13 * self._swath_width * oil_thickness * 
self.get('speed') + encounter_rate = (63.13 * + self._swath_width * + oil_thickness * + self.get('speed')) emulsion_rr = encounter_rate * self.throughput + self._boomed_density = sc['density'].mean() + if oil_thickness > 0: # old ROC equation # time_to_fill = (self._boom_capacity_remaining / emulsion_rr) * 60 # new ebsp equation - time_to_fill = uc.convert('Volume', 'ft^3', 'gal', self._boom_capacity) / emulsion_rr - time_to_collect_remaining_oil = uc.convert('Volume', 'm^3', 'gal', sc.mass_balance['floating']) / emulsion_rr - + time_to_fill = (uc.convert('Volume', + 'ft^3', 'gal', + self._boom_capacity) / + emulsion_rr) else: time_to_fill = self._time_remaining if time_to_fill >= self._time_remaining: # doesn't finish filling the boom in this time step - self._ts_collected = uc.convert('Volume', 'gal', 'ft^3', emulsion_rr * self._time_remaining) + self._ts_collected = uc.convert('Volume', + 'gal', 'ft^3', + emulsion_rr * self._time_remaining) self._boom_capacity -= self._ts_collected - self._ts_area_covered = encounter_rate * (self._time_remaining / 60) + self._ts_area_covered = encounter_rate * self._time_remaining / 60. 
self._time_collecting_in_sim += self._time_remaining - self._state_list.append(['collect', self._time_remaining]) - self._time_remaining = 0.0 + self._state_list.append(['collect', self._time_remaining]) + self._time_remaining = 0.0 elif self._time_remaining > 0: # finishes filling the boom in this time step any time remaining # should be spend transiting to the burn position - self._ts_collected = uc.convert('Volume', 'gal', 'ft^3', emulsion_rr * time_to_fill) + self._ts_collected = uc.convert('Volume', + 'gal', 'ft^3', + emulsion_rr * time_to_fill) + self._ts_area_covered = encounter_rate * (time_to_fill / 60) self._boom_capacity -= self._ts_collected self._is_boom_full = True + self._time_remaining -= time_to_fill self._time_collecting_in_sim += time_to_fill self._offset_time_remaining = self._offset_time + self._is_collecting = False self._is_transiting = True @@ -1375,13 +1887,18 @@ def _transit(self, sc, time_step, model_time): # does it arrive and start burning? if self._offset_time_remaining > self._time_remaining: self._offset_time_remaining -= self._time_remaining + self._state_list.append(['transit', self._time_remaining]) + self._time_remaining = 0. elif self._time_remaining > 0: self._time_remaining -= self._offset_time_remaining + self._state_list.append(['transit', self._offset_time_remaining]) + self._offset_time_remaining = 0 + self._is_transiting = False if self._is_boom_full: self._is_burning = True @@ -1392,56 +1909,76 @@ def _burn(self, sc, time_step, model_time): # burning if self._burn_time is None: self._ts_num_burns = 1 - self._burn_time = (0.33 * self.get('boom_draft') / self._burn_rate) * 60 + self._burn_time = (0.33 * + self.get('boom_draft') / + self._burn_rate * + 60.) self._burn_time_remaining = self._burn_time + if not np.isclose(self._boom_capacity, 0): # this is a special case if the boom didn't fill up all the way # due to lack of oil or somethig. 
- self._burn_time_remaining = self._burn_time * ((1 - self._boom_capacity) / self.get('_boom_capacity_max')) + self._burn_time_remaining = (self._burn_time * + (1 - self._boom_capacity) / + self.get('_boom_capacity_max')) self._is_boom_full = False + if self._burn_time_remaining > self._time_remaining: frac_burned = self._time_remaining / self._burn_time burned = self.get('_boom_capacity_max') * frac_burned + self._burn_time_remaining -= self._time_remaining self._time_burning += self._burn_time_remaining + self._state_list.append(['burn', self._time_remaining]) self._time_remaining = 0. elif self._time_remaining > 0: burned = self.get('_boom_capacity_max') - self._boom_capacity + self._boom_capacity += burned self._ts_burned = burned + self._time_burning += self._burn_time_remaining self._time_remaining -= self._burn_time_remaining + self._state_list.append(['burn', self._burn_time_remaining]) + self._burn_time_remaining = 0. + self._ts_burned = burned + self._is_burning = False self._is_cleaning = True + self._cleaning_time_remaining = 3600 # 1hr in seconds def _clean(self, sc, time_step, model_time): # cleaning self._burn_time = None self._burn_rate = None + if self._cleaning_time_remaining > self._time_remaining: self._cleaning_time_remaining -= self._time_remaining + self._state_list.append(['clean', self._time_remaining]) self._time_remaining = 0. elif self._time_remaining > 0: self._time_remaining -= self._cleaning_time_remaining + self._state_list.append(['clean', self._cleaning_time_remaining]) self._cleaning_time_remaining = 0. + self._is_cleaning = False - if(self._is_active(model_time, time_step)): + + if self._is_active(model_time, time_step): self._is_transiting = True self._offset_time_remaining = self._offset_time else: self._time_remaining = 0. 
- def weather_elements(self, sc, time_step, model_time): ''' Remove mass from each le equally for now, no flagging for not @@ -1464,8 +2001,12 @@ def weather_elements(self, sc, time_step, model_time): sc.mass_balance['systems'][self.id]['state'] = self._state_list if self._ts_collected > 0: - collected = uc.convert('Volume', 'ft^3', 'm^3', self._ts_collected) * self._boomed_density + collected = (uc.convert('Volume', + 'ft^3', 'm^3', + self._ts_collected) * + self._boomed_density) actual_collected = self._remove_mass_simple(data, collected) + sc.mass_balance['boomed'] += actual_collected sc.mass_balance['systems'][self.id]['boomed'] += actual_collected @@ -1474,16 +2015,23 @@ def weather_elements(self, sc, time_step, model_time): self._boom_capacity += collected - actual_collected self.logger.debug('{0} amount boomed for {1}: {2}' - .format(self._pid, substance.name, collected)) + .format(self._pid, + substance.name, + collected)) if self._ts_burned > 0: - burned = uc.convert('Volume', 'ft^3', 'm^3', self._ts_burned) * self._boomed_density + burned = (uc.convert('Volume', + 'ft^3', 'm^3', + self._ts_burned) * + self._boomed_density) + sc.mass_balance['burned'] += burned sc.mass_balance['boomed'] -= burned sc.mass_balance['systems'][self.id]['burned'] += burned sc.mass_balance['systems'][self.id]['time_burning'] = self._time_burning - # make sure we didn't burn more than we boomed if so correct the amount + # make sure we didn't burn more than we boomed + # if so correct the amount if sc.mass_balance['boomed'] < 0: sc.mass_balance['burned'] += sc.mass_balance['boomed'] sc.mass_balance['systems'][self.id]['burned'] += sc.mass_balance['boomed'] @@ -1492,6 +2040,7 @@ def weather_elements(self, sc, time_step, model_time): self.logger.debug('{0} amount burned for {1}: {2}' .format(self._pid, substance.name, burned)) + class SkimUnitsSchema(MappingSchema): storage = SchemaNode(String(), description='SI units for onboard storage', @@ -1502,12 +2051,12 @@ class 
SkimUnitsSchema(MappingSchema): validator=OneOf(_valid_dis_units)) nameplate_pump = SchemaNode(String(), - description='SI units for nameplate', - validator=OneOf(_valid_dis_units)) + description='SI units for nameplate', + validator=OneOf(_valid_dis_units)) discharge_pump = SchemaNode(String(), - description='SI units for discharge', - validator=OneOf(_valid_dis_units)) + description='SI units for discharge', + validator=OneOf(_valid_dis_units)) speed = SchemaNode(String(), description='SI units for speed', @@ -1517,6 +2066,7 @@ class SkimUnitsSchema(MappingSchema): description='SI units for length', validator=OneOf(_valid_dist_units)) + class SkimSchema(ResponseSchema): units = SkimUnitsSchema() speed = SchemaNode(Float()) @@ -1528,8 +2078,8 @@ class SkimSchema(ResponseSchema): skim_efficiency_type = SchemaNode(String()) decant = SchemaNode(Float()) decant_pump = SchemaNode(Float()) - rig_time = SchemaNode(TimeDelta()) - transit_time = SchemaNode(TimeDelta()) + rig_time = SchemaNode(Float()) + transit_time = SchemaNode(Float()) offload_to = SchemaNode(String(), missing=drop) discharge_pump = SchemaNode(Float()) recovery = SchemaNode(String()) @@ -1538,7 +2088,22 @@ class SkimSchema(ResponseSchema): validator=validators.convertible_to_seconds, missing=drop) + class Skim(Response): + _si_units = {'storage': 'bbl', + 'decant_pump': 'gpm', + 'nameplate_pump': 'gpm', + 'speed': 'kts', + 'swath_width': 'ft', + 'discharge_pump': 'gpm'} + + _units_type = {'storage': ('volume', _valid_vol_units), + 'decant_pump': ('discharge', _valid_dis_units), + 'nameplate_pump': ('discharge', _valid_dis_units), + 'speed': ('velocity', _valid_vel_units), + 'swath_width': ('length', _valid_dist_units), + 'discharge_pump': ('discharge', _valid_dis_units)} + _state = copy.deepcopy(Response._state) _state += [Field('units', save=True, update=True), Field('speed', save=True, update=True), @@ -1558,20 +2123,6 @@ class Skim(Response): _schema = SkimSchema - _si_units = {'storage': 'bbl', - 
'decant_pump': 'gpm', - 'nameplate_pump': 'gpm', - 'speed': 'kts', - 'swath_width': 'ft', - 'discharge_pump': 'gpm'} - - _units_type = {'storage': ('volume', _valid_vol_units), - 'decant_pump': ('discharge', _valid_dis_units), - 'nameplate_pump': ('discharge', _valid_dis_units), - 'speed': ('velocity', _valid_vel_units), - 'swath_width': ('length', _valid_dist_units), - 'discharge_pump': ('discharge', _valid_dis_units)} - def __init__(self, speed, storage, @@ -1589,7 +2140,6 @@ def __init__(self, transit_time, units=_si_units, **kwargs): - super(Skim, self).__init__(**kwargs) self.speed = speed @@ -1606,6 +2156,7 @@ def __init__(self, self.discharge_pump = discharge_pump self.skim_efficiency_type = skim_efficiency_type self.transit_time = transit_time + self._units = dict(self._si_units) self._is_collecting = False @@ -1616,40 +2167,48 @@ def __init__(self, def prepare_for_model_run(self, sc): self._setup_report(sc) self._storage_remaining = self.get('storage', 'gal') - self._coverage_rate = self.get('swath_width') * self.get('speed') * 0.00233 - self.offload = (self.get('storage', 'gal') / self.get('discharge_pump', 'gpm')) * 60 + + self._coverage_rate = (self.get('swath_width') * + self.get('speed') * + 0.00233) + + self.offload = (self.get('storage', 'gal') / + self.get('discharge_pump', 'gpm') * + 60.) 
if self.on: sc.mass_balance['skimmed'] = 0.0 if 'systems' not in sc.mass_balance: sc.mass_balance['systems'] = {} - sc.mass_balance['systems'][self.id] = { - 'skimmed': 0.0, - 'fluid_collected': 0.0, - 'time_collecting': 0.0, - 'emulsion_collected': 0.0, - 'oil_collected': 0.0, - 'water_collected': 0.0, - 'water_decanted': 0.0, - 'water_retained': 0.0, - 'area_covered': 0.0, - 'num_fills': 0., - 'storage_remaining': 0.0, - 'state': []} + sc.mass_balance['systems'][self.id] = {'skimmed': 0.0, + 'fluid_collected': 0.0, + 'time_collecting': 0.0, + 'emulsion_collected': 0.0, + 'oil_collected': 0.0, + 'water_collected': 0.0, + 'water_decanted': 0.0, + 'water_retained': 0.0, + 'area_covered': 0.0, + 'num_fills': 0., + 'storage_remaining': 0.0, + 'state': []} self._is_collecting = True def prepare_for_model_step(self, sc, time_step, model_time): - if self._is_active(model_time, time_step) or self._is_transiting or self._is_offloading: + if (self._is_active(model_time, time_step) or + self._is_transiting or + self._is_offloading): self._active = True - else : + else: self._active = False if not self.active: return self._state_list = [] + self._ts_num_fills = 0. self._ts_emulsion_collected = 0. self._ts_oil_collected = 0. @@ -1662,15 +2221,20 @@ def prepare_for_model_step(self, sc, time_step, model_time): self._time_remaining = time_step - if hasattr(self, 'barge_arrival') and self.barge_arrival is not None: #type(self.barge_arrival) is datetime.date: + if (hasattr(self, 'barge_arrival') and + self.barge_arrival is not None): # if there's a barge so a modified cycle while self._time_remaining > 0.: if self._is_collecting: self._collect(sc, time_step, model_time) else: - while self._time_remaining > 0. and self._is_active(model_time, time_step) \ - or self._time_remaining > 0. and self._is_transiting \ - or self._time_remaining > 0. and self._is_offloading: + while (self._time_remaining > 0. and + self._is_active(model_time, time_step) or + self._time_remaining > 0. 
and + self._is_transiting or + self._time_remaining > 0. and + self._is_offloading): + # TODO: A bunch of conditional logic above seems redundant if self._is_collecting: self._collect(sc, time_step, model_time) @@ -1680,39 +2244,54 @@ def prepare_for_model_step(self, sc, time_step, model_time): if self._is_offloading: self._offload(sc, time_step, model_time) - def _collect(self, sc, time_step, model_time): thickness = self._get_thickness(sc) + if self.recovery_ef > 0 and self.throughput > 0 and thickness > 0: - self._maximum_effective_swath = self.get('nameplate_pump') * self.get('recovery_ef') / (63.13 * self.get('speed', 'kts') * thickness * self.throughput) + self._maximum_effective_swath = (self.get('nameplate_pump') * + self.get('recovery_ef') / + (63.13 * + self.get('speed', 'kts') * + thickness * + self.throughput)) else: self._maximum_effective_swath = 0 if self.get('swath_width', 'ft') > self._maximum_effective_swath: - swath = self._maximum_effective_swath; + swath = self._maximum_effective_swath else: swath = self.get('swath_width', 'ft') if swath > 1000: - self.report.append('Swaths > 1000 feet may not be achievable in the field.') + self.report.append('Swaths > 1000 feet may not be achievable ' + 'in the field.') encounter_rate = thickness * self.get('speed', 'kts') * swath * 63.13 rate_of_coverage = swath * self.get('speed', 'kts') * 0.00233 + if encounter_rate > 0: recovery = self._getRecoveryEfficiency() if recovery > 0: - totalFluidRecoveryRate = encounter_rate * (self.throughput / recovery) + totalFluidRecoveryRate = (encounter_rate * + self.throughput / + recovery) if totalFluidRecoveryRate > self.get('nameplate_pump'): # total fluid recovery rate is greater than nameplate # pump, recalculate the throughput efficiency and # total fluid recovery rate again with the new throughput - throughput = self.get('nameplate_pump') * recovery / encounter_rate - totalFluidRecoveryRate = encounter_rate * (throughput / recovery) - msg = ('{0.name} - Total Fluid 
Recovery Rate is greater than Nameplate \ - Pump Rate, recalculating Throughput Efficiency').format(self) - self.logger.warning(msg) + throughput = (self.get('nameplate_pump') * + recovery / + encounter_rate) + totalFluidRecoveryRate = (encounter_rate * + throughput / + recovery) + + self.logger.warning('{0.name} - Total Fluid Recovery Rate ' + 'is greater than Nameplate Pump Rate. ' + 'Recalculating Throughput Efficiency' + .format(self)) else: throughput = self.throughput @@ -1721,43 +2300,61 @@ def _collect(self, sc, time_step, model_time): waterRecoveryRate = (1 - recovery) * totalFluidRecoveryRate waterRetainedRate = waterRecoveryRate * (1 - self.decant) - computedDecantRate = (totalFluidRecoveryRate - emulsionRecoveryRate) * self.decant + computedDecantRate = (self.decant * + (totalFluidRecoveryRate - + emulsionRecoveryRate)) decantRateDifference = 0. + if computedDecantRate > self.get('decant_pump'): - decantRateDifference = computedDecantRate - self.get('decant_pump') + decantRateDifference = (computedDecantRate - + self.get('decant_pump')) recoveryRate = emulsionRecoveryRate + waterRecoveryRate - retainRate = emulsionRecoveryRate + waterRetainedRate + decantRateDifference - oilRecoveryRate = emulsionRecoveryRate * (1 - sc['frac_water'].mean()) - waterTakenOn = totalFluidRecoveryRate - emulsionRecoveryRate + retainRate = (emulsionRecoveryRate + + waterRetainedRate + + decantRateDifference) + oilRecoveryRate = (emulsionRecoveryRate * + (1 - sc['frac_water'].mean())) + # waterTakenOn = (totalFluidRecoveryRate - + # emulsionRecoveryRate) freeWaterRecoveryRate = recoveryRate - emulsionRecoveryRate freeWaterRetainedRate = retainRate - emulsionRecoveryRate - freeWaterDecantRate = freeWaterRecoveryRate - freeWaterRetainedRate + freeWaterDecantRate = (freeWaterRecoveryRate - + freeWaterRetainedRate) - # timeToFill = .7 * self._storage_remaining / (emulsionRecoveryRate + (waterTakenOn - (waterTakenOn * self.get('decant_pump', 'gpm') / 100))) * 60 - timeToFill = (.7 
* self._storage_remaining / retainRate * 60) * 60 + timeToFill = (.7 * + self._storage_remaining / + retainRate * + 60. * 60.) if timeToFill > self._time_remaining: - # going to take more than this timestep to fill the storage + # going to take more than this timestep to fill the + # storage time_collecting = self._time_remaining self._time_remaining = 0. else: # storage is filled during this timestep time_collecting = timeToFill + self._time_remaining -= timeToFill self._transit_remaining = (self.transit_time * 60) + self._is_collecting = False self._is_transiting = True self._state_list.append(['skim', time_collecting]) + fluid_collected = retainRate * (time_collecting / 60) - if fluid_collected > 0 and \ - fluid_collected <= self._storage_remaining: - self._ts_num_fills += fluid_collected / self.get('storage', 'gal') + + if (fluid_collected > 0 and + fluid_collected <= self._storage_remaining): + self._ts_num_fills += (fluid_collected / + self.get('storage', 'gal')) elif self._storage_remaining > 0: - self._ts_num_fills += self._storage_remaining / self.get('storage', 'gal') + self._ts_num_fills += (self._storage_remaining / + self.get('storage', 'gal')) if fluid_collected > self._storage_remaining: self._storage_remaining = 0 @@ -1766,12 +2363,18 @@ def _collect(self, sc, time_step, model_time): self._ts_time_collecting += time_collecting self._ts_fluid_collected += fluid_collected - self._ts_emulsion_collected += emulsionRecoveryRate * (time_collecting / 60) - self._ts_oil_collected += oilRecoveryRate * (time_collecting / 60) - self._ts_water_collected += freeWaterRecoveryRate * (time_collecting / 60) - self._ts_water_decanted += freeWaterDecantRate * (time_collecting / 60) - self._ts_water_retained += freeWaterRetainedRate * (time_collecting / 60) - self._ts_area_covered += rate_of_coverage * (time_collecting / 60) + self._ts_emulsion_collected += (emulsionRecoveryRate * + time_collecting / 60.) 
+ self._ts_oil_collected += (oilRecoveryRate * + time_collecting / 60.) + self._ts_water_collected += (freeWaterRecoveryRate * + time_collecting / 60.) + self._ts_water_decanted += (freeWaterDecantRate * + time_collecting / 60.) + self._ts_water_retained += (freeWaterRetainedRate * + time_collecting / 60.) + self._ts_area_covered += (rate_of_coverage * + time_collecting / 60.) else: self._no_op_step() @@ -1779,16 +2382,21 @@ def _collect(self, sc, time_step, model_time): self._no_op_step() else: self._state_list.append(['skim', self._time_remaining]) + self._no_op_step() def _transit(self, sc, time_step, model_time): # transiting back to shore to offload - if self._time_remaining >= self._transit_remaining: + print('time', self._time_remaining) + print('remaining', self._transit_remaining) + if self._time_remaining >= self._transit_remaining: self._state_list.append(['transit', self._transit_remaining]) + self._time_remaining -= self._transit_remaining self._transit_remaining = 0. self._is_transiting = False + if self._storage_remaining == 0.0: self._is_offloading = True self._offload_remaining = self.offload + (self.rig_time * 60) @@ -1796,20 +2404,25 @@ def _transit(self, sc, time_step, model_time): self._is_collecting = True else: self._state_list.append(['transit', self._time_remaining]) + self._transit_remaining -= self._time_remaining self._time_remaining = 0. def _offload(self, sc, time_step, model_time): if self._time_remaining >= self._offload_remaining: self._state_list.append(['offload', self._offload_remaining]) + self._time_remaining -= self._offload_remaining self._offload_remaining = 0. self._storage_remaining = self.get('storage', 'gal') + self._is_offloading = False self._is_transiting = True + self._transit_remaining = (self.transit_time * 60) else: self._state_list.append(['offload', self._time_remaining]) + self._offload_remaining -= self._time_remaining self._time_remaining = 0. 
@@ -1830,12 +2443,16 @@ def weather_elements(self, sc, time_step, model_time): sc.mass_balance['systems'][self.id]['state'] = self._state_list - if hasattr(self, '_ts_oil_collected') and self._ts_oil_collected is not None: + if (hasattr(self, '_ts_oil_collected') and + self._ts_oil_collected is not None): actual = self._remove_mass_simple(data, self._ts_oil_collected) + sc.mass_balance['skimmed'] += actual self.logger.debug('{0} amount boomed for {1}: {2}' - .format(self._pid, substance.name, self._ts_oil_collected)) + .format(self._pid, + substance.name, + self._ts_oil_collected)) platform_balance = sc.mass_balance['systems'][self.id] platform_balance['skimmed'] += actual @@ -1851,17 +2468,15 @@ def weather_elements(self, sc, time_step, model_time): platform_balance['num_fills'] += self._ts_num_fills - def _getRecoveryEfficiency(self): - # scaffolding method - # will eventually include logic for calculating - # recovery efficiency based on wind and oil visc. - + # scaffolding method will eventually include logic for calculating + # recovery efficiency based on wind and oil viscosity. 
return self.recovery_ef + if __name__ == '__main__': print None - d = Disperse(name = 'test') + d = Disperse(name='test') p = Platform(_name='Test Platform') import pprint as pp ser = p.serialize() diff --git a/py_gnome/requirements.txt b/py_gnome/requirements.txt index e28518fba..29bcb16e3 100644 --- a/py_gnome/requirements.txt +++ b/py_gnome/requirements.txt @@ -34,7 +34,7 @@ Cython ## dependencies that aren't on PyPi -git+https://github.com/NOAA-ORR-ERD/PyNUCOS.git@v2.5.5#egg=unit_conversion +git+https://github.com/NOAA-ORR-ERD/PyNUCOS.git@v2.6.1#egg=unit_conversion git+https://github.com/NOAA-ORR-ERD/OilLibrary.git@v1.0.0#egg=oil_library diff --git a/py_gnome/tests/unit_tests/conftest.py b/py_gnome/tests/unit_tests/conftest.py index 59d8409af..baec3411e 100644 --- a/py_gnome/tests/unit_tests/conftest.py +++ b/py_gnome/tests/unit_tests/conftest.py @@ -51,11 +51,11 @@ def dump(): try: shutil.rmtree(dump_loc) - except: + except Exception: pass try: os.makedirs(dump_loc) - except: + except Exception: pass return dump_loc @@ -328,6 +328,7 @@ def invalid_rq(): # use this for wind and current deterministic (r,theta) + rq = np.array([(1, 0), (1, 45), (1, 90), @@ -445,7 +446,8 @@ def wind_circ(wind_timeseries): from gnome import environment dtv_rq = wind_timeseries['rq'] - wm = environment.Wind(timeseries=dtv_rq, format='r-theta', + + wm = environment.Wind(timeseries=dtv_rq, coord_sys='r-theta', units='meter per second') return {'wind': wm, 'rq': dtv_rq, 'uv': wind_timeseries['uv']} diff --git a/py_gnome/tests/unit_tests/test_environment/sample_data/gen_analytical_datasets.py b/py_gnome/tests/unit_tests/test_environment/gen_analytical_datasets.py similarity index 70% rename from py_gnome/tests/unit_tests/test_environment/sample_data/gen_analytical_datasets.py rename to py_gnome/tests/unit_tests/test_environment/gen_analytical_datasets.py index 380dfdb81..101e735f4 100644 --- a/py_gnome/tests/unit_tests/test_environment/sample_data/gen_analytical_datasets.py +++ 
b/py_gnome/tests/unit_tests/test_environment/gen_analytical_datasets.py @@ -1,38 +1,33 @@ -import numpy as np -import netCDF4 as nc4 - -from gnome.environment.gridded_objects_base import Grid_S, PyGrid - import os from datetime import datetime, timedelta +import numpy as np +import netCDF4 as nc4 -from gnome import scripting -from gnome import utilities - - -from gnome.model import Model - -from gnome.spill import point_line_release_spill -from gnome.movers import RandomMover, constant_wind_mover, GridCurrentMover - +from gnome.environment.gridded_objects_base import Grid_S, PyGrid from gnome.environment import GridCurrent -from gnome.movers.py_current_movers import PyCurrentMover -from gnome.outputters import Renderer, NetCDFOutput def gen_vortex_3D(filename=None): x, y = np.mgrid[-30:30:61j, -30:30:61j] + y = np.ascontiguousarray(y.T) x = np.ascontiguousarray(x.T) + x_size = 61 y_size = 61 + g = Grid_S(node_lon=x, node_lat=y) g.build_celltree() + lin_nodes = g._cell_trees['node'][1] - lin_faces = np.array([np.array([([lx, lx + x_size + 1, lx + 1], [lx, lx + x_size, lx + x_size + 1]) for lx in range(0, x_size - 1, 1)]) + ly * x_size for ly in range(0, y_size - 1)]) + lin_faces = np.array([np.array([([lx, lx + x_size + 1, lx + 1], + [lx, lx + x_size, lx + x_size + 1]) + for lx in range(0, x_size - 1, 1)]) + ly * x_size + for ly in range(0, y_size - 1)]) lin_faces = lin_faces.reshape(-1, 3) + # y += np.sin(x) / 1 # x += np.sin(x) / 5 @@ -40,10 +35,12 @@ def gen_vortex_3D(filename=None): tarr = [t0 + timedelta(hours=i) for i in range(0, 11)] angs = -np.arctan2(y, x) mag = np.sqrt(x ** 2 + y ** 2) + vx = np.cos(angs) * mag vy = np.sin(angs) * mag vx = vx[np.newaxis, :] * 20 vy = vy[np.newaxis, :] * 20 + vw = -0.001 d_scale = [1, 0.5, 0, -0.5, -1] @@ -71,35 +68,45 @@ def gen_vortex_3D(filename=None): lin_tdvy = np.array([lin_dvy * t for t in t_scale]) ds = None + if filename is not None: ds = nc4.Dataset(filename, 'w', diskless=True, persist=True) + 
ds.createDimension('y', y.shape[0]) ds.createDimension('x', x.shape[1]) ds.createDimension('time', len(tarr)) ds.createDimension('depth', len(d_scale)) + ds.createVariable('x', 'f8', dimensions=('x', 'y')) ds['x'][:] = x + ds.createVariable('y', 'f8', dimensions=('x', 'y')) ds['y'][:] = y + ds.createVariable('time', 'f8', dimensions=('time')) ds['time'][:] = nc4.date2num(tarr, 'hours since {0}'.format(t0)) ds['time'].setncattr('units', 'hours since {0}'.format(t0)) + ds.createVariable('vx', 'f8', dimensions=('x', 'y')) ds.createVariable('vy', 'f8', dimensions=('x', 'y')) ds['vx'][:] = vx ds['vy'][:] = vy + ds.createVariable('tvx', 'f8', dimensions=('time', 'x', 'y')) ds.createVariable('tvy', 'f8', dimensions=('time', 'x', 'y')) ds['tvx'][:] = tvx ds['tvy'][:] = tvy + ds.createVariable('dvx', 'f8', dimensions=('depth', 'x', 'y')) ds.createVariable('dvy', 'f8', dimensions=('depth', 'x', 'y')) ds['dvx'][:] = dvx ds['dvy'][:] = dvy + ds.createVariable('tdvx', 'f8', dimensions=('time', 'depth', 'x', 'y')) ds.createVariable('tdvy', 'f8', dimensions=('time', 'depth', 'x', 'y')) ds['tdvx'][:] = tdvx ds['tdvy'][:] = tdvy + for v in ds.variables: if 'v' in v: ds[v].units = 'm/s' @@ -108,6 +115,7 @@ def gen_vortex_3D(filename=None): ds.createDimension('nele', lin_faces.shape[0]) ds.createDimension('two', 2) ds.createDimension('three', 3) + ds.createVariable('nodes', 'f8', dimensions=('nv', 'two')) ds.createVariable('faces', 'f8', dimensions=('nele', 'three')) ds.createVariable('lin_vx', 'f8', dimensions=('nv')) @@ -118,6 +126,7 @@ def gen_vortex_3D(filename=None): ds.createVariable('lin_dvy', 'f8', dimensions=('depth', 'nv')) ds.createVariable('lin_tdvx', 'f8', dimensions=('time', 'depth', 'nv')) ds.createVariable('lin_tdvy', 'f8', dimensions=('time', 'depth', 'nv')) + for k, v in {'nodes': lin_nodes, 'faces': lin_faces, 'lin_vx': lin_vx, @@ -130,28 +139,48 @@ def gen_vortex_3D(filename=None): 'lin_tdvy': lin_tdvy }.items(): ds[k][:] = v + if 'lin' in k: ds[k].units = 'm/s' - 
PyGrid._get_grid_type(ds, grid_topology={'node_lon': 'x', 'node_lat': 'y'}) + + PyGrid._get_grid_type(ds, + grid_topology={'node_lon': 'x', 'node_lat': 'y'}) PyGrid._get_grid_type(ds) + ds.setncattr('grid_type', 'sgrid') + if ds is not None: # Need to test the dataset... sgt = {'node_lon': 'x', 'node_lat': 'y'} - sg = PyGrid.from_netCDF(dataset=ds, grid_topology=sgt, grid_type='sgrid') - sgc1 = GridCurrent.from_netCDF(dataset=ds, varnames=['vx', 'vy'], grid_topology=sgt) - sgc2 = GridCurrent.from_netCDF(dataset=ds, varnames=['tvx', 'tvy'], grid_topology=sgt) - sgc3 = GridCurrent.from_netCDF(dataset=ds, varnames=['dvx', 'dvy'], grid_topology=sgt) - sgc4 = GridCurrent.from_netCDF(dataset=ds, varnames=['tdvx', 'tdvy'], grid_topology=sgt) + _sg = PyGrid.from_netCDF(dataset=ds, grid_topology=sgt, + grid_type='sgrid') + + _sgc1 = GridCurrent.from_netCDF(dataset=ds, varnames=['vx', 'vy'], + grid_topology=sgt) + _sgc2 = GridCurrent.from_netCDF(dataset=ds, varnames=['tvx', 'tvy'], + grid_topology=sgt) + _sgc3 = GridCurrent.from_netCDF(dataset=ds, varnames=['dvx', 'dvy'], + grid_topology=sgt) + _sgc4 = GridCurrent.from_netCDF(dataset=ds, varnames=['tdvx', 'tdvy'], + grid_topology=sgt) ugt = {'nodes': 'nodes', 'faces': 'faces'} # ug = PyGrid_U(nodes=ds['nodes'][:], faces=ds['faces'][:]) - ugc1 = GridCurrent.from_netCDF(dataset=ds, varnames=['lin_vx', 'lin_vy'], grid_topology=ugt) - ugc2 = GridCurrent.from_netCDF(dataset=ds, varnames=['lin_tvx', 'lin_tvy'], grid_topology=ugt) - ugc3 = GridCurrent.from_netCDF(dataset=ds, varnames=['lin_dvx', 'lin_dvy'], grid_topology=ugt) - ugc4 = GridCurrent.from_netCDF(dataset=ds, varnames=['lin_tdvx', 'lin_tdvy'], grid_topology=ugt) + _ugc1 = GridCurrent.from_netCDF(dataset=ds, + varnames=['lin_vx', 'lin_vy'], + grid_topology=ugt) + _ugc2 = GridCurrent.from_netCDF(dataset=ds, + varnames=['lin_tvx', 'lin_tvy'], + grid_topology=ugt) + _ugc3 = GridCurrent.from_netCDF(dataset=ds, + varnames=['lin_dvx', 'lin_dvy'], + grid_topology=ugt) + _ugc4 = 
GridCurrent.from_netCDF(dataset=ds, + varnames=['lin_tdvx', 'lin_tdvy'], + grid_topology=ugt) ds.close() + return {'sgrid': (x, y), 'sgrid_vel': (dvx, dvy), 'sgrid_depth_vel': (tdvx, tdvy), @@ -160,70 +189,84 @@ def gen_vortex_3D(filename=None): def gen_sinusoid(filename=None): -# from mpl_toolkits.mplot3d import axes3d -# import matplotlib.pyplot as plt -# import numpy as np -# fig = plt.figure() -# ax = fig.add_subplot(111, projection='3d', zlim=[-2, 2], xlim=[0, 25], ylim=[-2, 2]) -# ax.autoscale(False) + # from mpl_toolkits.mplot3d import axes3d + # import matplotlib.pyplot as plt + # import numpy as np + # fig = plt.figure() + # ax = fig.add_subplot(111, projection='3d', + # zlim=[-2, 2], xlim=[0, 25], ylim=[-2, 2]) + # ax.autoscale(False) y, x = np.mgrid[-1:1:5j, 0:(6 * np.pi):25j] y = y + np.sin(x / 2) Z = np.zeros_like(x) # abs(np.sin(x / 2)) + + vx = np.ones_like(x) vy = np.cos(x / 2) / 2 vz = np.zeros_like(x) -# ax.plot_wireframe(x, y, Z, rstride=1, cstride=1, color='blue') -# ax.quiver(x, y, Z, vx, vy, vz, length=0.5, arrow_length_ratio=0.2, color='darkblue', pivot='tail') -# ax.quiver(x, y, vx, vy, color='darkblue', pivot='tail', angles='xy', scale=1.5, scale_units='xy', width=0.0025) -# ax.plot(x[2], y[2]) + # ax.plot_wireframe(x, y, Z, rstride=1, cstride=1, color='blue') + # ax.quiver(x, y, Z, vx, vy, vz, length=0.5, + # arrow_length_ratio=0.2, color='darkblue', pivot='tail') + # ax.quiver(x, y, vx, vy, color='darkblue', pivot='tail', angles='xy', + # scale=1.5, scale_units='xy', width=0.0025) + # ax.plot(x[2], y[2]) + rho = {'r_grid': (x, y, Z), 'r_vel': (vx, vy, vz)} yc, xc = np.mgrid[-0.75:0.75: 4j, 0.377:18.493:24j] yc = yc + np.sin(xc / 2) zc = np.zeros_like(xc) + 0.025 + vxc = np.ones_like(xc) vyc = np.cos(xc / 2) / 2 vzc = np.zeros_like(xc) -# ax.plot_wireframe(xc, yc, zc, rstride=1, cstride=1, color="red") -# ax.quiver(xc, yc, zc, vxc, vyc, vzc, length=0.3, arrow_length_ratio=0.2, color='darkred', pivot='tail') + # ax.plot_wireframe(xc, yc, 
zc, rstride=1, cstride=1, color="red") + # ax.quiver(xc, yc, zc, vxc, vyc, vzc, length=0.3, + # arrow_length_ratio=0.2, color='darkred', pivot='tail') psi = {'p_grid': (xc, yc, zc), 'p_vel': (vxc, vyc, vzc)} yu, xu = np.mgrid[-1:1:5j, 0.377:18.493:24j] yu = yu + np.sin(xu / 2) zu = np.zeros_like(xu) + 0.05 + vxu = np.ones_like(xu) * 2 vyu = np.zeros_like(xu) vzu = np.zeros_like(xu) -# ax.plot_wireframe(xu, yu, zu, rstride=1, cstride=1, color="purple") -# ax.quiver(xu, yu, zu, vxu, vyu, vzu, length=0.3, arrow_length_ratio=0.2, color='indigo', pivot='tail') + # ax.plot_wireframe(xu, yu, zu, rstride=1, cstride=1, color="purple") + # ax.quiver(xu, yu, zu, vxu, vyu, vzu, length=0.3, + # arrow_length_ratio=0.2, color='indigo', pivot='tail') u = {'u_grid': (xu, yu, zu), 'u_vel': (vzu, vxu, vzu)} yv, xv = np.mgrid[-0.75:0.75: 4j, 0:18.87:25j] yv = yv + np.sin(xv / 2) zv = np.zeros_like(xv) + 0.075 + vxv = np.zeros_like(xv) vyv = np.cos(xv / 2) / 2 vzv = np.zeros_like(xv) -# ax.plot_wireframe(xv, yv, zv, rstride=1, cstride=1, color="y") -# ax.quiver(xv, yv, zv, vxv, vyv, vzv, length=0.3, arrow_length_ratio=0.2, color='olive', pivot='tail') + # ax.plot_wireframe(xv, yv, zv, rstride=1, cstride=1, color="y") + # ax.quiver(xv, yv, zv, vxv, vyv, vzv, length=0.3, + # arrow_length_ratio=0.2, color='olive', pivot='tail') v = {'v_grid': (xv, yv, zv), 'v_vel': (vyv, vzv, vzv)} angle = np.cos(x / 2) / 2 ds = None + if filename is not None: ds = nc4.Dataset(filename, 'w', diskless=True, persist=True) + for k, v in {'eta_psi': 24, 'xi_psi': 4, 'eta_rho': 25, 'xi_rho': 5}.items(): ds.createDimension(k, v) + for k, v in {'lon_rho': ('xi_rho', 'eta_rho', x), 'lat_rho': ('xi_rho', 'eta_rho', y), 'lon_psi': ('xi_psi', 'eta_psi', xc), @@ -235,6 +278,7 @@ def gen_sinusoid(filename=None): }.items(): ds.createVariable(k, 'f8', dimensions=v[0:2]) ds[k][:] = v[2] + for k, v in {'u_rho': ('xi_rho', 'eta_rho', vx), 'v_rho': ('xi_rho', 'eta_rho', vy), 'u_psi': ('xi_psi', 'eta_psi', vxc), @@ -244,25 
+288,33 @@ def gen_sinusoid(filename=None): ds.createVariable(k, 'f8', dimensions=v[0:2]) ds[k][:] = v[2] ds[k].units = 'm/s' + ds.grid_type = 'sgrid' - #ds.createVariable('angle', 'f8', dimensions=('xi_rho', 'eta_rho')) - #ds['angle'][:] = angle + # ds.createVariable('angle', 'f8', dimensions=('xi_rho', 'eta_rho')) + # ds['angle'][:] = angle + if ds is not None: # Need to test the dataset... -# from gnome.environment import GridCurrent -# sg = PyGrid.from_netCDF(dataset=ds) -# sgc1 = GridCurrent.from_netCDF(dataset=ds, varnames=['u_rho', 'v_rho'], grid=sg) -# sgc1.angle = None -# sgc2 = GridCurrent.from_netCDF(dataset=ds, varnames=['u_psi', 'v_psi'], grid=sg) -# sgc2.angle = None -# sgc3 = GridCurrent.from_netCDF(dataset=ds, grid=sg) + + # from gnome.environment import GridCurrent + # sg = PyGrid.from_netCDF(dataset=ds) + # sgc1 = GridCurrent.from_netCDF(dataset=ds, + # varnames=['u_rho', 'v_rho'], + # grid=sg) + # sgc1.angle = None + # sgc2 = GridCurrent.from_netCDF(dataset=ds, + # varnames=['u_psi', 'v_psi'], + # grid=sg) + # sgc2.angle = None + # sgc3 = GridCurrent.from_netCDF(dataset=ds, grid=sg) + ds.close() # plt.show() def gen_ring(filename=None): -# import matplotlib.pyplot as plt + # import matplotlib.pyplot as plt import matplotlib.tri as tri import math @@ -274,7 +326,7 @@ def gen_ring(filename=None): angles = np.linspace(0, 2 * math.pi, n_angles, endpoint=False) angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1) angles[:, 1::2] += math.pi / n_angles -# print angles.shape + # print angles.shape x = (radii * np.cos(angles)).flatten() y = (radii * np.sin(angles)).flatten() @@ -297,6 +349,7 @@ def gen_ring(filename=None): ds.createDimension('nv', x.shape[0]) ds.createDimension('nele', faces.shape[0]) ds.createDimension('three', 3) + for k, v in {'node_lon': ('nv', x), 'node_lat': ('nv', y), 'faces': ('nele', 'three', faces), @@ -307,26 +360,32 @@ def gen_ring(filename=None): ds[k].units = 'm/s' if ds is not None: -# gc = 
GridCurrent.from_netCDF(dataset=ds) -# print gc.grid.node_lon.shape -# print gc.grid.faces.shape + # gc = GridCurrent.from_netCDF(dataset=ds) + # print gc.grid.node_lon.shape + # print gc.grid.faces.shape ds.close() # tripcolor plot. -# plt.figure() -# plt.gca().set_aspect('equal') -# plt.triplot(triang, 'bo-') -# plt.quiver(x, y, vy, vx) -# plt.title('triplot of Delaunay triangulation') + # plt.figure() + # plt.gca().set_aspect('equal') + # plt.triplot(triang, 'bo-') + # plt.quiver(x, y, vy, vx) + # plt.title('triplot of Delaunay triangulation') -def gen_all(path=None): +def gen_all(base_path=None): filenames = ['staggered_sine_channel.nc', '3D_circular.nc', 'tri_ring.nc'] - if path is not None: - filenames = [os.path.join(path, fn) for fn in filenames] + + if base_path is not None: + if not os.path.isdir(base_path): + os.mkdir(base_path) + + filenames = [os.path.join(base_path, fn) for fn in filenames] + for fn, func in zip(filenames, (gen_sinusoid, gen_vortex_3D, gen_ring)): func(fn) + if __name__ == '__main__': gen_sinusoid('staggered_sine_channel.nc') gen_vortex_3D('3D_circular.nc') diff --git a/py_gnome/tests/unit_tests/test_environment/test_environment.py b/py_gnome/tests/unit_tests/test_environment/test_environment.py index 99a24ba7a..c2860a11d 100644 --- a/py_gnome/tests/unit_tests/test_environment/test_environment.py +++ b/py_gnome/tests/unit_tests/test_environment/test_environment.py @@ -2,20 +2,69 @@ test object in environment module ''' import pytest -from datetime import datetime -import numpy as np from unit_conversion import InvalidUnitError from gnome.environment import Water -from gnome.environment import TemperatureTS - -# def test_Water_init(): -# w = Water() -# t = TemperatureTS(name='test', units='K', time=[w.temperature.time], data=np.array([[300]])) -# assert w.temperature == t -# assert w.salinity == 35.0 -# w = Water(temperature=273, salinity=0) -# assert w.temperature == 273.0 -# assert w.salinity == 0.0 + + +# pytest.mark.parametrize() 
is really finicky about what you can use as a +# string parameter. You can't use a list or a tuple, for example. +@pytest.mark.parametrize(('attr', 'sub_attr', 'value'), + [('name', None, 'NewWater'), + ('temperature', None, 400.0), + ('salinity', None, 50.0), + ('sediment', None, .01), + ('wave_height', None, 2.0), + ('fetch', None, 100.0), + + ('units', 'temperature', 'C'), + ('units', 'salinity', 'psu'), + ('units', 'sediment', 'kg/m^3'), + ('units', 'wave_height', 'm'), + ('units', 'fetch', 'm'), + ('units', 'density', 'kg/m^3'), + ('units', 'kinematic_viscosity', 'm^2/s'), + ]) +def test_Water_init(attr, sub_attr, value): + ''' + The initial default values that an object may have is a contract. + As such, we test this contract for a Water() object here. + + Specifically, we test: + - that the default values are what we expect, + - that the default values are immutable. + ''' + w = Water() + + check_water_defaults(w) + + if sub_attr is None: + setattr(w, attr, value) + else: + sub_value = getattr(w, attr) + print 'sub_value = ', sub_value + sub_value[sub_attr] = value + print 'sub_value = ', sub_value + + w = Water() + + check_water_defaults(w) + + +def check_water_defaults(water_obj): + assert water_obj.name == 'Water' + assert water_obj.temperature == 300.0 + assert water_obj.salinity == 35.0 + assert water_obj.sediment == .005 + assert water_obj.wave_height is None + assert water_obj.fetch is None + + assert water_obj.units['temperature'] == 'K' + assert water_obj.units['salinity'] == 'psu' + assert water_obj.units['sediment'] == 'kg/m^3' + assert water_obj.units['wave_height'] == 'm' + assert water_obj.units['fetch'] == 'm' + assert water_obj.units['density'] == 'kg/m^3' + assert water_obj.units['kinematic_viscosity'] == 'm^2/s' # currently salinity only have psu in there since there is no conversion from diff --git a/py_gnome/tests/unit_tests/test_environment/test_grid.py b/py_gnome/tests/unit_tests/test_environment/test_grid.py index 0232efe8c..51ed1b183 
100644 --- a/py_gnome/tests/unit_tests/test_environment/test_grid.py +++ b/py_gnome/tests/unit_tests/test_environment/test_grid.py @@ -1,114 +1,119 @@ import os + import pytest import netCDF4 as nc + from gnome.environment.gridded_objects_base import PyGrid, Grid_U, Grid_S from gnome.utilities.remote_data import get_datafile + import pprint as pp + @pytest.fixture() def sg_data(): base_dir = os.path.dirname(__file__) - s_data = os.path.join(base_dir, 'sample_data') - filename = os.path.join(s_data, 'currents') - filename = get_datafile(os.path.join(filename, 'tbofs_example.nc')) + filename = get_datafile(os.path.join(base_dir, + 'sample_data', + 'currents', + 'tbofs_example.nc')) + return filename, nc.Dataset(filename) + @pytest.fixture() def sg_topology(): return {'node_lon': 'lonc', 'node_lat': 'latc'} + @pytest.fixture() def sg(): - return PyGrid.from_netCDF(sg_data()[0], sg_data()[1], grid_topology=sg_topology()) + return PyGrid.from_netCDF(sg_data()[0], sg_data()[1], + grid_topology=sg_topology()) + @pytest.fixture() def ug_data(): base_dir = os.path.dirname(__file__) - s_data = os.path.join(base_dir, 'sample_data') - filename = os.path.join(s_data, 'currents') - filename = get_datafile(os.path.join(filename, 'ChesBay.nc')) + filename = get_datafile(os.path.join(base_dir, + 'sample_data', + 'currents', + 'ChesBay.nc')) + return filename, nc.Dataset(filename) + @pytest.fixture() def ug_topology(): pass + @pytest.fixture() def ug(): - return PyGrid.from_netCDF(ug_data()[0], ug_data()[1], grid_topology=ug_topology()) + return PyGrid.from_netCDF(ug_data()[0], ug_data()[1], + grid_topology=ug_topology()) + class TestPyGrid_S: def test_construction(self, sg_data, sg_topology): filename = sg_data[0] dataset = sg_data[1] grid_topology = sg_topology - sg = Grid_S.from_netCDF(filename, dataset, grid_topology=grid_topology) + + sg = Grid_S.from_netCDF(filename, dataset, + grid_topology=grid_topology) assert sg.filename == filename sg2 = Grid_S.from_netCDF(filename) 
assert sg2.filename == filename - sg3 = PyGrid.from_netCDF(filename, dataset, grid_topology=grid_topology) - sg4 = PyGrid.from_netCDF(filename) + sg3 = PyGrid.from_netCDF(filename, dataset, + grid_topology=grid_topology) print sg3.shape - print sg4.shape assert sg == sg3 + + sg4 = PyGrid.from_netCDF(filename) + print sg4.shape assert sg2 == sg4 def test_serialize(self, sg, sg_data, sg_topology): filename = sg_data[0] dataset = sg_data[1] grid_topology = sg_topology - sg2 = Grid_S.from_netCDF(filename, dataset, grid_topology=grid_topology) -# pytest.set_trace() + + sg2 = Grid_S.from_netCDF(filename, dataset, + grid_topology=grid_topology) + print sg.serialize()['filename'] print sg2.serialize()['filename'] assert sg.serialize()['filename'] == sg2.serialize()['filename'] def test_deserialize(self, sg, sg_data, sg_topology): - filename = sg_data[0] - dataset = sg_data[1] - grid_topology = sg_topology - sg2 = Grid_S.from_netCDF(filename, dataset, grid_topology=grid_topology) d_sg = Grid_S.new_from_dict(sg.serialize()) pp.pprint(sg.serialize()) pp.pprint(d_sg.serialize()) - assert sg.name == d_sg.name -# fn1 = 'C:\\Users\\jay.hennen\\Documents\\Code\\pygnome\\py_gnome\\scripts\\script_TAP\\arctic_avg2_0001_gnome.nc' -# fn2 = 'C:\\Users\\jay.hennen\\Documents\\Code\\pygnome\\py_gnome\\scripts\\script_columbia_river\\COOPSu_CREOFS24.nc' -# sg = PyGrid.from_netCDF(fn1) -# ug = PyGrid.from_netCDF(fn2) -# sg.save(".\\testzip.zip", name="testjson.json") -# ug.save_as_netcdf("./testug.nc") -# # sg.save_as_netcdf("./testug.nc") -# k = PyGrid_U.from_netCDF("./testug.nc") -# k2 = PyGrid_U.from_netCDF(fn2) -# c1 = PyGrid_S.from_netCDF(fn1) -# ug4 = PyGrid.from_netCDF("./testug.nc") -# sg2 = PyGrid.from_netCDF("./testsg.nc") -# ug2 = PyGrid.from_netCDF(fn2) -# -# ug3 = PyGrid.new_from_dict(ug.serialize(json_='save')) + class TestPyGrid_U: def test_construction(self, ug_data, ug_topology): filename = ug_data[0] dataset = ug_data[1] grid_topology = ug_topology + ug = 
Grid_U.from_netCDF(filename, dataset, grid_topology=grid_topology) -# assert ug.filename == filename -# assert isinstance(ug.node_lon, nc.Variable) -# assert ug.node_lon.name == 'lonc' + # assert ug.filename == filename + # assert isinstance(ug.node_lon, nc.Variable) + # assert ug.node_lon.name == 'lonc' ug2 = Grid_U.from_netCDF(filename) assert ug2.filename == filename -# assert isinstance(ug2.node_lon, nc.Variable) -# assert ug2.node_lon.name == 'lon' + # assert isinstance(ug2.node_lon, nc.Variable) + # assert ug2.node_lon.name == 'lon' - ug3 = PyGrid.from_netCDF(filename, dataset, grid_topology=grid_topology) + ug3 = PyGrid.from_netCDF(filename, dataset, + grid_topology=grid_topology) ug4 = PyGrid.from_netCDF(filename) print ug3.shape print ug4.shape @@ -119,14 +124,12 @@ def test_serialize(self, ug, ug_data, ug_topology): filename = ug_data[0] dataset = ug_data[1] grid_topology = ug_topology - ug2 = Grid_U.from_netCDF(filename, dataset, grid_topology=grid_topology) + + ug2 = Grid_U.from_netCDF(filename, dataset, + grid_topology=grid_topology) assert ug.serialize()['filename'] == ug2.serialize()['filename'] def test_deserialize(self, ug, ug_data, ug_topology): - filename = ug_data[0] - dataset = ug_data[1] - grid_topology = ug_topology - ug2 = Grid_U.from_netCDF(filename, dataset, grid_topology=grid_topology) d_ug = Grid_U.new_from_dict(ug.serialize()) pp.pprint(ug.serialize()) diff --git a/py_gnome/tests/unit_tests/test_environment/test_property.py b/py_gnome/tests/unit_tests/test_environment/test_property.py index 6cbde187e..06eb09db7 100644 --- a/py_gnome/tests/unit_tests/test_environment/test_property.py +++ b/py_gnome/tests/unit_tests/test_environment/test_property.py @@ -1,31 +1,31 @@ import os -import sys -import pytest import datetime as dt + +import pytest + import numpy as np -import datetime -from gnome.environment.gridded_objects_base import Variable, VectorVariable, Grid_S, PyGrid -from gnome.environment.ts_property import TimeSeriesProp, 
TSVectorProp -from gnome.environment.environment_objects import (VelocityGrid, - VelocityTS, - Bathymetry, - S_Depth_T1) -from gnome.utilities.remote_data import get_datafile -from unit_conversion import NotSupportedUnitError import netCDF4 as nc + import unit_conversion -base_dir = os.path.dirname(__file__) -sys.path.append(os.path.join(base_dir, 'sample_data')) -from gen_analytical_datasets import gen_all +from gnome.environment.gridded_objects_base import (Variable, + VectorVariable, + Grid_S, + PyGrid) +from gnome.environment.ts_property import TimeSeriesProp +from gnome.environment.environment_objects import (Bathymetry, + S_Depth_T1) +from gen_analytical_datasets import gen_all ''' Need to hook this up to existing test data infrastructure ''' +base_dir = os.path.dirname(__file__) + s_data = os.path.join(base_dir, 'sample_data') -gen_all(path=s_data) +gen_all(base_path=s_data) sinusoid = os.path.join(s_data, 'staggered_sine_channel.nc') sinusoid = nc.Dataset(sinusoid) @@ -41,8 +41,14 @@ class TestS_Depth_T1: def test_construction(self): - test_grid = Grid_S(node_lon=np.array([[0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3], [0, 1, 2, 3]]), - node_lat=np.array([[0, 0, 0, 0], [1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]])) + test_grid = Grid_S(node_lon=np.array([[0, 1, 2, 3], + [0, 1, 2, 3], + [0, 1, 2, 3], + [0, 1, 2, 3]]), + node_lat=np.array([[0, 0, 0, 0], + [1, 1, 1, 1], + [2, 2, 2, 2], + [3, 3, 3, 3]])) u = np.zeros((3, 4, 4), dtype=np.float64) u[0, :, :] = 0 @@ -56,9 +62,9 @@ def test_construction(self): w[3, :, :] = 3 bathy_data = -np.array([[1, 1, 1, 1], - [1, 2, 2, 1], - [1, 2, 2, 1], - [1, 1, 1, 1]], dtype=np.float64) + [1, 2, 2, 1], + [1, 2, 2, 1], + [1, 1, 1, 1]], dtype=np.float64) Cs_w = np.array([1.0, 0.6667, 0.3333, 0.0]) s_w = np.array([1.0, 0.6667, 0.3333, 0.0]) @@ -66,15 +72,24 @@ def test_construction(self): s_rho = np.array([0.8333, 0.5, 0.1667]) hc = np.array([1]) - b = Bathymetry(name='bathymetry', data=bathy_data, grid=test_grid, time=None) + b = 
Bathymetry(name='bathymetry', data=bathy_data, grid=test_grid, + time=None) - dep = S_Depth_T1(bathymetry=b, terms=dict(zip(S_Depth_T1.default_terms[0], [Cs_w, s_w, hc, Cs_r, s_rho])), dataset='dummy') + dep = S_Depth_T1(bathymetry=b, + terms=dict(zip(S_Depth_T1.default_terms[0], + [Cs_w, s_w, hc, Cs_r, s_rho])), + dataset='dummy') assert dep is not None - corners = np.array([[0, 0, 0], [0, 3, 0], [3, 3, 0], [3, 0, 0]], dtype=np.float64) + corners = np.array([[0, 0, 0], + [0, 3, 0], + [3, 3, 0], + [3, 0, 0]], dtype=np.float64) + res, alph = dep.interpolation_alphas(corners, w.shape) assert res is None # all particles on surface assert alph is None # all particles on surface + res, alph = dep.interpolation_alphas(corners, u.shape) assert res is None # all particles on surface assert alph is None # all particles on surface @@ -83,11 +98,15 @@ def test_construction(self): res = dep.interpolation_alphas(pts2, w.shape) assert all(res[0] == 0) # all particles underground assert np.allclose(res[1], -2.0) # all particles underground + res = dep.interpolation_alphas(pts2, u.shape) assert all(res[0] == 0) # all particles underground assert np.allclose(res[1], -2.0) # all particles underground - layers = np.array([[0.5, 0.5, .251], [1.5, 1.5, 1.0], [2.5, 2.5, 1.25]]) + layers = np.array([[0.5, 0.5, .251], + [1.5, 1.5, 1.0], + [2.5, 2.5, 1.25]]) + res, alph = dep.interpolation_alphas(layers, w.shape) print res print alph @@ -96,34 +115,28 @@ def test_construction(self): class TestTSprop: - def test_construction(self): - - u = None - v = None with pytest.raises(ValueError): # mismatched data and dates length - dates = [] - u = TimeSeriesProp('u', 'm/s', [datetime.datetime.now(), datetime.datetime.now()], [5, ]) + _u = TimeSeriesProp('u', 'm/s', + [dt.datetime.now(), dt.datetime.now()], + [5, ]) - u = TimeSeriesProp('u', 'm/s', [datetime.datetime.now()], [5, ]) + u = TimeSeriesProp('u', 'm/s', [dt.datetime.now()], [5, ]) assert u is not None assert u.name == 'u' assert u.units 
== 'm/s' - v = None with pytest.raises(ValueError): - v = TimeSeriesProp('v', 'nm/hr', [datetime.datetime.now()], [5, ]) - - assert v is None + _v = TimeSeriesProp('v', 'nm/hr', [dt.datetime.now()], [5, ]) constant = TimeSeriesProp.constant('const', 'm/s', 5) assert constant.data[0] == 5 - assert all(constant.at(np.array((0, 0)), datetime.datetime.now()) == 5) + assert all(constant.at(np.array((0, 0)), dt.datetime.now()) == 5) def test_unit_conversion(self): - u = TimeSeriesProp('u', 'm/s', [datetime.datetime.now()], [5, ]) + u = TimeSeriesProp('u', 'm/s', [dt.datetime.now()], [5, ]) t = u.in_units('km/hr') @@ -154,9 +167,11 @@ def test_at(self): # No extrapolation. out of bounds time should fail with pytest.raises(ValueError): u.at(corners, t1) + assert (u.at(corners, t2) == np.array([2])).all() assert (u.at(corners, t3) == np.array([3])).all() assert (u.at(corners, t4) == np.array([10])).all() + with pytest.raises(ValueError): u.at(corners, t5) @@ -168,12 +183,14 @@ def test_at(self): # # def test_construction(self, u, v): # vp = None -# vp = TSVectorProp(name='vp', units='m/s', time=dates2, variables=[u_data, v_data]) +# vp = TSVectorProp(name='vp', units='m/s', time=dates2, +# variables=[u_data, v_data]) # pytest.set_trace() # assert vp.variables[0].data == u_data # # # 3 components -# vp = TSVectorProp(name='vp', units='m/s', time=dates2, variables=[u_data, v_data, u_data]) +# vp = TSVectorProp(name='vp', units='m/s', time=dates2, +# variables=[u_data, v_data, u_data]) # # # Using TimeSeriesProp # vp = TSVectorProp(name='vp', variables=[u, v]) @@ -181,15 +198,18 @@ def test_at(self): # # # SHORT TIME # with pytest.raises(ValueError): -# vp = TSVectorProp(name='vp', units='m/s', time=dates, variables=[u_data, v_data]) +# vp = TSVectorProp(name='vp', units='m/s', time=dates, +# variables=[u_data, v_data]) # # # DIFFERENT LENGTH VARS # with pytest.raises(ValueError): -# vp = TSVectorProp(name='vp', units='m/s', time=dates2, variables=[s_data, v_data]) +# vp = 
TSVectorProp(name='vp', units='m/s', time=dates2, +# variables=[s_data, v_data]) # # # UNSUPPORTED UNITS # with pytest.raises(ValueError): -# vp = TSVectorProp(name='vp', units='km/s', time=dates2, variables=[s_data, v_data, u_data]) +# vp = TSVectorProp(name='vp', units='km/s', time=dates2, +# variables=[s_data, v_data, u_data]) # # def test_unit_conversion(self, vp): # nvp = vp.in_units('km/hr') @@ -263,6 +283,7 @@ def test_at(self): # assert (vp.at(corners, t1, extrapolate=True) == np.array([2, 5])).all() # assert (vp.at(corners, t5, extrapolate=True) == np.array([10, 13])).all() + ''' Analytical cases: @@ -304,10 +325,7 @@ def test_at(self): class TestGriddedProp: - - def test_construction(self): - data = sinusoid['u'][:] grid = PyGrid.from_netCDF(dataset=sinusoid) time = None @@ -321,6 +339,7 @@ def test_construction(self): grid_file='staggered_sine_channel.nc') curr_file = os.path.join(s_data, 'staggered_sine_channel.nc') + k = Variable.from_netCDF(filename=curr_file, varname='u', name='u') assert k.name == u.name assert k.units == 'm/s' @@ -334,56 +353,67 @@ def test_at(self): v = Variable.from_netCDF(filename=curr_file, varname='v_rho') points = np.array(([0, 0, 0], [np.pi, 1, 0], [2 * np.pi, 0, 0])) - time = datetime.datetime.now() + time = dt.datetime.now() assert all(u.at(points, time) == [1, 1, 1]) print np.cos(points[:, 0] / 2) / 2 - assert all(np.isclose(v.at(points, time), np.cos(points[:, 0] / 2) / 2)) + assert all(np.isclose(v.at(points, time), + np.cos(points[:, 0] / 2) / 2)) class TestGridVectorProp: - def test_construction(self): curr_file = os.path.join(s_data, 'staggered_sine_channel.nc') u = Variable.from_netCDF(filename=curr_file, varname='u_rho') v = Variable.from_netCDF(filename=curr_file, varname='v_rho') - gvp = VectorVariable(name='velocity', units='m/s', time=u.time, variables=[u, v]) + + gvp = VectorVariable(name='velocity', units='m/s', time=u.time, + variables=[u, v]) assert gvp.name == 'velocity' assert gvp.units == 'm/s' assert 
gvp.varnames[0] == 'u_rho' -# pytest.set_trace() def test_at(self): curr_file = os.path.join(s_data, 'staggered_sine_channel.nc') gvp = VectorVariable.from_netCDF(filename=curr_file, varnames=['u_rho', 'v_rho']) points = np.array(([0, 0, 0], [np.pi, 1, 0], [2 * np.pi, 0, 0])) - time = datetime.datetime.now() + time = dt.datetime.now() - assert all(np.isclose(gvp.at(points, time)[:, 1], np.cos(points[:, 0] / 2) / 2)) + assert all(np.isclose(gvp.at(points, time)[:, 1], + np.cos(points[:, 0] / 2) / 2)) def test_gen_varnames(self): import netCDF4 as nc4 from gnome.environment import GridCurrent, GridWind, IceVelocity + ds = nc4.Dataset('testname', 'w', diskless=True, persist=False) ds.createDimension('y', 5) ds.createDimension('x', 5) + ds.createVariable('x', 'f8', dimensions=('x', 'y')) ds['x'].standard_name = 'eastward_sea_water_velocity' + ds.createVariable('y', 'f8', dimensions=('x', 'y')) ds['y'].standard_name = 'northward_sea_water_velocity' + ds.createVariable('xw', 'f8', dimensions=('x', 'y')) ds['xw'].long_name = 'eastward_wind' + ds.createVariable('yw', 'f8', dimensions=('x', 'y')) ds['yw'].long_name = 'northward_wind' + ds.createVariable('ice_u', 'f8', dimensions=('x', 'y')) ds.createVariable('ice_v', 'f8', dimensions=('x', 'y')) + names = GridCurrent._gen_varnames(dataset=ds) assert names[0] == names.u == 'x' assert names[1] == names.v == 'y' + names = GridWind._gen_varnames(dataset=ds) assert names[0] == names.u == 'xw' assert names[1] == names.v == 'yw' + names = IceVelocity._gen_varnames(dataset=ds) assert names[0] == names.u == 'ice_u' assert names[1] == names.v == 'ice_v' @@ -393,5 +423,6 @@ def test_gen_varnames(self): assert gc.u == gc.variables[0] assert gc.varnames[0] == 'u' + if __name__ == "__main__": pass diff --git a/py_gnome/tests/unit_tests/test_environment/test_wind.py b/py_gnome/tests/unit_tests/test_environment/test_wind.py index 57caca163..f03ee4fdd 100755 --- a/py_gnome/tests/unit_tests/test_environment/test_wind.py +++ 
b/py_gnome/tests/unit_tests/test_environment/test_wind.py @@ -1,7 +1,5 @@ #!/usr/bin/env python - import os -import sys from datetime import datetime, timedelta import shutil @@ -23,11 +21,11 @@ from ..conftest import testdata -wind_file = testdata['timeseries']['wind_ts'] - from gnome.environment.environment_objects import GridWind from gnome.environment.gridded_objects_base import Grid_S, Variable +wind_file = testdata['timeseries']['wind_ts'] + def test_exceptions(): """ @@ -80,6 +78,7 @@ def test_units(): wm = Wind(filename=wind_file) new_units = 'meter per second' assert wm.units != new_units + wm.units = new_units assert wm.units == new_units @@ -90,7 +89,6 @@ def test_default_init(): assert wind.timeseries == np.array([(sec_to_date(zero_time()), [0.0, 0.0])], dtype=datetime_value_2d) - assert wind.units == 'mps' @@ -117,14 +115,15 @@ def test_wind_circ_fixture(wind_circ): # output is in knots - gtime_val = wm.get_wind_data(format='uv').view(dtype=np.recarray) + gtime_val = wm.get_wind_data(coord_sys='uv').view(dtype=np.recarray) assert np.all(gtime_val.time == wind_circ['uv'].time) - assert np.allclose(gtime_val.value, wind_circ['uv'].value, rtol=rtol, atol=atol) + assert np.allclose(gtime_val.value, wind_circ['uv'].value, + rtol=rtol, atol=atol) # output is in meter per second - gtime_val = wm.get_wind_data(format='uv', units='meter per second' - ).view(dtype=np.recarray) + gtime_val = (wm.get_wind_data(coord_sys='uv', units='meter per second') + .view(dtype=np.recarray)) expected = unit_conversion.convert('Velocity', wm.units, 'meter per second', @@ -137,33 +136,37 @@ def test_wind_circ_fixture(wind_circ): def test_get_value(wind_circ): 'test get_value(..) 
function' wind = wind_circ['wind'] + for rec in wind_circ['rq']: time = rec['time'] val = wind.get_value(time) assert all(np.isclose(rec['value'], val)) -@pytest.mark.parametrize("_format", ['r-theta','uv', 'r','theta','u','v']) -def test_at(_format, wind_circ): +@pytest.mark.parametrize("coord_sys", + ['r-theta', 'uv', 'r', 'theta', 'u', 'v']) +def test_at(coord_sys, wind_circ): 'test at(...) function' wind = wind_circ['wind'] - tp1 = np.array([[0,0],]) - tp2 = np.array([[0,0],[1,1]]) - d_name = 'rq' if _format in ('r-theta','r','theta') else 'uv' + tp1 = np.array([[0, 0], ]) + # tp2 = np.array([[0, 0], [1, 1]]) + + d_name = 'rq' if coord_sys in ('r-theta', 'r', 'theta') else 'uv' + for rec in wind_circ[d_name]: time = rec['time'] d_val0 = rec['value'][0] d_val1 = rec['value'][1] - val1 = wind.at(tp1, time, format=_format) + val1 = wind.at(tp1, time, coord_sys=coord_sys) print val1 - if _format in ('r-theta', 'uv'): + + if coord_sys in ('r-theta', 'uv'): assert np.isclose(val1[0][0], d_val0) assert np.isclose(val1[0][1], d_val1) + elif coord_sys in ('theta', 'v'): + assert np.isclose(val1[0], d_val1) else: - if _format in ('theta', 'v'): - assert np.isclose(val1[0], d_val1) - else: - assert np.isclose(val1[0], d_val0) + assert np.isclose(val1[0], d_val0) @pytest.fixture(scope='module') @@ -185,16 +188,16 @@ def wind_rand(rq_rand): dtv_rq.time = [datetime(2012, 11, 06, 20, 10 + i, 30) for i in range(len(dtv_rq))] dtv_rq.value = rq_rand['rq'] + dtv_uv = np.zeros((len(dtv_rq), ), dtype=datetime_value_2d).view(dtype=np.recarray) dtv_uv.time = dtv_rq.time dtv_uv.value = transforms.r_theta_to_uv_wind(rq_rand['rq']) - wm = Wind(timeseries=dtv_rq, format='r-theta', units='meter per second') - return {'wind': wm, 'rq': dtv_rq, 'uv': dtv_uv} + wm = Wind(timeseries=dtv_rq, coord_sys='r-theta', units='meter per second') + return {'wind': wm, 'rq': dtv_rq, 'uv': dtv_uv} -# @pytest.fixture(scope="module",params=['wind_circ','wind_rand']) @pytest.fixture(scope='module', 
params=['wind_circ']) def all_winds(request): @@ -229,7 +232,7 @@ def test_init_units(self, all_winds): Also check that init doesn't fail if timeseries given in (u,v) format """ - Wind(timeseries=all_winds['uv'], format='uv', units='meter per second') + Wind(timeseries=all_winds['uv'], coord_sys='uv', units='meter per second') assert True def test_str_repr_no_errors(self, all_winds): @@ -254,9 +257,9 @@ def test_get_wind_data(self, all_winds): # check get_time_value() gtime_val = all_winds['wind'].get_wind_data() + assert np.all(gtime_val['time'] == all_winds['rq'].time) - assert np.allclose(gtime_val['value'], - all_winds['rq'].value, + assert np.allclose(gtime_val['value'], all_winds['rq'].value, rtol=rtol, atol=atol) def test_set_wind_data(self, all_winds): @@ -264,9 +267,8 @@ def test_set_wind_data(self, all_winds): get_wind_data with default output format """ # check get_time_value() - wm = Wind(timeseries=all_winds['wind'].get_wind_data(), - format='r-theta', units='meter per second') + coord_sys='r-theta', units='meter per second') gtime_val = wm.get_wind_data() x = gtime_val[:2] x['value'] = [(1, 10), (2, 20)] @@ -275,8 +277,7 @@ def test_set_wind_data(self, all_winds): wm.set_wind_data(x, 'meter per second') # only matches to 10^-14 - assert np.allclose(wm.get_wind_data()['value'][:, 0], - x['value'][:, 0], + assert np.allclose(wm.get_wind_data()['value'][:, 0], x['value'][:, 0], rtol=rtol, atol=atol) assert np.all(wm.get_wind_data()['time'] == x['time']) @@ -286,22 +287,22 @@ def test_get_wind_data_rq(self, all_winds): """ # check get_time_value() - gtime_val = all_winds['wind'].get_wind_data(format='r-theta') + gtime_val = all_winds['wind'].get_wind_data(coord_sys='r-theta') + assert np.all(gtime_val['time'] == all_winds['rq'].time) - assert np.allclose(gtime_val['value'], - all_winds['rq'].value, - atol, rtol) + assert np.allclose(gtime_val['value'], all_winds['rq'].value, + rtol=rtol, atol=atol) def test_get_wind_data_uv(self, all_winds): """ 
Initialize from timeseries and test the get_time_value method """ gtime_val = (all_winds['wind'] - .get_wind_data(format='uv') + .get_wind_data(coord_sys='uv') .view(dtype=np.recarray)) + assert np.all(gtime_val.time == all_winds['uv'].time) - assert np.allclose(gtime_val.value, - all_winds['uv'].value, + assert np.allclose(gtime_val.value, all_winds['uv'].value, rtol=rtol, atol=atol) def test_get_wind_data_by_time(self, all_winds): @@ -309,11 +310,14 @@ def test_get_wind_data_by_time(self, all_winds): get time series, but this time provide it with the datetime values for which you want timeseries """ - gtime_val = (all_winds['wind'].get_wind_data(format='r-theta', - datetime=all_winds['rq'].time) + gtime_val = (all_winds['wind'] + .get_wind_data(coord_sys='r-theta', + datetime=all_winds['rq'].time) .view(dtype=np.recarray)) + assert np.all(gtime_val.time == all_winds['rq'].time) - assert np.allclose(gtime_val.value, all_winds['rq'].value, rtol=rtol, atol=atol) + assert np.allclose(gtime_val.value, all_winds['rq'].value, + rtol=rtol, atol=atol) def test_get_wind_data_by_time_scalar(self, all_winds): """ @@ -329,11 +333,11 @@ def test_get_wind_data_by_time_scalar(self, all_winds): dt = t0 + ((t1 - t0) / 2) get_rq = (all_winds['wind'] - .get_wind_data(format='r-theta', datetime=dt) + .get_wind_data(coord_sys='r-theta', datetime=dt) .view(dtype=np.recarray)) get_uv = (all_winds['wind'] - .get_wind_data(format='uv', datetime=dt) + .get_wind_data(coord_sys='uv', datetime=dt) .view(dtype=np.recarray)) np.set_printoptions(precision=4) @@ -453,25 +457,6 @@ def test_eq(): assert w == w2 -# removed -- no longer doing the minutes truncation -# not sure why we ever did. -# def test_timeseries_res_sec(): -# ''' -# check the timeseries resolution is changed to minutes. 
-# Drop seconds from datetime, if given -# ''' -# ts = np.zeros((3,), dtype=datetime_value_2d) -# ts[:] = [(datetime(2014, 1, 1, 10, 10, 30), (1, 10)), -# (datetime(2014, 1, 1, 11, 10, 10), (2, 10)), -# (datetime(2014, 1, 1, 12, 10), (3, 10))] -# w = Wind(timeseries=ts, units='m/s') -# # check that seconds resolution has been dropped -# for dt1, dt2 in zip(ts['time'].astype(datetime), -# w.timeseries['time'].astype(datetime)): -# print dt1, dt2 -# assert dt1.replace(second=0) == dt2 - - def test_update_from_dict(): 'wind_json only used here so take it out of conftest' wind_json = {'obj_type': 'gnome.environment.Wind', @@ -527,6 +512,7 @@ def gen_timeseries_for_dst(which='spring'): end_dt = transition_date + timedelta(hours=num_hours) timeseries = [] dt = start_dt + while dt <= end_dt: timeseries.append((dt.isoformat(), vel)) dt += timedelta(minutes=30) @@ -564,8 +550,7 @@ def test_update_from_dict_with_dst_spring_transition(): # this should raise if there is a problem wind._check_timeseries(ts) - assert True # if we got here, the test passed. - + assert True # if we got here, the test passed. def test_new_from_dict_with_dst_fall_transition(): @@ -614,15 +599,13 @@ def test_roundtrip_dst_spring_transition(): wind_dict = Wind.deserialize(wind_json) wind = Wind.new_from_dict(wind_dict.copy()) # new munges the dict! (pop?) - # now re-serialize: - wind_dict2 = wind.serialize('webapi') - # now make one from the new dict... 
wind_dict2 = Wind.deserialize(wind_json) wind2 = Wind.new_from_dict(wind_dict2.copy()) assert wind2 == wind + def test_wind_from_values(): """ simple test for the utility @@ -638,7 +621,8 @@ def test_wind_from_values(): for dt, r, theta in values: vals = wind.get_value(dt) assert np.allclose(vals[0], r) - assert np.allclose(vals[1],theta) + assert np.allclose(vals[1], theta) + def test_wind_from_values_knots(): """ @@ -654,7 +638,8 @@ def test_wind_from_values_knots(): # see if it's got the correct data for dt, r, theta in values: vals = wind.get_value(dt) - assert np.allclose(vals[0], unit_conversion.convert('velocity', 'knot', 'm/s', r)) + assert np.allclose(vals[0], unit_conversion.convert('velocity', + 'knot', 'm/s', r)) assert np.allclose(vals[1], theta) @@ -667,13 +652,13 @@ def test_wind_from_values_knots(): center_lon = np.array(([0, 2, 4, 6], [0, 2, 4, 6], [0, 2, 4, 6], [0, 2, 4, 6])) center_lat = np.array(([0, 0, 0, 0], [2, 2, 2, 2], [4, 4, 4, 4], [6, 6, 6, 6])) g = Grid_S(node_lon=node_lon, - node_lat=node_lat, - edge1_lon=edge1_lon, - edge1_lat=edge1_lat, - edge2_lon=edge2_lon, - edge2_lat=edge2_lat, - center_lon=center_lon, - center_lat=center_lat) + node_lat=node_lat, + edge1_lon=edge1_lon, + edge1_lat=edge1_lat, + edge2_lon=edge2_lon, + edge2_lat=edge2_lat, + center_lon=center_lon, + center_lat=center_lat) c_var = np.array(([0, 0, 0, 0], [0, 1, 2, 0], [0, 2, 1, 0], [0, 0, 0, 0])) e2_var = np.array(([1, 0, 0, 1], [0, 1, 2, 0], [0, 0, 0, 0])) @@ -683,19 +668,21 @@ def test_wind_from_values_knots(): e2_var.setflags(write=False) e1_var.setflags(write=False) n_var.setflags(write=False) + import pdb + class TestGridWind(object): def test_init(self): u = Variable(grid=g, data=e1_var) v = Variable(grid=g, data=e2_var) - gw = GridWind(name='test', grid=g, variables=[u,v]) + gw = GridWind(name='test', grid=g, variables=[u, v]) + assert gw is not None assert gw.u is u assert gw.variables[0] is u assert gw.variables[1] is v assert np.all(gw.grid.node_lon == 
node_lon) - pass def test_netCDF(self): pass @@ -703,82 +690,90 @@ def test_netCDF(self): def test_at(self): u = Variable(grid=g, data=e1_var) v = Variable(grid=g, data=e2_var) - gw = GridWind(name='test', grid=g, variables=[u,v]) - pts_arr = ([1,1], #1 - [1,1,3], #2 - [[2,2],[4,4]], #3 - [[2,4],[2,4]], #4 - [[1.5,1.5],[2,2],[3,3],[3.5,3.5]], #5 - [[1.5, 2, 3, 3.5], #6 + gw = GridWind(name='test', grid=g, variables=[u, v]) + + pts_arr = ([1, 1], # 1 + [1, 1, 3], # 2 + [[2, 2], [4, 4]], # 3 + [[2, 4], [2, 4]], # 4 + [[1.5, 1.5], [2, 2], [3, 3], [3.5, 3.5]], # 5 + [[1.5, 2, 3, 3.5], # 6 [1.5, 2, 3, 3.5]], - ((1.5,2,3,3.5), #7 - (1.5,2,3,3.5), + ((1.5, 2, 3, 3.5), # 7 + (1.5, 2, 3, 3.5), (1, 0, 0, 2))) ans_arr = (np.array([[0.5, 0.5, 0]]), np.array([[0, 0, 0]]), - np.array([[0.5, 0.5, 0],[1, 1, 0]]), - np.array([[1,0.5, 0],[1,0.5, 0]]), + np.array([[0.5, 0.5, 0], [1, 1, 0]]), + np.array([[1, 0.5, 0], [1, 0.5, 0]]), np.array([[0.4375, 0.375, 0], - [0.5,0.5,0], - [1.5,1.5,0], - [1.3125,1.3125,0]]), - np.array([[0.4375,0.5,1.5,1.3125], - [0.375,0.5,1.5,1.3125], + [0.5, 0.5, 0], + [1.5, 1.5, 0], + [1.3125, 1.3125, 0]]), + np.array([[0.4375, 0.5, 1.5, 1.3125], + [0.375, 0.5, 1.5, 1.3125], [0, 0, 0, 0]]), - np.array([[0,0.5,1.5,0], - [0,0.5,1.5,0], - [0,0,0,0]])) + np.array([[0, 0.5, 1.5, 0], + [0, 0.5, 1.5, 0], + [0, 0, 0, 0]])) + for pts, ans in zip(pts_arr, ans_arr): result = gw.at(pts, datetime.now()) assert np.allclose(result, ans) - @pytest.mark.parametrize("_format", ['r-theta', 'r','theta','u','v']) - def test_at_format(self, _format): + @pytest.mark.parametrize("coord_sys", ['r-theta', 'r', 'theta', 'u', 'v']) + def test_at_format(self, coord_sys): u = Variable(grid=g, data=e1_var) v = Variable(grid=g, data=e2_var) - gw = GridWind(name='test', grid=g, variables=[u,v]) - pts_arr = ([1,1], #1 - [1,1,3], #2 - [[2,2],[4,4]], #3 - [[2,4],[2,4]], #4 - [[1.5,1.5],[2,2],[3,3],[3.5,3.5]], #5 - [[1.5, 2, 3, 3.5], #6 + gw = GridWind(name='test', grid=g, variables=[u, v]) + + 
pts_arr = ([1, 1], # 1 + [1, 1, 3], # 2 + [[2, 2], [4, 4]], # 3 + [[2, 4], [2, 4]], # 4 + [[1.5, 1.5], [2, 2], [3, 3], [3.5, 3.5]], # 5 + [[1.5, 2, 3, 3.5], # 6 [1.5, 2, 3, 3.5]], - ((1.5,2,3,3.5), #7 - (1.5,2,3,3.5), + ((1.5, 2, 3, 3.5), # 7 + (1.5, 2, 3, 3.5), (1, 0, 0, 2))) - ans_arr = (np.array([[0.5, 0.5, 0],]), - np.array([[0, 0, 0],]), - np.array([[0.5, 0.5, 0],[1, 1, 0]]), - np.array([[1,0.5, 0],[1,0.5, 0]]), + ans_arr = (np.array([[0.5, 0.5, 0], ]), + np.array([[0, 0, 0], ]), + np.array([[0.5, 0.5, 0], [1, 1, 0]]), + np.array([[1, 0.5, 0], [1, 0.5, 0]]), np.array([[0.4375, 0.375, 0], - [0.5,0.5,0], - [1.5,1.5,0], - [1.3125,1.3125,0]]), - np.array([[0.4375,0.5,1.5,1.3125], - [0.375,0.5,1.5,1.3125], + [0.5, 0.5, 0], + [1.5, 1.5, 0], + [1.3125, 1.3125, 0]]), + np.array([[0.4375, 0.5, 1.5, 1.3125], + [0.375, 0.5, 1.5, 1.3125], [0, 0, 0, 0]]).T, - np.array([[0,0.5,1.5,0], - [0,0.5,1.5,0], - [0,0,0,0]]).T) + np.array([[0, 0.5, 1.5, 0], + [0, 0.5, 1.5, 0], + [0, 0, 0, 0]]).T) + for pts, ans in zip(pts_arr, ans_arr): - raw_result = gw.at(pts, datetime.now(), format=_format, _auto_align=False) - ans_mag = np.sqrt(ans[:,0]**2 + ans[:,1]**2) - print 'ans_mag',ans_mag + raw_result = gw.at(pts, datetime.now(), coord_sys=coord_sys, + _auto_align=False) + + ans_mag = np.sqrt(ans[:, 0] ** 2 + ans[:, 1] ** 2) + print 'ans_mag', ans_mag print - ans_dir = np.arctan2(ans[:,1], ans[:,0]) * 180./np.pi - if _format in ('r-theta', 'r', 'theta'): - if _format == 'r': + + ans_dir = np.arctan2(ans[:, 1], ans[:, 0]) * 180./np.pi + + if coord_sys in ('r-theta', 'r', 'theta'): + if coord_sys == 'r': assert np.allclose(raw_result, ans_mag) - elif _format == 'theta': + elif coord_sys == 'theta': assert np.allclose(raw_result, ans_dir) else: - assert np.allclose(raw_result, np.column_stack((ans_mag, ans_dir))) + assert np.allclose(raw_result, + np.column_stack((ans_mag, ans_dir))) else: - if _format == 'u': - assert np.allclose(raw_result, ans[:,0]) + if coord_sys == 'u': + assert 
np.allclose(raw_result, ans[:, 0]) else: - assert np.allclose(raw_result, ans[:,1]) - + assert np.allclose(raw_result, ans[:, 1]) diff --git a/py_gnome/tests/unit_tests/test_model_multiproc.py b/py_gnome/tests/unit_tests/test_model_multiproc.py index ad7acd90d..d35496c50 100644 --- a/py_gnome/tests/unit_tests/test_model_multiproc.py +++ b/py_gnome/tests/unit_tests/test_model_multiproc.py @@ -1,5 +1,7 @@ import os +import sys import time +import traceback from datetime import datetime, timedelta @@ -122,9 +124,11 @@ def test_init(): model = make_model() with pytest.raises(TypeError): + # no uncertainty arguments ModelBroadcaster(model) with pytest.raises(TypeError): + # no spill amount uncertainties ModelBroadcaster(model, ('down', 'normal', 'up')) @@ -179,7 +183,6 @@ def test_uncertainty_array_indexing(): ('down', 'normal', 'up')) try: - print '\nGetting time & spill values for just the (down, down) model:' res = model_broadcaster.cmd('get_wind_timeseries', {}, ('down', 'down')) assert np.allclose([r[0] for r in res], 17.449237) @@ -187,9 +190,7 @@ def test_uncertainty_array_indexing(): res = model_broadcaster.cmd('get_spill_amounts', {}, ('down', 'down')) assert np.isclose(res[0], 333.33333) - print '\nGetting time & spill values for just the (up, up) model:' res = model_broadcaster.cmd('get_wind_timeseries', {}, ('up', 'up')) - print 'get_wind_timeseries:' assert np.allclose([r[0] for r in res], 20.166224) res = model_broadcaster.cmd('get_spill_amounts', {}, ('up', 'up')) @@ -414,6 +415,39 @@ def test_weathering_output_only(): model_broadcaster.stop() +@pytest.mark.timeout(10) +def test_child_exception(): + ''' + This one is a bit tricky. We would like to simulate an exception + by making the spill.amount a None value and then instantiating + our broadcaster. This is expected to raise a TypeError based on + the current codebase, but could change. 
+ + We would like to get the exception raised in the child process and + re-raise it in the broadcaster parent process, complete with good + traceback information. + ''' + model = make_model(geojson_output=True) + + model.spills[0].amount = None + print 'amount:', model.spills[0].amount + + try: + _model_broadcaster = ModelBroadcaster(model, + ('down', 'normal', 'up'), + ('down', 'normal', 'up')) + except Exception as e: + assert type(e) == TypeError + + exc_type, exc_value, exc_traceback = sys.exc_info() + fmt = traceback.format_exception(exc_type, exc_value, exc_traceback) + + last_file_entry = [l for l in fmt if l.startswith(' File ')][-1] + last_file = last_file_entry.split('"')[1] + + assert os.path.basename(last_file) == 'spill.py' + + if __name__ == '__main__': scripting.make_images_dir() diff --git a/py_gnome/tests/unit_tests/test_movers/test_gridcurrent_mover.py b/py_gnome/tests/unit_tests/test_movers/test_gridcurrent_mover.py index 90e9d37a3..61737c9eb 100644 --- a/py_gnome/tests/unit_tests/test_movers/test_gridcurrent_mover.py +++ b/py_gnome/tests/unit_tests/test_movers/test_gridcurrent_mover.py @@ -68,7 +68,7 @@ def test_uncertain_loop(uncertain_time_delay=0): pSpill = sample_sc_release(num_le, start_pos, rel_time, uncertain=True) curr = GridCurrentMover(curr_file, topology_file) - curr.uncertain_time_delay=uncertain_time_delay + curr.uncertain_time_delay = uncertain_time_delay u_delta = _uncertain_loop(pSpill, curr) _assert_move(u_delta) @@ -94,7 +94,7 @@ def test_certain_uncertain(): assert np.all(delta[:, :2] == u_delta[:, :2]) -c_grid = GridCurrentMover(curr_file,topology_file) +c_grid = GridCurrentMover(curr_file, topology_file) def test_default_props(): @@ -107,7 +107,7 @@ def test_default_props(): assert c_grid.uncertain_duration == 24 assert c_grid.uncertain_cross == .25 assert c_grid.uncertain_along == .5 - assert c_grid.extrapolate == False + assert c_grid.extrapolate is False assert c_grid.time_offset == 0 @@ -137,7 +137,7 @@ def 
test_extrapolate(): c_grid.extrapolate = True print c_grid.extrapolate - assert c_grid.extrapolate == True + assert c_grid.extrapolate is True def test_offset_time(): diff --git a/py_gnome/tests/unit_tests/test_movers/test_wind_mover.py b/py_gnome/tests/unit_tests/test_movers/test_wind_mover.py index b315e1d87..eca63267f 100644 --- a/py_gnome/tests/unit_tests/test_movers/test_wind_mover.py +++ b/py_gnome/tests/unit_tests/test_movers/test_wind_mover.py @@ -7,10 +7,9 @@ import numpy as np -import unit_conversion +import unit_conversion as uc -from gnome.basic_types import (datetime_value_2d, - ts_format) +from gnome.basic_types import datetime_value_2d, ts_format from gnome.utilities.projections import FlatEarthProjection @@ -19,7 +18,7 @@ from gnome.utilities.transforms import r_theta_to_uv_wind from gnome.utilities import convert -from gnome.environment import Wind, constant_wind +from gnome.environment import Wind from gnome.spill import point_line_release_spill from gnome.spill_container import SpillContainer @@ -30,6 +29,7 @@ wind_mover_from_file) from gnome.persist import References, load from gnome.exceptions import ReferencedObjectNotSet + from ..conftest import sample_sc_release, testdata @@ -72,7 +72,7 @@ def test_read_file_init(): wind = Wind(filename=file_) wm = WindMover(wind) - wind_ts = wind.get_wind_data(format='uv', units='meter per second') + wind_ts = wind.get_wind_data(coord_sys='uv', units='meter per second') _defaults(wm) # check defaults set correctly assert not wm.make_default_refs cpp_timeseries = _get_timeseries_from_cpp(wm) @@ -82,11 +82,10 @@ def test_read_file_init(): # NOTE: Following functionality is already tested in test_wind.py, # but what the heck - do it here too. 
- wind_ts = wind.get_wind_data(format=ts_format.uv) - cpp_timeseries['value'] = unit_conversion.convert('Velocity', - 'meter per second', - wind.units, - cpp_timeseries['value']) + wind_ts = wind.get_wind_data(coord_sys=ts_format.uv) + cpp_timeseries['value'] = uc.convert('Velocity', + 'meter per second', wind.units, + cpp_timeseries['value']) _assert_timeseries_equivalence(cpp_timeseries, wind_ts) @@ -111,6 +110,7 @@ def test_empty_init(): ''' wm = WindMover() assert wm.make_default_refs + _defaults(wm) assert wm.name == 'WindMover' print wm.validate() @@ -159,7 +159,7 @@ def test_update_wind(wind_circ): for i in range(3)] t_dtv.value = np.random.uniform(1, 5, (3, 2)) - o_wind.set_wind_data(t_dtv, units='meter per second', format='uv') + o_wind.set_wind_data(t_dtv, units='meter per second', coord_sys='uv') cpp_timeseries = _get_timeseries_from_cpp(wm) @@ -353,8 +353,7 @@ def test_timespan(): wm = WindMover(Wind(timeseries=time_val, units='meter per second'), - active_start=model_time + timedelta(seconds=time_step) - ) + active_start=model_time + timedelta(seconds=time_step)) wm.prepare_for_model_run() wm.prepare_for_model_step(sc, time_step, model_time) @@ -412,7 +411,7 @@ def test_constant_wind_mover(): with raises(Exception): # it should raise an InvalidUnitError, but I don't want to have to # import unit_conversion just for that... 
- wm = constant_wind_mover(10, 45, units='some_random_string') + _wm = constant_wind_mover(10, 45, units='some_random_string') wm = constant_wind_mover(10, 45, units='m/s') @@ -431,6 +430,7 @@ def test_constant_wind_mover(): # 45 degree wind at the equator -- u,v should be the same assert delta[0][0] == delta[0][1] + def test_constant_wind_mover_bounds(): wm = constant_wind_mover(10, 45, units='knots') @@ -439,7 +439,6 @@ def test_constant_wind_mover_bounds(): assert wm.real_data_stop == InfDateTime("inf") - def test_wind_mover_from_file(): wm = wind_mover_from_file(file_) print wm.wind.filename @@ -517,7 +516,7 @@ def _defaults(wm): are as expected """ # timespan is as big as possible - assert wm.active == True + assert wm.active is True assert wm.uncertain_duration == 3.0 assert wm.uncertain_time_delay == 0 assert wm.uncertain_speed_scale == 2 @@ -535,9 +534,10 @@ def _get_timeseries_from_cpp(windmover): This is simply used for testing. """ - dtv = windmover.wind.get_wind_data(format=ts_format.uv) + dtv = windmover.wind.get_wind_data(coord_sys=ts_format.uv) tv = convert.to_time_value_pair(dtv, ts_format.uv) val = windmover.mover.get_time_value(tv['time']) + tv['value']['u'] = val['u'] tv['value']['v'] = val['v'] diff --git a/py_gnome/tests/unit_tests/test_outputters/test_netcdf_outputter.py b/py_gnome/tests/unit_tests/test_outputters/test_netcdf_outputter.py index d9767269d..63c77567c 100644 --- a/py_gnome/tests/unit_tests/test_outputters/test_netcdf_outputter.py +++ b/py_gnome/tests/unit_tests/test_outputters/test_netcdf_outputter.py @@ -185,7 +185,8 @@ def test_prepare_for_model_run(model): @pytest.mark.slow def test_write_output_standard(model): """ - rewind model defined by model fixture. + Rewind model defined by model fixture. 
+ invoke model.step() till model runs all 5 steps For each step, compare the standard variables in the model.cache to the @@ -220,8 +221,13 @@ def test_write_output_standard(model): scp = model._cache.load_timestep(step) # check time - - assert scp.LE('current_time_stamp', uncertain) == time_[step] + # conversion from floats to datetime can be off by microseconds + # fixme: this should probably round! + # this may help: https://stackoverflow.com/questions/3463930/how-to-round-the-minute-of-a-datetime-object-python + print "***** scp timestamp", scp.LE('current_time_stamp', uncertain) + print "***** netcdf time:", time_[step] + print type(time_[step]) + assert scp.LE('current_time_stamp', uncertain) == time_[step].replace(microsecond=0) assert np.allclose(scp.LE('positions', uncertain)[:, 0], (dv['longitude'])[idx[step]:idx[step + 1]], diff --git a/py_gnome/tests/unit_tests/test_persist/test_model_save_load.py b/py_gnome/tests/unit_tests/test_persist/test_model_save_load.py index a0b01af68..bef8154af 100644 --- a/py_gnome/tests/unit_tests/test_persist/test_model_save_load.py +++ b/py_gnome/tests/unit_tests/test_persist/test_model_save_load.py @@ -166,8 +166,10 @@ def test_init_exception(saveloc_): @pytest.mark.slow @pytest.mark.parametrize(('uncertain', 'zipsave'), - [(False, False), (True, False), - (False, True), (True, True)]) + [(False, False), + (True, False), + (False, True), + (True, True)]) def test_save_load_model(uncertain, zipsave, saveloc_): ''' create a model, save it, then load it back up and check it is equal to @@ -181,10 +183,10 @@ def test_save_load_model(uncertain, zipsave, saveloc_): model.zipsave = zipsave - print 'saving scenario ..' + print 'saving scenario to {}...'.format(saveloc_) model.save(saveloc_) - print 'loading scenario ..' + print 'loading scenario ...' 
model2 = load(zipname(saveloc_, model)) assert model == model2 @@ -192,8 +194,10 @@ def test_save_load_model(uncertain, zipsave, saveloc_): @pytest.mark.slow @pytest.mark.parametrize(('uncertain', 'zipsave'), - [(False, False), (True, False), - (False, True), (True, True)]) + [(False, False), + (True, False), + (False, True), + (True, True)]) def test_save_load_midrun_scenario(uncertain, zipsave, saveloc_): """ create model, save it after 1step, then load and check equality of original diff --git a/py_gnome/tests/unit_tests/test_utilities/test_helpers_convert.py b/py_gnome/tests/unit_tests/test_utilities/test_helpers_convert.py index fb9ddf57d..f720d24b3 100644 --- a/py_gnome/tests/unit_tests/test_utilities/test_helpers_convert.py +++ b/py_gnome/tests/unit_tests/test_utilities/test_helpers_convert.py @@ -35,23 +35,24 @@ def wind_ts(rq_wind): .view(dtype=np.recarray)) date_times = ([zero_time()] * len(rq_wind['uv'])) - tv = (np.array(zip(date_times, rq_wind['uv']), dtype=time_value_pair) - .view(dtype=np.recarray)) - print 'Test Case - actual values:' - print 'datetime_value_2d: datetime, (r, theta):' - print dtv_rq.time - print dtv_rq.value + values = zip(date_times, (tuple(w) for w in rq_wind['uv'])) + tv = (np.array(values, dtype=time_value_pair).view(dtype=np.recarray)) - print '----------' - print 'datetime_value_2d: datetime, (u, v):' - print dtv_uv.time - print dtv_uv.value + # print 'Test Case - actual values:' + # print 'datetime_value_2d: datetime, (r, theta):' + # print dtv_rq.time + # print dtv_rq.value - print '----------' - print 'time_value_pair: time, (u, v):' - print tv.time - print tv.value.reshape(len(tv.value), -1) + # print '----------' + # print 'datetime_value_2d: datetime, (u, v):' + # print dtv_uv.time + # print dtv_uv.value + + # print '----------' + # print 'time_value_pair: time, (u, v):' + # print tv.time + # print tv.value.reshape(len(tv.value), -1) return {'dtv_rq': dtv_rq, 'dtv_uv': dtv_uv, 'tv': tv} @@ -149,3 +150,5 @@ def 
test_to_datetime_value_2d_uv(wind_ts, out_ts_format): .view(dtype=np.recarray)) assert np.allclose(out_dtval.value, wind_ts['dtv_uv'].value, atol, rtol) + + diff --git a/py_gnome/tests/unit_tests/test_utilities/test_timeseries.py b/py_gnome/tests/unit_tests/test_utilities/test_timeseries.py index 9962f619f..f723c1509 100644 --- a/py_gnome/tests/unit_tests/test_utilities/test_timeseries.py +++ b/py_gnome/tests/unit_tests/test_utilities/test_timeseries.py @@ -18,6 +18,7 @@ def test_str(): ts = Timeseries() s = str(ts) + # not much of a check, not much of a str. assert 'Timeseries' in s @@ -26,6 +27,7 @@ def test_filename(): """ should really check for a real filename """ ts = Timeseries() fn = ts.filename + assert fn is None @@ -48,7 +50,8 @@ def test_exceptions(invalid_rq): invalid_dtv_rq = np.zeros((len(invalid_rq['rq']), ), dtype=datetime_value_2d) invalid_dtv_rq['value'] = invalid_rq['rq'] - Timeseries(timeseries=invalid_dtv_rq, format='r-theta') + + Timeseries(timeseries=invalid_dtv_rq, coord_sys='r-theta') # exception raised if datetime values are not in ascending order # or not unique @@ -59,6 +62,7 @@ def test_exceptions(invalid_rq): dtype=datetime_value_2d).view(dtype=np.recarray) (dtv_rq.value[0])[:] = (1, 0) (dtv_rq.value[1])[:] = (1, 10) + Timeseries(timeseries=dtv_rq) with raises(TimeseriesError): @@ -68,6 +72,7 @@ def test_exceptions(invalid_rq): dtv_rq.value = (1, 0) dtv_rq.time[:len(dtv_rq) - 1] = [datetime(2012, 11, 06, 20, 10 + i, 30) for i in range(len(dtv_rq) - 1)] + Timeseries(timeseries=dtv_rq) @@ -75,9 +80,10 @@ def test_init(wind_timeseries): '' rq = wind_timeseries['rq'] uv = wind_timeseries['uv'] - ts = Timeseries(rq, format='r-theta') + ts = Timeseries(rq, coord_sys='r-theta') + assert np.all(ts.get_timeseries()['time'] == rq['time']) - assert np.allclose(ts.get_timeseries(format='r-theta')['value'], + assert np.allclose(ts.get_timeseries(coord_sys='r-theta')['value'], rq['value'], atol=1e-10) assert 
np.allclose(ts.get_timeseries()['value'], @@ -87,7 +93,7 @@ def test_init(wind_timeseries): def test_get_timeseries(wind_timeseries): uv = wind_timeseries['uv'] - ts = Timeseries(uv, format='uv') + ts = Timeseries(uv, coord_sys='uv') result = ts.get_timeseries() assert len(result) == 6 @@ -99,7 +105,7 @@ def test_get_timeseries(wind_timeseries): def test_get_timeseries_multiple(wind_timeseries): uv = wind_timeseries['uv'] - ts = Timeseries(uv, format='uv') + ts = Timeseries(uv, coord_sys='uv') dts = [datetime(2012, 11, 6, 20, 12), datetime(2012, 11, 6, 20, 14)] @@ -120,6 +126,7 @@ def test_empty(): """ ts = Timeseries() arr = ts.get_timeseries() + assert len(arr) == 1 assert arr[0][1][0] == 0.0 assert arr[0][1][1] == 0.0 @@ -135,6 +142,7 @@ def test_set_timeseries_prop(): # converted to a 1-D array correctly x = (datetime.now().replace(microsecond=0, second=0), (4, 5)) ts.set_timeseries(x) + assert ts.get_timeseries()['time'] == x[0] assert np.allclose(ts.get_timeseries()['value'], x[1], atol=1e-6) @@ -143,6 +151,7 @@ def test__eq(): ''' only checks timeseries values match ''' ts1 = Timeseries(filename=wind_file) ts2 = Timeseries(timeseries=ts1.get_timeseries()) + assert ts1 == ts2 @@ -152,6 +161,7 @@ def test_ne(): ts = ts1.get_timeseries() ts[0]['value'] += (1, 1) ts2 = Timeseries(timeseries=ts) + assert ts1 != ts2 @@ -173,7 +183,6 @@ def test__check_timeseries_single_value(): assert result with raises(TimeseriesError): - print (('2007-03-01T13:00:00', (1.0, 2.0)),) result = ts._check_timeseries((('2007-03-01T13:00:00', (1.0, 2.0)),)) assert result diff --git a/py_gnome/tests/unit_tests/test_weatherers/test_roc.py b/py_gnome/tests/unit_tests/test_weatherers/test_roc.py index b627c3902..e9c040d29 100644 --- a/py_gnome/tests/unit_tests/test_weatherers/test_roc.py +++ b/py_gnome/tests/unit_tests/test_weatherers/test_roc.py @@ -4,27 +4,22 @@ from datetime import datetime, timedelta import numpy as np -from pytest import raises, mark, set_trace -import pytest -import 
unit_conversion as us +import pytest -from gnome.basic_types import oil_status, fate +from gnome.environment import Waves, constant_wind, Water from gnome.weatherers.roc import (Burn, Disperse, Skim, Platform) -from gnome.persist import load -from gnome.weatherers import (WeatheringData, - FayGravityViscous, - weatherer_sort, - Emulsification, +from gnome.weatherers import (Emulsification, Evaporation) -from gnome.spill_container import SpillContainer -from gnome.spill import point_line_release_spill -from gnome.utilities.inf_datetime import InfDateTime -from gnome.environment import Waves, constant_wind, Water + +from gnome.persist import load from ..conftest import (test_oil, sample_model_weathering2) +import pprint as pp + + delay = 1. time_step = 900 rel_time = datetime(2012, 9, 15, 12, 0) @@ -36,19 +31,24 @@ water = Water() waves = Waves(wind, water) + class ROCTests: @classmethod def mk_objs(cls, sample_model_fcn2): model = sample_model_weathering2(sample_model_fcn2, test_oil, 333.0) model.set_make_default_refs(True) + model.environment += [waves, wind, water] + model.weatherers += Evaporation(wind=wind, water=water) model.weatherers += Emulsification(waves=waves) + return (model.spills.items()[0], model) def prepare_test_objs(self, obj_arrays=None): self.model.rewind() self.model.rewind() + at = set() for wd in self.model.weatherers: @@ -62,6 +62,7 @@ def prepare_test_objs(self, obj_arrays=None): def reset_and_release(self, rel_time=None, time_step=900.0): self.prepare_test_objs() + if rel_time is None: rel_time = self.sc.spills[0].release_time @@ -75,124 +76,154 @@ def step(self, test_weatherer, time_step, model_time): self.model.step() test_weatherer.weather_elements(self.sc, time_step, model_time) + class TestRocGeneral(ROCTests): + timeseries = np.array([(rel_time, rel_time + timedelta(hours=12.))]) burn = Burn(offset=50.0, boom_length=250.0, boom_draft=10.0, speed=2.0, throughput=0.75, burn_efficiency_type=1, - timeseries=np.array([(rel_time, rel_time + 
timedelta(hours=12.))])) + timeseries=timeseries) def test_get_thickness(self, sample_model_fcn2): (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) self.reset_and_release() assert self.burn._get_thickness(self.sc) == 0.0 + self.model.step() -# assert self.burn._get_thickness(self.sc) == 0.16786582186002749 + # assert self.burn._get_thickness(self.sc) == 0.16786582186002749 + self.model.step() -# assert self.burn._get_thickness(self.sc) == 0.049809899105767913 + # assert self.burn._get_thickness(self.sc) == 0.049809899105767913 + class TestROCBurn(ROCTests): + timeseries = np.array([(rel_time, rel_time + timedelta(hours=12.))]) burn = Burn(offset=50.0, - boom_length=250.0, - boom_draft=10.0, - speed=2.0, - throughput=0.75, - burn_efficiency_type=1, - timeseries=np.array([(rel_time, rel_time + timedelta(hours=12.))])) + boom_length=250.0, + boom_draft=10.0, + speed=2.0, + throughput=0.75, + burn_efficiency_type=1, + timeseries=timeseries) def test_prepare_for_model_run(self, sample_model_fcn2): - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) + self.sc, self.model = ROCTests.mk_objs(sample_model_fcn2) + self.reset_and_release() self.burn.prepare_for_model_run(self.sc) + assert self.sc.mass_balance['burned'] == 0.0 + assert 'systems' in self.sc.mass_balance assert self.burn.id in self.sc.mass_balance['systems'] + assert self.sc.mass_balance['systems'][self.burn.id]['boomed'] == 0.0 assert self.sc.mass_balance['boomed'] == 0.0 + assert self.burn._swath_width == 75 assert self.burn._area == 1718.75 assert self.burn.boom_draft == 10 assert self.burn._offset_time == 14.805 + assert self.burn._area_coverage_rate == 0.3488372093023256 + assert round(self.burn._boom_capacity) == 477 + assert len(self.sc.report[self.burn.id]) == 1 - assert self.burn._area_coverage_rate == 0.3488372093023256 assert len(self.burn.timeseries) == 1 def test_reports(self, sample_model_fcn2): - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) + self.sc, self.model 
= ROCTests.mk_objs(sample_model_fcn2) + self.reset_and_release() + self.burn.boom_length = 3500.0 + self.burn.prepare_for_model_run(self.sc) + assert self.burn._swath_width == 1050 assert len(self.burn.report) == 2 def test_serialize(self, sample_model_fcn2): - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) + self.sc, self.model = ROCTests.mk_objs(sample_model_fcn2) + self.reset_and_release() self.burn.serialize() def test_prepare_for_model_step(self, sample_model_fcn2): - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) + self.sc, self.model = ROCTests.mk_objs(sample_model_fcn2) + self.reset_and_release() self.burn.prepare_for_model_run(self.sc) self.burn.prepare_for_model_step(self.sc, time_step, active_start) - assert self.burn._active == True + assert self.burn._active is True @pytest.mark.skip("Needs fix after test subject was refactored") def test_weather_elements(self, sample_model_fcn2): - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) + self.sc, self.model = ROCTests.mk_objs(sample_model_fcn2) + self.model.time_step = 900 self.reset_and_release() + + timeseries = np.array([(rel_time, rel_time + timedelta(hours=12.))]) burn = Burn(offset=6000.0, - boom_length=100.0, - boom_draft=10.0, - speed=2.0, - throughput=0.75, - burn_efficiency_type=1, - timeseries=np.array([(rel_time, rel_time + timedelta(hours=12.))])) + boom_length=100.0, + boom_draft=10.0, + speed=2.0, + throughput=0.75, + burn_efficiency_type=1, + timeseries=timeseries) self.model.weatherers.append(burn) self.model.rewind() + self.model.step() - assert burn._is_burning == False - assert burn._is_collecting == True + assert burn._is_burning is False + assert burn._is_collecting is True assert self.sc.mass_balance['burned'] == 0 + self.model.step() - assert burn._is_burning == False - assert np.isclose(burn._boom_capacity, 0, atol=0.01) - assert burn._is_transiting == True - assert burn._is_boom_full == True + assert burn._is_burning is False + assert 
burn._is_transiting is True + assert burn._is_boom_full is True assert burn._burn_rate == 0.14 + + assert np.isclose(burn._boom_capacity, 0, atol=0.01) assert self.sc.mass_balance['burned'] == 0 collected = self.sc.mass_balance['boomed'] + self.model.step() assert burn._burn_time == 1414.2857142857142 assert burn._burn_time_remaining <= burn._burn_time + assert burn._is_collecting is False + assert burn._is_cleaning is False + assert burn._is_burning is True assert np.isclose(collected, 1877.2886248344857, 0.001) - assert burn._is_collecting == False - assert burn._is_cleaning == False - assert burn._is_burning == True + self.model.step() - assert burn._is_transiting == False - assert burn._is_burning == True - assert burn._is_boom_full == False + assert burn._is_transiting is False + assert burn._is_burning is True + assert burn._is_boom_full is False + self.model.step() self.model.step() - assert burn._is_burning == False - assert burn._is_cleaning == True - assert np.isclose(self.sc.mass_balance['boomed'], 0) + assert burn._is_burning is False + assert burn._is_cleaning is True assert self.sc.mass_balance['boomed'] >= 0 - #assert np.isclose(self.sc.mass_balance['burned'], collected) + assert np.isclose(self.sc.mass_balance['boomed'], 0) + # assert np.isclose(self.sc.mass_balance['burned'], collected) + self.model.step() - assert burn._is_burning == False - assert burn._is_cleaning == True + assert burn._is_burning is False + assert burn._is_cleaning is True self.model.rewind() self.model.rewind() + for step in self.model: print 'amount in boom', self.sc.mass_balance['boomed'] assert self.sc.mass_balance['boomed'] >= 0 @@ -206,33 +237,36 @@ def test_serialization(self): b = TestROCBurn.burn ser = b.serialize() deser = Burn.deserialize(ser) + b2 = Burn.new_from_dict(deser) ser2 = b2.serialize() ser.pop('id') ser2.pop('id') - assert ser == ser2 + assert ser == ser2 def test_step(self, sample_model_fcn2): - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) 
+ self.sc, self.model = ROCTests.mk_objs(sample_model_fcn2) + self.reset_and_release() self.model.step() -class TestPlatform(ROCTests): +class TestPlatform(ROCTests): def test_construction(self): p = Platform() assert p.units == dict([(k, v[0]) for k, v in Platform._attr.items()]) - p = Platform(_name = "Test Platform") + + p = Platform(_name="Test Platform") assert p.transit_speed == 150 assert p.max_op_time == 4 - p = Platform(_name = "Test Platform", units = {'transit_speed': 'm/s'}) + + p = Platform(_name="Test Platform", units={'transit_speed': 'm/s'}) assert p.units['transit_speed'] == 'm/s' def test_serialization(self): p = Platform(_name='Test Platform') - import pprint as pp ser = p.serialize() pp.pprint(ser) deser = Platform.deserialize(ser) @@ -249,15 +283,16 @@ def test_serialization(self): ser2.pop('id') assert ser == ser2 -class TestRocChemDispersion(ROCTests): +class TestRocChemDispersion(ROCTests): + timeseries = np.array([(rel_time, rel_time + timedelta(hours=12.))]) disp = Disperse(name='test_disperse', transit=100, pass_length=4, -# dosage=1, + # dosage=1, cascade_on=False, cascade_distance=None, - timeseries=np.array([(rel_time, rel_time + timedelta(hours=12.))]), + timeseries=timeseries, loading_type='simultaneous', pass_type='bidirectional', disp_oil_ratio=None, @@ -266,26 +301,27 @@ class TestRocChemDispersion(ROCTests): units=None,) def test_construction(self): - d = Disperse(name='testname', - transit=100, - platform='Test Platform') - #payload in gallons, computation in gallons, so no conversion + _d = Disperse(name='testname', + transit=100, + platform='Test Platform') + # payload in gallons, computation in gallons, so no conversion def test_serialization(self): - import pprint as pp + timeseries = np.array([(rel_time, rel_time + timedelta(hours=12.))]) p = Disperse(name='test_disperse', - transit=100, - pass_length=4, -# dosage=1, - cascade_on=False, - cascade_distance=None, - timeseries=np.array([(rel_time, rel_time + 
timedelta(hours=12.))]), - loading_type='simultaneous', - pass_type='bidirectional', - disp_oil_ratio=None, - disp_eff=None, - platform='Test Platform', - units=None,) + transit=100, + pass_length=4, + # dosage=1, + cascade_on=False, + cascade_distance=None, + timeseries=timeseries, + loading_type='simultaneous', + pass_type='bidirectional', + disp_oil_ratio=None, + disp_eff=None, + platform='Test Platform', + units=None,) + ser = p.serialize() print 'Ser' pp.pprint(ser) @@ -304,204 +340,213 @@ def test_serialization(self): ser2.pop('id') ser['platform'].pop('id') ser2['platform'].pop('id') + assert ser['platform']['swath_width'] == 100.0 assert ser2['platform']['swath_width'] == 100.0 assert ser == ser2 def test_prepare_for_model_run(self, sample_model_fcn2): - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) + self.sc, self.model = ROCTests.mk_objs(sample_model_fcn2) + self.reset_and_release() self.disp.prepare_for_model_run(self.sc) -# assert self.sc.mass_balance[self.disp.id] == 0.0 + + # assert self.sc.mass_balance[self.disp.id] == 0.0 assert self.disp.cur_state == 'retired' assert len(self.sc.report[self.disp.id]) == 0 assert len(self.disp.timeseries) == 1 def test_prepare_for_model_step(self, sample_model_fcn2): + timeseries = np.array([(rel_time, rel_time + timedelta(hours=12.))]) disp = Disperse(name='test_disperse', - transit=100, - pass_length=4, - dosage=1, - cascade_on=False, - cascade_distance=None, - timeseries=np.array([(rel_time, rel_time + timedelta(hours=12.))]), - loading_type='simultaneous', - pass_type='bidirectional', - disp_oil_ratio=None, - disp_eff=None, - platform='Test Platform', - units=None,) - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) + transit=100, + pass_length=4, + dosage=1, + cascade_on=False, + cascade_distance=None, + timeseries=timeseries, + loading_type='simultaneous', + pass_type='bidirectional', + disp_oil_ratio=None, + disp_eff=None, + platform='Test Platform', + units=None,) + + self.sc, 
self.model = ROCTests.mk_objs(sample_model_fcn2) + self.model.weatherers += disp self.model.spills[0].amount = 1000 self.model.spills[0].units = 'gal' + self.reset_and_release() disp.prepare_for_model_run(self.sc) + print self.model.start_time print self.disp.timeseries assert disp.cur_state == 'retired' + self.model.step() print self.model.current_time_step + self.model.step() print self.model.spills.items()[0]['viscosity'] assert disp.cur_state == 'en_route' print disp._next_state_time + self.model.step() assert disp.cur_state == 'en_route' print disp.transit print disp.platform.transit_speed print disp.platform.one_way_transit_time(disp.transit)/60 + while disp.cur_state == 'en_route': self.model.step() off = self.model.current_time_step * self.model.time_step print self.model.start_time + timedelta(seconds=off) + print 'pump_rate ', disp.platform.eff_pump_rate(disp.dosage) + try: - for step in self.model: + for _step in self.model: off = self.model.current_time_step * self.model.time_step print self.model.start_time + timedelta(seconds=off) except StopIteration: pass def test_prepare_for_model_step_cont(self, sample_model_fcn2): + timeseries = np.array([(rel_time, rel_time + timedelta(hours=12.))]) disp = Disperse(name='test_disperse', - transit=100, - pass_length=4, -# dosage=1, - cascade_on=False, - cascade_distance=None, - timeseries=np.array([(rel_time, rel_time + timedelta(hours=12.))]), - loading_type='simultaneous', - pass_type='bidirectional', - disp_oil_ratio=None, - disp_eff=None, - platform='Test Platform', - units=None,) - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) + transit=100, + pass_length=4, + # dosage=1, + cascade_on=False, + cascade_distance=None, + timeseries=timeseries, + loading_type='simultaneous', + pass_type='bidirectional', + disp_oil_ratio=None, + disp_eff=None, + platform='Test Platform', + units=None,) + + self.sc, self.model = ROCTests.mk_objs(sample_model_fcn2) + self.model.weatherers += disp + 
self.model.spills[0].amount = 20000 self.model.spills[0].units = 'gal' - self.model.spills[0].end_release_time = self.model.start_time + timedelta(hours=3) + self.model.spills[0].end_release_time = (self.model.start_time + + timedelta(hours=3)) + self.reset_and_release() disp.prepare_for_model_run(self.sc) try: - for step in self.model: + for _step in self.model: off = self.model.current_time_step * self.model.time_step print self.model.start_time + timedelta(seconds=off) except StopIteration: pass -# print self.model.start_time -# print self.disp.timeseries -# assert self.disp.cur_state == 'retired' -# self.model.step() -# print self.model.current_time_step -# self.model.step() -# print self.model.spills.items()[0]['viscosity'] -# assert self.disp.cur_state == 'en_route' -# print self.disp._next_state_time -# self.model.step() -# assert self.disp.cur_state == 'en_route' -# print self.disp.transit -# print self.disp.platform.transit_speed -# print self.disp.platform.one_way_transit_time(self.disp.transit)/60 -# while self.disp.cur_state == 'en_route': -# self.model.step() -# off = self.model.current_time_step * self.model.time_step -# print self.model.start_time + timedelta(seconds=off) -# print 'pump_rate ', self.disp.platform.eff_pump_rate(self.disp.dosage) -# assert 'disperse' in self.disp.cur_state + + # print self.model.start_time + # print self.disp.timeseries + # assert self.disp.cur_state == 'retired' + + # self.model.step() + # print self.model.current_time_step + + # self.model.step() + # print self.model.spills.items()[0]['viscosity'] + # assert self.disp.cur_state == 'en_route' + # print self.disp._next_state_time + + # self.model.step() + # assert self.disp.cur_state == 'en_route' + # print self.disp.transit + # print self.disp.platform.transit_speed + # print self.disp.platform.one_way_transit_time(self.disp.transit)/60 + + # while self.disp.cur_state == 'en_route': + # self.model.step() + # off = self.model.current_time_step * self.model.time_step + 
# print self.model.start_time + timedelta(seconds=off) + # print ('pump_rate {}' + # .format(self.disp.platform.eff_pump_rate(self.disp.dosage))) + # assert 'disperse' in self.disp.cur_state + try: - for step in self.model: + for _step in self.model: off = self.model.current_time_step * self.model.time_step -# print '********', self.model.start_time + timedelta(seconds=off) -# print self.sc['mass'] -# print self.sc.mass_balance['dispersed'] + assert all(self.sc['mass'] >= 0) assert np.all(self.sc['mass_components'] >= 0) - assert self.sc.mass_balance['chem_dispersed'] + self.sc.mass_balance['evaporated'] < sum(self.sc['init_mass']) + assert ((self.sc.mass_balance['chem_dispersed'] + + self.sc.mass_balance['evaporated']) < + sum(self.sc['init_mass'])) except StopIteration: pass def test_prepare_for_model_step_boat(self, sample_model_fcn2): + timeseries = np.array([(rel_time, rel_time + timedelta(hours=12.))]) disp = Disperse(name='boat_disperse', - transit=20, - pass_length=4, -# dosage=1, - cascade_on=False, - cascade_distance=None, - timeseries=np.array([(rel_time, rel_time + timedelta(hours=12.))]), - loading_type='simultaneous', - pass_type='bidirectional', - disp_oil_ratio=None, - disp_eff=None, - platform='Typical Large Vessel', - units=None, - onsite_reload_refuel=True) - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) + transit=20, + pass_length=4, + # dosage=1, + cascade_on=False, + cascade_distance=None, + timeseries=timeseries, + loading_type='simultaneous', + pass_type='bidirectional', + disp_oil_ratio=None, + disp_eff=None, + platform='Typical Large Vessel', + units=None, + onsite_reload_refuel=True) + + self.sc, self.model = ROCTests.mk_objs(sample_model_fcn2) + self.model.weatherers += disp + self.model.spills[0].amount = 20000 self.model.spills[0].units = 'gal' - self.model.spills[0].end_release_time = self.model.start_time + timedelta(hours=3) + self.model.spills[0].end_release_time = (self.model.start_time + + timedelta(hours=3)) + 
self.reset_and_release() disp.prepare_for_model_run(self.sc) print self.model.start_time print self.disp.timeseries assert disp.cur_state == 'retired' + self.model.step() print self.model.current_time_step + self.model.step() print self.model.spills.items()[0]['viscosity'] assert disp.cur_state == 'en_route' print disp._next_state_time + self.model.step() assert disp.cur_state == 'en_route' print disp.transit print disp.platform.transit_speed - print disp.platform.one_way_transit_time(disp.transit)/60 + print disp.platform.one_way_transit_time(disp.transit) / 60 + while disp.cur_state == 'en_route': self.model.step() off = self.model.current_time_step * self.model.time_step print self.model.start_time + timedelta(seconds=off) + print 'pump_rate ', disp.platform.eff_pump_rate(disp.dosage) + try: - for step in self.model: + for _step in self.model: off = self.model.current_time_step * self.model.time_step print self.model.start_time + timedelta(seconds=off) except StopIteration: pass -# def test_reports(self, sample_model_fcn2): -# (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) -# self.reset_and_release() -# self.burn.boom_length = 3500.0 -# self.burn.prepare_for_model_run(self.sc) -# assert self.burn._swath_width == 1050 -# assert len(self.burn.report) == 2 -# -# def test_serialize(self, sample_model_fcn2): -# (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) -# self.reset_and_release() -# self.burn.serialize() -# -# def test_prepare_for_model_step(self, sample_model_fcn2): -# (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) -# self.reset_and_release() -# -# self.burn.prepare_for_model_run(self.sc) -# self.burn.prepare_for_model_step(self.sc, time_step, active_start) -# -# assert self.burn._active == True -# assert self.burn._ts_collected == 93576.38888888889 - - -# def test_inactive(self): -# d = Disperse(name='test', -# platform='Test Platform', -# timeseries=[(datetime(2000, 1, 1, 1, 0, 0), datetime(2000, 1, 1, 2, 0, 0))]) -# 
d.prepare_for_model_run() -# -# + class TestRocSkim(ROCTests): skim = Skim(speed=2.0, storage=2000.0, @@ -515,45 +560,53 @@ class TestRocSkim(ROCTests): decant=0.75, decant_pump=150.0, discharge_pump=1000.0, - rig_time=timedelta(minutes=30), - timeseries=[(datetime(2012, 9, 15, 12, 0), datetime(2012, 9, 16, 1, 0))], - transit_time=timedelta(hours=2)) + rig_time=timedelta(minutes=30).total_seconds(), + timeseries=[(datetime(2012, 9, 15, 12, 0), + datetime(2012, 9, 16, 1, 0))], + transit_time=timedelta(hours=2).total_seconds()) def test_prepare_for_model_run(self, sample_model_fcn2): - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) + self.sc, self.model = ROCTests.mk_objs(sample_model_fcn2) + self.reset_and_release() self.skim.prepare_for_model_run(self.sc) def test_prepare_for_model_step(self, sample_model_fcn2): - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) + self.sc, self.model = ROCTests.mk_objs(sample_model_fcn2) + self.reset_and_release() self.skim.prepare_for_model_run(self.sc) self.skim.prepare_for_model_step(self.sc, time_step, active_start) - assert self.skim._active == True + assert self.skim._active is True def test_weather_elements(self, sample_model_fcn2): - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) + self.sc, self.model = ROCTests.mk_objs(sample_model_fcn2) + self.reset_and_release() + skim = Skim(speed=2.0, - storage=2000.0, - swath_width=150, - group='A', - throughput=0.75, - nameplate_pump=100.0, - skim_efficiency_type='meh', - recovery=0.75, - recovery_ef=0.75, - decant=0.75, - decant_pump=150.0, - discharge_pump=1000.0, - rig_time=timedelta(minutes=30), - timeseries=[(datetime(2012, 9, 15, 12, 0), datetime(2012, 9, 16, 1, 0))], - transit_time=timedelta(hours=2)) + storage=2000.0, + swath_width=150, + group='A', + throughput=0.75, + nameplate_pump=100.0, + skim_efficiency_type='meh', + recovery=0.75, + recovery_ef=0.75, + decant=0.75, + decant_pump=150.0, + discharge_pump=1000.0, + 
rig_time=timedelta(minutes=30), + timeseries=[(datetime(2012, 9, 15, 12, 0), + datetime(2012, 9, 16, 1, 0))], + transit_time=timedelta(hours=2)) self.model.weatherers.append(skim) + self.model.rewind() + self.model.step() self.model.step() self.model.step() @@ -564,18 +617,23 @@ def test_serialization(self): ser = s.serialize() assert 'timeseries' in ser + deser = Skim.deserialize(ser) s2 = Skim.new_from_dict(deser) ser2 = s2.serialize() + ser.pop('id') ser2.pop('id') + assert ser == ser2 def test_model_save(self, sample_model_fcn2): s = TestRocSkim.skim - (self.sc, self.model) = ROCTests.mk_objs(sample_model_fcn2) + + self.sc, self.model = ROCTests.mk_objs(sample_model_fcn2) + self.model.weatherers.append(s) self.model.save('./') def test_model_load(self): - m = load('./Model.zip') + _m = load('./Model.zip') diff --git a/py_gnome/tests/unit_tests/test_weatherers/test_spreading.py b/py_gnome/tests/unit_tests/test_weatherers/test_spreading.py index 23b4f3860..8cf0b74bf 100644 --- a/py_gnome/tests/unit_tests/test_weatherers/test_spreading.py +++ b/py_gnome/tests/unit_tests/test_weatherers/test_spreading.py @@ -1,8 +1,7 @@ ''' Test Langmuir() - very simple object with only one method ''' - -from datetime import datetime, timedelta +from datetime import datetime import numpy as np import pytest @@ -45,10 +44,15 @@ def expected(self, init_vol, p_age, dbuoy=rel_buoy): k2 = self.spread.spreading_const[1] g = constants.gravity nu_h2o = water_viscosity - A0 = np.pi*(k2**4/k1**2)*(((init_vol)**5*g*dbuoy)/(nu_h2o**2))**(1./6.) - p_area = (np.pi*k2**2 * (init_vol**2 * g * dbuoy * p_age**1.5)**(1./3) - / (nu_h2o**(1./6.))) + A0 = (np.pi * + (k2 ** 4 / k1 ** 2) * + (((init_vol) ** 5 * g * dbuoy) / (nu_h2o ** 2)) ** (1. / 6.)) + + p_area = (np.pi * + k2 ** 2 * + (init_vol ** 2 * g * dbuoy * p_age ** 1.5) ** (1. / 3.) / + (nu_h2o ** (1. 
/ 6.))) return (A0, p_area) @@ -58,14 +62,12 @@ def test_exceptions(self): ''' with pytest.raises(ValueError): 'relative_bouyancy >= 0' - self.spread.init_area(water_viscosity, - -rel_buoy, - bulk_init_vol) + self.spread.init_area(water_viscosity, -rel_buoy, bulk_init_vol) with pytest.raises(ValueError): 'age must be > 0' - (bulk_init_volume, relative_bouyancy, age, area) = \ - data_arrays() + bulk_init_volume, relative_bouyancy, age, area = data_arrays() + self.spread.update_area(water_viscosity, relative_bouyancy, bulk_init_volume, @@ -78,11 +80,11 @@ def test_values_same_age(self, num): Compare output of _init_area and _update_thickness to expected output returned by self.expected() function. ''' - (bulk_init_volume, age, area) = \ - data_arrays(num) + bulk_init_volume, age, area = data_arrays(num) + area[:] = self.spread.init_area(water_viscosity, rel_buoy, - bulk_init_volume[0])/len(area) + bulk_init_volume[0]) / len(area) # bulk_init_volume[0] and age[0] represents the volume and age of all # particles released at once @@ -104,15 +106,16 @@ def test_values_vary_age(self): test update_area works correctly for a continuous spill with varying age array ''' - (bulk_init_volume, age, area) = \ - data_arrays(10) - (a0, area_900) = self.expected(bulk_init_volume[0], 900) + bulk_init_volume, age, area = data_arrays(10) + a0, area_900 = self.expected(bulk_init_volume[0], 900) + age[0::2] = 900 - area[0::2] = a0/len(area[0::2]) # initialize else divide by 0 error + area[0::2] = a0 / len(area[0::2]) # initialize else divide by 0 error + + a0, area_1800 = self.expected(bulk_init_volume[1], 1800) - (a0, area_1800) = self.expected(bulk_init_volume[1], 1800) age[1::2] = 1800 - area[1::2] = a0/len(area[1::2]) # initialize else divide by 0 error + area[1::2] = a0 / len(area[1::2]) # initialize else divide by 0 error # now invoke update_area area[:] = self.spread.update_area(water_viscosity, @@ -120,6 +123,7 @@ def test_values_vary_age(self): bulk_init_volume, area, age) + 
assert np.isclose(area[0::2].sum(), area_900) assert np.isclose(area[1::2].sum(), area_1800) @@ -127,16 +131,18 @@ def test_values_vary_age_bulk_init_vol(self): ''' vary bulk_init_vol and age ''' - (bulk_init_volume, age, area) = \ - data_arrays(10) + bulk_init_volume, age, area = data_arrays(10) + age[0::2] = 900 bulk_init_volume[0::2] = 60 - (a0, area_900) = self.expected(bulk_init_volume[0], age[0], rel_buoy) - area[0::2] = a0/len(area[0::2]) # initialize else divide by 0 error + + a0, area_900 = self.expected(bulk_init_volume[0], age[0], rel_buoy) + area[0::2] = a0 / len(area[0::2]) # initialize else divide by 0 error age[1::2] = 1800 - (a0, area_1800) = self.expected(bulk_init_volume[1], age[1]) - area[1::2] = a0/len(area[1::2]) # initialize else divide by 0 error + a0, area_1800 = self.expected(bulk_init_volume[1], age[1]) + + area[1::2] = a0 / len(area[1::2]) # initialize else divide by 0 error # now invoke update_area area[:] = self.spread.update_area(water_viscosity, @@ -144,6 +150,7 @@ def test_values_vary_age_bulk_init_vol(self): bulk_init_volume, area, age) + assert np.isclose(area[0::2].sum(), area_900) assert np.isclose(area[1::2].sum(), area_1800) @@ -151,8 +158,8 @@ def test_minthickness_values(self): ''' tests that when blob reaches minimum thickness, area no longer changes ''' - (bulk_init_volume, age, area) = \ - data_arrays() + bulk_init_volume, age, area = data_arrays() + area[:] = self.spread.init_area(water_viscosity, rel_buoy, bulk_init_volume[0]) @@ -165,7 +172,7 @@ def test_minthickness_values(self): bulk_init_volume[0]) age[:4] = np.ceil(time) # divide max blob area into 4 LEs - i_area = bulk_init_volume[0]/self.spread.thickness_limit/4 + i_area = bulk_init_volume[0] / self.spread.thickness_limit / 4 age[4:] = 900 @@ -174,6 +181,7 @@ def test_minthickness_values(self): bulk_init_volume, area, age) + assert np.all(area[:4] == i_area) assert np.all(area[4:] < i_area) @@ -184,45 +192,49 @@ class TestLangmuir(ObjForTests): model_time = 
datetime(2015, 1, 1, 12, 0) water = Water() - l = Langmuir(water, wind) - (vmin, vmax) = l._wind_speed_bound(rel_buoy, thick) + lang = Langmuir(water, wind) + + vmin, vmax = lang._wind_speed_bound(rel_buoy, thick) - (sc, weatherers) = ObjForTests.mk_test_objs(water) + sc, weatherers = ObjForTests.mk_test_objs(water) def test_init(self): - l = Langmuir(self.water, self.wind) - assert l.wind is self.wind + langmuir = Langmuir(self.water, self.wind) + assert langmuir.wind is self.wind - @pytest.mark.parametrize(("l", "speed", "exp_bound"), - [(l, vmin - 0.01 * vmin, 1.0), - (l, vmax + 0.01 * vmax, 0.1)]) - def test_speed_bounds(self, l, speed, exp_bound): + @pytest.mark.parametrize(("langmuir", "speed", "exp_bound"), + [(lang, vmin - 0.01 * vmin, 1.0), + (lang, vmax + 0.01 * vmax, 0.1)]) + def test_speed_bounds(self, langmuir, speed, exp_bound): ''' Check that the input speed for Langmuir object returns frac_coverage within bounds: 0.1 <= frac_cov <= 1.0 ''' - self.l.wind.timeseries = (self.l.wind.timeseries['time'][0], - (speed, 0.0)) + self.lang.wind.timeseries = (self.lang.wind.timeseries['time'][0], + (speed, 0.0)) # rel_buoy is always expected to be a numpy array - frac_cov = l._get_frac_coverage(np.array([0,0]), - self.model_time, - np.asarray([rel_buoy]), - self.thick) + frac_cov = langmuir._get_frac_coverage(np.array([0, 0]), + self.model_time, + np.asarray([rel_buoy]), + self.thick) assert frac_cov == exp_bound - @pytest.mark.skipif(reason="serialization for weatherers overall needs review") + @pytest.mark.skipif(reason='serialization for weatherers overall ' + 'needs review') def test_update_from_dict(self): ''' - just a simple test to ensure schema/serialize/deserialize is correclty + just a simple test to ensure schema/serialize/deserialize is correctly setup ''' j = self.l.serialize() - j['wind']['timeseries'][0] = \ - (j['wind']['timeseries'][0][0], - (j['wind']['timeseries'][0][1][0] + 1, 0)) + j['wind']['timeseries'][0] = 
(j['wind']['timeseries'][0][0], + (j['wind']['timeseries'][0][1][0] + 1, 0) + ) + updated = self.l.update_from_dict(Langmuir.deserialize(j)) + assert updated assert self.l.serialize() == j @@ -232,20 +244,20 @@ def test_weather_elements(self): ''' use ObjMakeTests from test_cleanup to setup test Langmuir weather_elements must be called after weather elements - for other objectss + for other objects ''' - l = Langmuir(self.water, constant_wind(5., 0.)) + langmuir = Langmuir(self.water, constant_wind(5., 0.)) - self.prepare_test_objs(l.array_types) - l.prepare_for_model_run(self.sc) + self.prepare_test_objs(langmuir.array_types) + langmuir.prepare_for_model_run(self.sc) # create WeatheringData object, initialize instantaneously released # elements model_time = self.sc.spills[0].release_time time_step = 900. self.release_elements(time_step, model_time) - self.step(l, time_step, model_time) + self.step(langmuir, time_step, model_time) - assert l.active + assert langmuir.active assert np.all(self.sc['area'] < self.sc['fay_area']) assert np.all(self.sc['frac_coverage'] < 1.0)