diff --git a/.buildinfo b/.buildinfo new file mode 100644 index 000000000..3aa329107 --- /dev/null +++ b/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. +config: 3a4f3931352ab9be6d7f5e2ab219f8c4 +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git a/_autosummary/reV.SAM.SAM.RevPySam.html b/_autosummary/reV.SAM.SAM.RevPySam.html new file mode 100644 index 000000000..ad63b0530 --- /dev/null +++ b/_autosummary/reV.SAM.SAM.RevPySam.html @@ -0,0 +1,882 @@ + + + + + + + reV.SAM.SAM.RevPySam — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.SAM.RevPySam

+
+
+class RevPySam(meta, sam_sys_inputs, output_request, site_sys_inputs=None)[source]
+

Bases: Sam

+

Base class for reV-SAM simulations (generation and econ).

+

Initialize a SAM object.

+
+
Parameters:
+
    +
  • meta (pd.DataFrame | pd.Series | None) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone. Can be None for econ runs.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

collect_outputs(output_lookup)

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

attr_dict

Get the hierarchical PySAM object attribute dictionary.

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+static get_sam_res(*args, **kwargs)[source]
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+static drop_leap(resource)[source]
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)[source]
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not remove last day

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+static make_datetime(series)[source]
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+classmethod get_time_interval(time_index)[source]
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+outputs_to_utc_arr()[source]
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+collect_outputs(output_lookup)[source]
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict) – Lookup dictionary mapping output keys to special output methods.

+
+
+
+ +
+
+assign_inputs()[source]
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+PYSAM = <module 'PySAM.GenericSystem' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/GenericSystem.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+classmethod default()
+

Get the executed default pysam object.

+
+
Returns:
+

PySAM.GenericSystem

+
+
+
+ +
+
+execute()[source]
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.SAM.Sam.html b/_autosummary/reV.SAM.SAM.Sam.html new file mode 100644 index 000000000..2cc3bb693 --- /dev/null +++ b/_autosummary/reV.SAM.SAM.Sam.html @@ -0,0 +1,740 @@ + + + + + + + reV.SAM.SAM.Sam — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.SAM.Sam

+
+
+class Sam[source]
+

Bases: object

+

reV wrapper on the PySAM framework.

+

Methods

+ + + + + + + + + + + + +

assign_inputs(inputs[, raise_warning])

Assign a flat dictionary of inputs to the PySAM object.

default()

Get the executed default pysam object.

execute()

Call the PySAM execute method.

+

Attributes

+ + + + + + + + + + + + + + + +

IGNORE_ATTRS

attr_dict

Get the hierarchical PySAM object attribute dictionary.

input_list

Get the list of lowest level input attribute/variable names.

pysam

Get the pysam object.

+
+
+PYSAM = <module 'PySAM.GenericSystem' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/GenericSystem.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod default()[source]
+

Get the executed default pysam object.

+
+
Returns:
+

PySAM.GenericSystem

+
+
+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+execute()[source]
+

Call the PySAM execute method. Raise SAMExecutionError if error.

+
+ +
+
+assign_inputs(inputs, raise_warning=False)[source]
+

Assign a flat dictionary of inputs to the PySAM object.

+
+
Parameters:
+
    +
  • inputs (dict) – Flat (single-level) dictionary of PySAM inputs.

  • +
  • raise_warning (bool) – Flag to raise a warning for inputs that are not set because they +are not found in the PySAM object.

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.SAM.SamResourceRetriever.html b/_autosummary/reV.SAM.SAM.SamResourceRetriever.html new file mode 100644 index 000000000..15ad454db --- /dev/null +++ b/_autosummary/reV.SAM.SAM.SamResourceRetriever.html @@ -0,0 +1,699 @@ + + + + + + + reV.SAM.SAM.SamResourceRetriever — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.SAM.SamResourceRetriever

+
+
+class SamResourceRetriever[source]
+

Bases: object

+

Factory utility to get the SAM resource handler.

+

Methods

+ + + + + + +

get(res_file, project_points, module[, ...])

Get the SAM resource iterator object (single year, single file).

+

Attributes

+ + + + + + +

RESOURCE_TYPES

+
+
+classmethod get(res_file, project_points, module, output_request=('cf_mean',), gid_map=None, lr_res_file=None, nn_map=None, bias_correct=None)[source]
+

Get the SAM resource iterator object (single year, single file).

+
+
Parameters:
+
    +
  • res_file (str) – Single resource file (with full path) to retrieve.

  • +
  • project_points (reV.config.ProjectPoints) – reV Project Points instance used to retrieve resource data at a +specific set of sites.

  • +
  • module (str) – SAM module name or reV technology to force interpretation +of the resource file type. +Example: module set to ‘pvwatts’ or ‘tcsmolten’ means that this +expects a SolarResource file. If ‘nsrdb’ is in the res_file name, +the NSRDB handler will be used.

  • +
  • output_request (list | tuple, optional) – Outputs to retrieve from SAM, by default (‘cf_mean’, )

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

res (reV.resource.SAMResource) – Resource iterator object to pass to SAM.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.SAM.html b/_autosummary/reV.SAM.SAM.html new file mode 100644 index 000000000..7dd703c98 --- /dev/null +++ b/_autosummary/reV.SAM.SAM.html @@ -0,0 +1,646 @@ + + + + + + + reV.SAM.SAM — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.SAM

+

reV-to-SAM interface module.

+

Wraps the NREL-PySAM library with additional reV features.

+

Classes

+ + + + + + + + + + + + +

RevPySam(meta, sam_sys_inputs, output_request)

Base class for reV-SAM simulations (generation and econ).

Sam()

reV wrapper on the PySAM framework.

SamResourceRetriever()

Factory utility to get the SAM resource handler.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.AbstractDefaultFromConfigFile.html b/_autosummary/reV.SAM.defaults.AbstractDefaultFromConfigFile.html new file mode 100644 index 000000000..5b847bc67 --- /dev/null +++ b/_autosummary/reV.SAM.defaults.AbstractDefaultFromConfigFile.html @@ -0,0 +1,675 @@ + + + + + + + reV.SAM.defaults.AbstractDefaultFromConfigFile — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults.AbstractDefaultFromConfigFile

+
+
+class AbstractDefaultFromConfigFile[source]
+

Bases: object

+

Class for default PySAM object from a config file.

+

Methods

+ + + + + + +

init_default_pysam_obj()

Initialize a default PySAM object from a config file.

+

Attributes

+ + + + + + + + + +

CONFIG_FILE_NAME

Name of JSON config file containing default PySAM inputs.

PYSAM_MODULE

PySAM module to initialize (e.g.

+
+
+abstract property CONFIG_FILE_NAME
+

Name of JSON config file containing default PySAM inputs.

+
+ +
+
+abstract property PYSAM_MODULE
+

PySAM module to initialize (e.g. Pvwattsv5, Geothermal, etc.).

+
+ +
+
+classmethod init_default_pysam_obj()[source]
+

Initialize a default PySAM object from a config file.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.DefaultGeothermal.html b/_autosummary/reV.SAM.defaults.DefaultGeothermal.html new file mode 100644 index 000000000..131fa6880 --- /dev/null +++ b/_autosummary/reV.SAM.defaults.DefaultGeothermal.html @@ -0,0 +1,674 @@ + + + + + + + reV.SAM.defaults.DefaultGeothermal — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults.DefaultGeothermal

+
+
+class DefaultGeothermal[source]
+

Bases: AbstractDefaultFromConfigFile

+

Class for default Geothermal

+

Methods

+ + + + + + + + + +

default()

Get the default PySAM Geothermal object

init_default_pysam_obj()

Initialize a default PySAM object from a config file.

+

Attributes

+ + + + + + +

CONFIG_FILE_NAME

+
+
+PYSAM_MODULE = <module 'PySAM.Geothermal' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/Geothermal.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+static default()[source]
+

Get the default PySAM Geothermal object

+
+ +
+
+classmethod init_default_pysam_obj()
+

Initialize a default PySAM object from a config file.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.DefaultLCOE.html b/_autosummary/reV.SAM.defaults.DefaultLCOE.html new file mode 100644 index 000000000..6ca95845c --- /dev/null +++ b/_autosummary/reV.SAM.defaults.DefaultLCOE.html @@ -0,0 +1,652 @@ + + + + + + + reV.SAM.defaults.DefaultLCOE — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults.DefaultLCOE

+
+
+class DefaultLCOE[source]
+

Bases: object

+

Class for default LCOE calculator

+

Methods

+ + + + + + +

default()

Get the default PySAM object

+
+
+static default()[source]
+

Get the default PySAM object

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.DefaultLinearFresnelDsgIph.html b/_autosummary/reV.SAM.defaults.DefaultLinearFresnelDsgIph.html new file mode 100644 index 000000000..dd7ca783a --- /dev/null +++ b/_autosummary/reV.SAM.defaults.DefaultLinearFresnelDsgIph.html @@ -0,0 +1,652 @@ + + + + + + + reV.SAM.defaults.DefaultLinearFresnelDsgIph — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults.DefaultLinearFresnelDsgIph

+
+
+class DefaultLinearFresnelDsgIph[source]
+

Bases: object

+

Class for default linear direct steam heat

+

Methods

+ + + + + + +

default()

Get the default PySAM object

+
+
+static default()[source]
+

Get the default PySAM object

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.DefaultMhkWave.html b/_autosummary/reV.SAM.defaults.DefaultMhkWave.html new file mode 100644 index 000000000..c03e7c158 --- /dev/null +++ b/_autosummary/reV.SAM.defaults.DefaultMhkWave.html @@ -0,0 +1,652 @@ + + + + + + + reV.SAM.defaults.DefaultMhkWave — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults.DefaultMhkWave

+
+
+class DefaultMhkWave[source]
+

Bases: object

+

Class for default mhkwave

+

Methods

+ + + + + + +

default()

Get the default PySAM object

+
+
+static default()[source]
+

Get the default PySAM object

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.DefaultPvSamv1.html b/_autosummary/reV.SAM.defaults.DefaultPvSamv1.html new file mode 100644 index 000000000..1d5704210 --- /dev/null +++ b/_autosummary/reV.SAM.defaults.DefaultPvSamv1.html @@ -0,0 +1,652 @@ + + + + + + + reV.SAM.defaults.DefaultPvSamv1 — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults.DefaultPvSamv1

+
+
+class DefaultPvSamv1[source]
+

Bases: object

+

class for default detailed PV

+

Methods

+ + + + + + +

default()

Get the default PySAM Pvsamv1 object

+
+
+static default()[source]
+

Get the default PySAM Pvsamv1 object

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.DefaultPvWattsv5.html b/_autosummary/reV.SAM.defaults.DefaultPvWattsv5.html new file mode 100644 index 000000000..089736ff3 --- /dev/null +++ b/_autosummary/reV.SAM.defaults.DefaultPvWattsv5.html @@ -0,0 +1,674 @@ + + + + + + + reV.SAM.defaults.DefaultPvWattsv5 — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults.DefaultPvWattsv5

+
+
+class DefaultPvWattsv5[source]
+

Bases: AbstractDefaultFromConfigFile

+

Class for default PVWattsv5

+

Methods

+ + + + + + + + + +

default()

Get the default PySAM pvwattsv5 object

init_default_pysam_obj()

Initialize a default PySAM object from a config file.

+

Attributes

+ + + + + + +

CONFIG_FILE_NAME

+
+
+PYSAM_MODULE = <module 'PySAM.Pvwattsv5' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/Pvwattsv5.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+static default()[source]
+

Get the default PySAM pvwattsv5 object

+
+ +
+
+classmethod init_default_pysam_obj()
+

Initialize a default PySAM object from a config file.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.DefaultPvWattsv8.html b/_autosummary/reV.SAM.defaults.DefaultPvWattsv8.html new file mode 100644 index 000000000..4da1f506a --- /dev/null +++ b/_autosummary/reV.SAM.defaults.DefaultPvWattsv8.html @@ -0,0 +1,652 @@ + + + + + + + reV.SAM.defaults.DefaultPvWattsv8 — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults.DefaultPvWattsv8

+
+
+class DefaultPvWattsv8[source]
+

Bases: object

+

class for default PVWattsv8

+

Methods

+ + + + + + +

default()

Get the default PySAM pvwattsv8 object

+
+
+static default()[source]
+

Get the default PySAM pvwattsv8 object

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.DefaultSingleOwner.html b/_autosummary/reV.SAM.defaults.DefaultSingleOwner.html new file mode 100644 index 000000000..c5b316d9e --- /dev/null +++ b/_autosummary/reV.SAM.defaults.DefaultSingleOwner.html @@ -0,0 +1,652 @@ + + + + + + + reV.SAM.defaults.DefaultSingleOwner — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults.DefaultSingleOwner

+
+
+class DefaultSingleOwner[source]
+

Bases: object

+

class for default Single Owner (PPA) calculator

+

Methods

+ + + + + + +

default()

Get the default PySAM object

+
+
+static default()[source]
+

Get the default PySAM object

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.DefaultSwh.html b/_autosummary/reV.SAM.defaults.DefaultSwh.html new file mode 100644 index 000000000..2c38d7eb6 --- /dev/null +++ b/_autosummary/reV.SAM.defaults.DefaultSwh.html @@ -0,0 +1,652 @@ + + + + + + + reV.SAM.defaults.DefaultSwh — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults.DefaultSwh

+
+
+class DefaultSwh[source]
+

Bases: object

+

Class for default solar water heating

+

Methods

+ + + + + + +

default()

Get the default PySAM object

+
+
+static default()[source]
+

Get the default PySAM object

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.DefaultTcsMoltenSalt.html b/_autosummary/reV.SAM.defaults.DefaultTcsMoltenSalt.html new file mode 100644 index 000000000..c064a7d92 --- /dev/null +++ b/_autosummary/reV.SAM.defaults.DefaultTcsMoltenSalt.html @@ -0,0 +1,652 @@ + + + + + + + reV.SAM.defaults.DefaultTcsMoltenSalt — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults.DefaultTcsMoltenSalt

+
+
+class DefaultTcsMoltenSalt[source]
+

Bases: object

+

Class for default CSP

+

Methods

+ + + + + + +

default()

Get the default PySAM object

+
+
+static default()[source]
+

Get the default PySAM object

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.DefaultTroughPhysicalProcessHeat.html b/_autosummary/reV.SAM.defaults.DefaultTroughPhysicalProcessHeat.html new file mode 100644 index 000000000..372e1ec6f --- /dev/null +++ b/_autosummary/reV.SAM.defaults.DefaultTroughPhysicalProcessHeat.html @@ -0,0 +1,652 @@ + + + + + + + reV.SAM.defaults.DefaultTroughPhysicalProcessHeat — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults.DefaultTroughPhysicalProcessHeat

+
+
+class DefaultTroughPhysicalProcessHeat[source]
+

Bases: object

+

Class for default parabolic trough process heat

+

Methods

+ + + + + + +

default()

Get the default PySAM object

+
+
+static default()[source]
+

Get the default PySAM object

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.DefaultWindPower.html b/_autosummary/reV.SAM.defaults.DefaultWindPower.html new file mode 100644 index 000000000..dedf26d89 --- /dev/null +++ b/_autosummary/reV.SAM.defaults.DefaultWindPower.html @@ -0,0 +1,652 @@ + + + + + + + reV.SAM.defaults.DefaultWindPower — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults.DefaultWindPower

+
+
+class DefaultWindPower[source]
+

Bases: object

+

Class for default windpower

+

Methods

+ + + + + + +

default()

Get the default PySAM object

+
+
+static default()[source]
+

Get the default PySAM object

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.defaults.html b/_autosummary/reV.SAM.defaults.html new file mode 100644 index 000000000..e6550e99f --- /dev/null +++ b/_autosummary/reV.SAM.defaults.html @@ -0,0 +1,675 @@ + + + + + + + reV.SAM.defaults — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.defaults

+

PySAM default implementations.

+

Classes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

AbstractDefaultFromConfigFile()

Class for default PySAM object from a config file.

DefaultGeothermal()

Class for default Geothermal

DefaultLCOE()

Class for default LCOE calculator

DefaultLinearFresnelDsgIph()

Class for default linear direct steam heat

DefaultMhkWave()

Class for default mhkwave

DefaultPvSamv1()

class for default detailed PV

DefaultPvWattsv5()

Class for default PVWattsv5

DefaultPvWattsv8()

class for default PVWattsv8

DefaultSingleOwner()

class for default Single Owner (PPA) calculator

DefaultSwh()

Class for default solar water heating

DefaultTcsMoltenSalt()

Class for default CSP

DefaultTroughPhysicalProcessHeat()

Class for default parabolic trough process heat

DefaultWindPower()

Class for default windpower

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.econ.Economic.html b/_autosummary/reV.SAM.econ.Economic.html new file mode 100644 index 000000000..4cb6e4953 --- /dev/null +++ b/_autosummary/reV.SAM.econ.Economic.html @@ -0,0 +1,966 @@ + + + + + + + reV.SAM.econ.Economic — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.econ.Economic

+
+
+class Economic(sam_sys_inputs, site_sys_inputs=None, output_request='lcoe_fcr')[source]
+

Bases: RevPySam

+

Base class for SAM economic models.

+

Initialize a SAM economic model object.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list | tuple | str) – Requested SAM output(s) (e.g., ‘ppa_price’, ‘lcoe_fcr’).

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

collect_outputs()

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

flip_actual_irr()

Get actual IRR (from PPA/SingleOwner model).

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

gross_revenue()

Get cash flow total revenue (from PPA/SingleOwner model).

lcoe_fcr()

Get LCOE ($/MWh).

lcoe_nom()

Get nominal LCOE ($/MWh) (from PPA/SingleOwner model).

lcoe_real()

Get real LCOE ($/MWh) (from PPA/SingleOwner model).

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

npv()

Get net present value (NPV) ($).

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

ppa_price()

Get PPA price ($/MWh).

reV_run(site, site_df, inputs, output_request)

Run the SAM econ model for a single site.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

attr_dict

Get the hierarchical PySAM object attribute dictionary.

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+ppa_price()[source]
+

Get PPA price ($/MWh).

+

Native units are cents/kWh, mult by 10 for $/MWh.

+
+ +
+
+npv()[source]
+

Get net present value (NPV) ($).

+

Native units are dollars.

+
+ +
+
+lcoe_fcr()[source]
+

Get LCOE ($/MWh).

+

Native units are $/kWh, mult by 1000 for $/MWh.

+
+ +
+
+lcoe_nom()[source]
+

Get nominal LCOE ($/MWh) (from PPA/SingleOwner model).

+

Native units are cents/kWh, mult by 10 for $/MWh.

+
+ +
+
+lcoe_real()[source]
+

Get real LCOE ($/MWh) (from PPA/SingleOwner model).

+

Native units are cents/kWh, mult by 10 for $/MWh.

+
+ +
+
+flip_actual_irr()[source]
+

Get actual IRR (from PPA/SingleOwner model).

+

Native units are %.

+
+ +
+
+gross_revenue()[source]
+

Get cash flow total revenue (from PPA/SingleOwner model).

+

Native units are $.

+
+ +
+
+collect_outputs()[source]
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+ +
+
+classmethod reV_run(site, site_df, inputs, output_request)[source]
+

Run the SAM econ model for a single site.

+
+
Parameters:
+
    +
  • site (int) – Site gid.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • inputs (dict) – Dictionary of SAM system input parameters.

  • +
  • output_request (list | tuple | str) – Requested SAM output(s) (e.g., ‘ppa_price’, ‘lcoe_fcr’).

  • +
+
+
Returns:
+

sim.outputs (dict) – Dictionary keyed by SAM variable names with SAM numerical results.

+
+
+
+ +
+
+PYSAM = <module 'PySAM.GenericSystem' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/GenericSystem.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+classmethod default()
+

Get the executed default pysam object.

+
+
Returns:
+

PySAM.GenericSystem

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not remove last day

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.econ.LCOE.html b/_autosummary/reV.SAM.econ.LCOE.html new file mode 100644 index 000000000..df90943a4 --- /dev/null +++ b/_autosummary/reV.SAM.econ.LCOE.html @@ -0,0 +1,962 @@ + + + + + + + reV.SAM.econ.LCOE — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.econ.LCOE

+
+
+class LCOE(sam_sys_inputs, site_sys_inputs=None, output_request=('lcoe_fcr',))[source]
+

Bases: Economic

+

SAM LCOE model.

+

Initialize a SAM LCOE economic model object.

+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

collect_outputs()

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam LCOE FCR object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

flip_actual_irr()

Get actual IRR (from PPA/SingleOwner model).

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

gross_revenue()

Get cash flow total revenue (from PPA/SingleOwner model).

lcoe_fcr()

Get LCOE ($/MWh).

lcoe_nom()

Get nominal LCOE ($/MWh) (from PPA/SingleOwner model).

lcoe_real()

Get real LCOE ($/MWh) (from PPA/SingleOwner model).

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

npv()

Get net present value (NPV) ($).

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

ppa_price()

Get PPA price ($/MWh).

reV_run(points_control, site_df, cf_file, year)

Execute SAM LCOE simulations based on a reV points control instance.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

attr_dict

Get the hierarchical PySAM object attribute dictionary.

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.Lcoefcr' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/Lcoefcr.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+static default()[source]
+

Get the executed default pysam LCOE FCR object.

+
+
Returns:
+

PySAM.Lcoefcr

+
+
+
+ +
+
+classmethod reV_run(points_control, site_df, cf_file, year, output_request=('lcoe_fcr',))[source]
+

Execute SAM LCOE simulations based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • cf_file (str) – reV generation capacity factor output file with path.

  • +
  • year (int | str | None) – reV generation year to calculate econ for. Looks for cf_mean_{year} +or cf_profile_{year}. None will default to a non-year-specific cf +dataset (cf_mean, cf_profile).

  • +
  • output_request (list | tuple | str) – Output(s) to retrieve from SAM.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+collect_outputs()
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day.

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+flip_actual_irr()
+

Get actual IRR (from PPA/SingleOwner model).

+

Native units are %.

+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+gross_revenue()
+

Get cash flow total revenue (from PPA/SingleOwner model).

+

Native units are $.

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+lcoe_fcr()
+

Get LCOE ($/MWh).

+

Native units are $/kWh, mult by 1000 for $/MWh.

+
+ +
+
+lcoe_nom()
+

Get nominal LCOE ($/MWh) (from PPA/SingleOwner model).

+

Native units are cents/kWh, mult by 10 for $/MWh.

+
+ +
+
+lcoe_real()
+

Get real LCOE ($/MWh) (from PPA/SingleOwner model).

+

Native units are cents/kWh, mult by 10 for $/MWh.

+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+npv()
+

Get net present value (NPV) ($).

+

Native units are dollars.

+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+ppa_price()
+

Get PPA price ($/MWh).

+

Native units are cents/kWh, mult by 10 for $/MWh.

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.econ.SingleOwner.html b/_autosummary/reV.SAM.econ.SingleOwner.html new file mode 100644 index 000000000..85d4f34c9 --- /dev/null +++ b/_autosummary/reV.SAM.econ.SingleOwner.html @@ -0,0 +1,962 @@ + + + + + + + reV.SAM.econ.SingleOwner — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.econ.SingleOwner

+
+
+class SingleOwner(sam_sys_inputs, site_sys_inputs=None, output_request=('ppa_price',))[source]
+

Bases: Economic

+

SAM single owner economic model.

+

Initialize a SAM single owner economic model object.

+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

collect_outputs()

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam Single Owner object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

flip_actual_irr()

Get actual IRR (from PPA/SingleOwner model).

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

gross_revenue()

Get cash flow total revenue (from PPA/SingleOwner model).

lcoe_fcr()

Get LCOE ($/MWh).

lcoe_nom()

Get nominal LCOE ($/MWh) (from PPA/SingleOwner model).

lcoe_real()

Get real LCOE ($/MWh) (from PPA/SingleOwner model).

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

npv()

Get net present value (NPV) ($).

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

ppa_price()

Get PPA price ($/MWh).

reV_run(points_control, site_df, cf_file, year)

Execute SAM SingleOwner simulations based on reV points control.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

attr_dict

Get the hierarchical PySAM object attribute dictionary.

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.Singleowner' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/Singleowner.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+static default()[source]
+

Get the executed default pysam Single Owner object.

+
+
Returns:
+

PySAM.Singleowner

+
+
+
+ +
+
+collect_outputs()[source]
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property. This includes windbos outputs.

+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day.

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+flip_actual_irr()
+

Get actual IRR (from PPA/SingleOwner model).

+

Native units are %.

+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+gross_revenue()
+

Get cash flow total revenue (from PPA/SingleOwner model).

+

Native units are $.

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+lcoe_fcr()
+

Get LCOE ($/MWh).

+

Native units are $/kWh, mult by 1000 for $/MWh.

+
+ +
+
+lcoe_nom()
+

Get nominal LCOE ($/MWh) (from PPA/SingleOwner model).

+

Native units are cents/kWh, mult by 10 for $/MWh.

+
+ +
+
+lcoe_real()
+

Get real LCOE ($/MWh) (from PPA/SingleOwner model).

+

Native units are cents/kWh, mult by 10 for $/MWh.

+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+npv()
+

Get net present value (NPV) ($).

+

Native units are dollars.

+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+ppa_price()
+

Get PPA price ($/MWh).

+

Native units are cents/kWh, mult by 10 for $/MWh.

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, site_df, cf_file, year, output_request=('ppa_price',))[source]
+

Execute SAM SingleOwner simulations based on reV points control.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • cf_file (str) – reV generation capacity factor output file with path.

  • +
  • year (int | str | None) – reV generation year to calculate econ for. Looks for cf_mean_{year} +or cf_profile_{year}. None will default to a non-year-specific cf +dataset (cf_mean, cf_profile).

  • +
  • output_request (list | tuple | str) – Output(s) to retrieve from SAM.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.econ.html b/_autosummary/reV.SAM.econ.html new file mode 100644 index 000000000..5bc5138bb --- /dev/null +++ b/_autosummary/reV.SAM.econ.html @@ -0,0 +1,647 @@ + + + + + + + reV.SAM.econ — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.econ

+

reV-to-SAM econ interface module.

+

Wraps the NREL-PySAM lcoefcr and singleowner modules with +additional reV features.

+

Classes

+ + + + + + + + + + + + +

Economic(sam_sys_inputs[, site_sys_inputs, ...])

Base class for SAM economic models.

LCOE(sam_sys_inputs[, site_sys_inputs, ...])

SAM LCOE model.

SingleOwner(sam_sys_inputs[, ...])

SAM single owner economic model.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.AbstractSamGeneration.html b/_autosummary/reV.SAM.generation.AbstractSamGeneration.html new file mode 100644 index 000000000..d12f0323f --- /dev/null +++ b/_autosummary/reV.SAM.generation.AbstractSamGeneration.html @@ -0,0 +1,1178 @@ + + + + + + + reV.SAM.generation.AbstractSamGeneration — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.AbstractSamGeneration

+
+
+class AbstractSamGeneration(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: RevPySam, ScheduledLossesMixin, ABC

+

Base class for SAM generation simulations.

+

Initialize a SAM generation object.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation with possible follow-on econ analysis.

set_resource_data(resource, meta)

Placeholder for resource data setting (nsrdb or wtk)

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+check_resource_data(resource)[source]
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+abstract set_resource_data(resource, meta)[source]
+

Placeholder for resource data setting (nsrdb or wtk)

+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)[source]
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Dataframe or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+cf_mean()[source]
+

Get mean capacity factor (fractional) from SAM.

+
+
Returns:
+

output (float) – Mean capacity factor (fractional).

+
+
+
+ +
+
+cf_profile()[source]
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+annual_energy()[source]
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+energy_yield()[source]
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+gen_profile()[source]
+

Get power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of hourly power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+collect_outputs(output_lookup=None)[source]
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+run_gen_and_econ()[source]
+

Run SAM generation with possible follow-on econ analysis.

+
+ +
+
+run()[source]
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)[source]
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+PYSAM = <module 'PySAM.GenericSystem' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/GenericSystem.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+classmethod default()
+

Get the executed default pysam object.

+
+
Returns:
+

PySAM.GenericSystem

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day.

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.AbstractSamGenerationFromWeatherFile.html b/_autosummary/reV.SAM.generation.AbstractSamGenerationFromWeatherFile.html new file mode 100644 index 000000000..bb1718aa7 --- /dev/null +++ b/_autosummary/reV.SAM.generation.AbstractSamGenerationFromWeatherFile.html @@ -0,0 +1,1210 @@ + + + + + + + reV.SAM.generation.AbstractSamGenerationFromWeatherFile — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.AbstractSamGenerationFromWeatherFile

+
+
+class AbstractSamGenerationFromWeatherFile(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: AbstractSamGeneration, ABC

+

Base class for running sam generation with a weather file on disk.

+

Initialize a SAM generation object.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation and possibly follow-on econ analysis.

set_resource_data(resource, meta)

Generate the weather file and set the path as an input.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

PYSAM_WEATHER_TAG

Name of the weather file input used by SAM generation module.

WF_META_DROP_COLS

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+abstract property PYSAM_WEATHER_TAG
+

Name of the weather file input used by SAM generation module.

+
+ +
+
+set_resource_data(resource, meta)[source]
+

Generate the weather file and set the path as an input.

+

Some PySAM models require a data file, not raw data. This method +generates the weather data, writes it to a file on disk, and +then sets the file as an input to the generation module. The +function +run_gen_and_econ() +deletes the file on disk after a run is complete.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Time series resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the +required variables to run the respective SAM simulation. +Remapping will be done to convert typical NSRDB/WTK names +into SAM names (e.g. DNI -> dn and wind_speed -> windspeed).

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, +elevation, and timezone.

  • +
+
+
+
+ +
+
+run_gen_and_econ()[source]
+

Run SAM generation and possibly follow-on econ analysis.

+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+PYSAM = <module 'PySAM.GenericSystem' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/GenericSystem.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_mean()
+

Get mean capacity factor (fractional) from SAM.

+
+
Returns:
+

output (float) – Mean capacity factor (fractional).

+
+
+
+ +
+
+cf_profile()
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+classmethod default()
+

Get the executed default pysam object.

+
+
Returns:
+

PySAM.GenericSystem

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day.

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of hourly power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – DataFrame or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.AbstractSamPv.html b/_autosummary/reV.SAM.generation.AbstractSamPv.html new file mode 100644 index 000000000..3759c5fd3 --- /dev/null +++ b/_autosummary/reV.SAM.generation.AbstractSamPv.html @@ -0,0 +1,1400 @@ + + + + + + + reV.SAM.generation.AbstractSamPv — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.AbstractSamPv

+
+
+class AbstractSamPv(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: AbstractSamSolar, ABC

+

Photovoltaic (PV) generation with either pvwatts or detailed pv.

+

Initialize a SAM solar object.

+

See the PySAM Pvwattsv8 (or older +version model) documentation for the configuration keys required +in the sam_sys_inputs config. You may also include the +following reV-specific keys:

+
+
    +
  • reV_outages : Specification for reV-scheduled +stochastic outage losses. For example:

    +
    outage_info = [
    +    {
    +        'count': 6,
    +        'duration': 24,
    +        'percentage_of_capacity_lost': 100,
    +        'allowed_months': ['January', 'March'],
    +        'allow_outage_overlap': True
    +    },
    +    {
    +        'count': 10,
    +        'duration': 1,
    +        'percentage_of_capacity_lost': 10,
    +        'allowed_months': ['January'],
    +        'allow_outage_overlap': False
    +    },
    +    ...
    +]
    +
    +
    +

    See the description of +add_scheduled_losses() +or the +reV losses demo notebook +for detailed instructions on how to specify this input.

    +
  • +
  • reV_outages_seed : Integer value used to seed the RNG +used to compute stochastic outage losses.

  • +
  • time_index_step : Integer representing the step size +used to sample the time_index in the resource data. +This can be used to reduce temporal resolution (i.e. for +30 minute NSRDB input data, time_index_step=1 yields +the full 30 minute time series as output, while +time_index_step=2 yields hourly output, and so forth).

    +
    +

    Note

    +

    The reduced data shape (i.e. after applying a +step size of time_index_step) must still be an +integer multiple of 8760, or the execution will +fail.

    +
    +
  • +
  • clearsky : Boolean flag value indicating whether +computation should use clearsky resource data to compute +generation data.

  • +
+
+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ac()

Get AC inverter power generation profile (local timezone) in kW.

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

agg_albedo(time_index, albedo)

Aggregate a timeseries of albedo data to monthly values w len 12 as required by pysam Pvsamv1

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_mean_ac()

Get mean AC capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

cf_profile_ac()

Get hourly AC capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

clipped_power()

Get the clipped DC power generated behind the inverter (local timezone) in kW.

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

dc()

Get DC array power generation profile (local timezone) in kW.

default()

Get the executed default pysam object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get AC inverter power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation with the possibility of follow-on econ analysis.

set_latitude_tilt_az(sam_sys_inputs, meta)

Check if tilt is specified as latitude and set tilt=lat, az=180 or 0

set_resource_data(resource, meta)

Set NSRDB resource data arrays.

system_capacity_ac()

Get AC system capacity from SAM inputs.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

PYSAM

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = None
+
+ +
+
+set_resource_data(resource, meta)[source]
+

Set NSRDB resource data arrays.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
+

Raises ValueError: If lat/lon outside of -90 to 90 and -180 to 180, respectively.

+
+ +
+
+static set_latitude_tilt_az(sam_sys_inputs, meta)[source]
+

Check if tilt is specified as latitude and set tilt=lat, az=180 or 0

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments. +If for a pv simulation the “tilt” parameter was originally not +present or set to ‘lat’ or ‘latitude’, the tilt will be set to +the absolute value of the latitude found in meta and the azimuth +will be 180 if lat>0, 0 if lat<0.

+
+
+
+ +
+
+system_capacity_ac()[source]
+

Get AC system capacity from SAM inputs.

+

NOTE: AC nameplate = DC nameplate / ILR

+
+
Returns:
+

cf_profile (float) – AC nameplate = DC nameplate / ILR

+
+
+
+ +
+
+cf_mean()[source]
+

Get mean capacity factor (fractional) from SAM.

+

NOTE: PV capacity factor is the AC power production / the DC nameplate

+
+
Returns:
+

output (float) – Mean capacity factor (fractional). +PV CF is calculated as AC power / DC nameplate.

+
+
+
+ +
+
+cf_mean_ac()[source]
+

Get mean AC capacity factor (fractional) from SAM.

+

NOTE: This value only available in PVWattsV8 and up.

+
+
Returns:
+

output (float) – Mean AC capacity factor (fractional). +PV AC CF is calculated as AC power / AC nameplate.

+
+
+
+ +
+
+cf_profile()[source]
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+

NOTE: PV capacity factor is the AC power production / the DC nameplate

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval. +PV CF is calculated as AC power / DC nameplate.

+
+
+
+ +
+
+cf_profile_ac()[source]
+

Get hourly AC capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+

NOTE: PV AC capacity factor is the AC power production / the AC +nameplate. AC nameplate = DC nameplate / ILR

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval. +PV AC CF is calculated as AC power / AC nameplate.

+
+
+
+ +
+
+gen_profile()[source]
+

Get AC inverter power generation profile (local timezone) in kW. +This is an alias of the “ac” SAM output variable if PySAM version>=3. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of AC inverter power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+ac()[source]
+

Get AC inverter power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of AC inverter power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+dc()[source]
+

Get DC array power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of DC array power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+clipped_power()[source]
+

Get the clipped DC power generated behind the inverter +(local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

clipped (np.ndarray) – 1D array of clipped DC power in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+abstract static default()[source]
+

Get the executed default pysam object.

+
+ +
+
+collect_outputs(output_lookup=None)[source]
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+static agg_albedo(time_index, albedo)
+

Aggregate a timeseries of albedo data to monthly values w len 12 as +required by pysam Pvsamv1

+

Tech spec from pysam docs: +https://nrel-pysam.readthedocs.io/en/master/modules/Pvsamv1.html +#PySAM.Pvsamv1.Pvsamv1.SolarResource.albedo

+
+
Parameters:
+
    +
  • time_index (pd.DatetimeIndex) – Timeseries solar resource datetimeindex

  • +
  • albedo (list) – Timeseries Albedo data to be aggregated. Should be 0-1 and likely +hourly or less.

  • +
+
+
Returns:
+

monthly_albedo (list) – 1D list of monthly albedo values with length 12

+
+
+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day.

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatatimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation with possibility for follow on econ analysis.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Dataframe or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.AbstractSamSolar.html b/_autosummary/reV.SAM.generation.AbstractSamSolar.html new file mode 100644 index 000000000..000ddf5d0 --- /dev/null +++ b/_autosummary/reV.SAM.generation.AbstractSamSolar.html @@ -0,0 +1,1217 @@ + + + + + + + reV.SAM.generation.AbstractSamSolar — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.AbstractSamSolar

+
+
+class AbstractSamSolar(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: AbstractSamGeneration, ABC

+

Base Class for Solar generation from SAM

+

Initialize a SAM generation object.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

agg_albedo(time_index, albedo)

Aggregate a timeseries of albedo data to monthly values with length 12 as required by pysam Pvsamv1

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation with possibility for follow on econ analysis.

set_resource_data(resource, meta)

Set NSRDB resource data arrays.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+static agg_albedo(time_index, albedo)[source]
+

Aggregate a timeseries of albedo data to monthly values with length 12 as +required by pysam Pvsamv1

+

Tech spec from pysam docs: +https://nrel-pysam.readthedocs.io/en/master/modules/Pvsamv1.html +#PySAM.Pvsamv1.Pvsamv1.SolarResource.albedo

+
+
Parameters:
+
    +
  • time_index (pd.DatetimeIndex) – Timeseries solar resource datetimeindex

  • +
  • albedo (list) – Timeseries Albedo data to be aggregated. Should be 0-1 and likely +hourly or less.

  • +
+
+
Returns:
+

monthly_albedo (list) – 1D list of monthly albedo values with length 12

+
+
+
+ +
+
+set_resource_data(resource, meta)[source]
+

Set NSRDB resource data arrays.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+PYSAM = <module 'PySAM.GenericSystem' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/GenericSystem.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_mean()
+

Get mean capacity factor (fractional) from SAM.

+
+
Returns:
+

output (float) – Mean capacity factor (fractional).

+
+
+
+ +
+
+cf_profile()
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+classmethod default()
+

Get the executed default pysam object.

+
+
Returns:
+

PySAM.GenericSystem

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of hourly power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation with possibility for follow on econ analysis.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Dataframe or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.AbstractSamWind.html b/_autosummary/reV.SAM.generation.AbstractSamWind.html new file mode 100644 index 000000000..389e3ad5b --- /dev/null +++ b/_autosummary/reV.SAM.generation.AbstractSamWind.html @@ -0,0 +1,1305 @@ + + + + + + + reV.SAM.generation.AbstractSamWind — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.AbstractSamWind

+
+
+class AbstractSamWind(*args, **kwargs)[source]
+

Bases: AbstractSamGeneration, PowerCurveLossesMixin, ABC

+

Wind generation from SAM.

+

See the PySAM Windpower +documentation for the configuration keys required in the +sam_sys_inputs config. You may also include the following +reV-specific keys:

+
+
    +
  • reV_power_curve_losses : A dictionary that can be used +to initialize +PowerCurveLossesInput. +For example:

    +
    reV_power_curve_losses = {
    +    'target_losses_percent': 9.8,
    +    'transformation': 'exponential_stretching'
    +}
    +
    +
    +

    See the description of the class mentioned above or the +reV losses demo notebook +for detailed instructions on how to specify this input.

    +
  • +
  • reV_outages : Specification for reV-scheduled +stochastic outage losses. For example:

    +
    outage_info = [
    +    {
    +        'count': 6,
    +        'duration': 24,
    +        'percentage_of_capacity_lost': 100,
    +        'allowed_months': ['January', 'March'],
    +        'allow_outage_overlap': True
    +    },
    +    {
    +        'count': 10,
    +        'duration': 1,
    +        'percentage_of_capacity_lost': 10,
    +        'allowed_months': ['January'],
    +        'allow_outage_overlap': False
    +    },
    +    ...
    +]
    +
    +
    +

    See the description of +add_scheduled_losses() +or the +reV losses demo notebook +for detailed instructions on how to specify this input.

    +
  • +
  • reV_outages_seed : Integer value used to seed the RNG +used to compute stochastic outage losses.

  • +
  • time_index_step : Integer representing the step size +used to sample the time_index in the resource data. +This can be used to reduce temporal resolution (i.e. for +30 minute input data, time_index_step=1 yields the +full 30 minute time series as output, while +time_index_step=2 yields hourly output, and so forth).

    +
    +

    Note

    +

    The reduced data shape (i.e. after applying a +step size of time_index_step) must still be +an integer multiple of 8760, or the execution +will fail.

    +
    +
  • +
+
+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_power_curve_losses()

Adjust power curve in SAM config file to account for losses.

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation with possibility for follow on econ analysis.

set_resource_data(resource, meta)

Placeholder for resource data setting (nsrdb or wtk)

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

wind_resource_from_input()

Collect wind resource and weights from inputs.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

POWER_CURVE_CONFIG_KEY

Specify power curve loss target in the config file using this key.

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

input_power_curve

Original power curve for site.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+POWER_CURVE_CONFIG_KEY = 'reV_power_curve_losses'
+

Specify power curve loss target in the config file using this key.

+
+ +
+
+PYSAM = <module 'PySAM.GenericSystem' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/GenericSystem.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+add_power_curve_losses()
+

Adjust power curve in SAM config file to account for losses.

+

This function reads the information in the +reV_power_curve_losses key of the sam_sys_inputs +dictionary and computes a new power curve that accounts for the +loss percentage specified from that input. If no power curve +loss info is specified in sam_sys_inputs, the power curve +will not be adjusted.

+
+

See also

+
+
adjust_power_curve()

Power curve shift calculation.

+
+
+
+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_mean()
+

Get mean capacity factor (fractional) from SAM.

+
+
Returns:
+

output (float) – Mean capacity factor (fractional).

+
+
+
+ +
+
+cf_profile()
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+classmethod default()
+

Get the executed default pysam object.

+
+
Returns:
+

PySAM.GenericSystem

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of hourly power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+property input_power_curve
+

Original power curve for site.

+
+
Type:
+

PowerCurve

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation with the possibility for follow-on econ analysis.

+
+ +
+
+abstract set_resource_data(resource, meta)
+

Placeholder for resource data setting (nsrdb or wtk)

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – DataFrame or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+
+wind_resource_from_input()
+

Collect wind resource and weights from inputs.

+
+
Returns:
+

PowerCurveWindResource – Wind resource used to compute power curve shift.

+
+
Raises:
+

reVLossesValueError – If power curve losses are not compatible with the + ‘wind_resource_model_choice’.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.Geothermal.html b/_autosummary/reV.SAM.generation.Geothermal.html new file mode 100644 index 000000000..c94479d23 --- /dev/null +++ b/_autosummary/reV.SAM.generation.Geothermal.html @@ -0,0 +1,1365 @@ + + + + + + + reV.SAM.generation.Geothermal — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.Geothermal

+
+
+class Geothermal(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: AbstractSamGenerationFromWeatherFile

+

reV-SAM geothermal generation.

+

As of 12/20/2022, the resource potential input in SAM is only used +to calculate the number of well replacements during the lifetime of +a geothermal plant. It was decided that reV would not model well +replacements. Therefore, reV sets the resource potential to match +(or be just above) the gross potential so that SAM does not throw +any errors.

+

Also as of 12/20/2022, the SAM GETEM module requires a weather file, +but does not actually require any weather data to run. Therefore, +reV currently generates an empty weather file to pass to SAM. This +behavior can be easily updated in the future should the SAM GETEM +module start using weather data.

+

See the PySAM Geothermal documentation +for the configuration keys required in the sam_sys_inputs config. +Some notable keys include (non-exhaustive):

+
+
    +
  • resource_type : Integer flag representing either +Hydrothermal (0) or EGS (1) resource. Only values of 0 or 1 +allowed.

  • +
  • resource_potential : Total resource potential at location +(in MW).

    +
    +

    Important

    +

    reV automatically sets the resource +potential to match the gross potential (see documentation +above), so this key should be left out of the config (it +will be overridden in any case).

    +
    +
  • +
  • resource_temp : Temperature of resource (in C).

    +
    +

    Important

    +

    This value is set by reV based on the +user’s geothermal resource data input. To override this +behavior, users may specify their own resource_temp +value (either a single value for all sites in the SAM +geothermal config or a site-dependent value in the project +points CSV). In this case, the resource temperature from +the input data will be ignored completely, and the +temperature at each location will be determined solely from +this input.

    +
    +
  • +
  • resource_depth : Depth to geothermal resource (in m).

  • +
  • analysis_type : Integer flag representing the plant +configuration. If the nameplate input is to be used to +specify the plant capacity, then this flag should be set to 0 +(this is the default reV assumption). Otherwise, if the +num_wells input is to be used to specify the plant site, +then this flag should be set to 1. Only values of 0 or 1 +allowed.

  • +
  • nameplate : Geothermal plant size (in kW). Only affects +the output if analysis_type=0.

    +
    +

    Important

    +

    Unlike wind or solar, reV geothermal +dynamically sets the size of a geothermal plant. In +particular, the plant capacity is set to match the resource +potential (obtained from the input data) for each site. For +this to work, users must leave out the nameplate +key from the SAM config.

    +

    Alternatively, users may specify their own nameplate +capacity value (either a single value for all sites in the +SAM geothermal config or a site-dependent value in the +project points CSV). In this case, the resource potential +from the input data will be ignored completely, and the +capacity at each location will be determined solely from +this input.

    +
    +
  • +
  • num_wells : Number of wells at each plant. This value is +used to determine plant capacity if analysis_type=1. +Otherwise this input has no effect.

  • +
  • num_wells_getem : Number of wells assumed at each plant +for power block calculations. Only affects power block outputs +if analysis_type=0 (otherwise the num_wells input is +used in power block calculations).

    +
    +

    Note

    +

    reV does not currently adjust this value based +on the resource input (as it probably should). If any +power block outputs are required in the future, there may +need to be extra development to set this value based on +the dynamically calculated plant size.

    +
    +
  • +
  • conversion_type : Integer flag representing the conversion +plant type. Either Binary (0) or Flash (1). Only values of 0 +or 1 allowed.

  • +
  • design_temp : EGS plant design temperature (in C). Only +affects EGS runs. If this value is set lower than the +resource temperature input, reV will adjust it to match +the latter in order to avoid SAM errors.

  • +
  • geotherm.cost.inj_prod_well_ratio : Fraction representing +the injection to production well ratio (0-1). SAM GUI defaults +to 0.5 for this value, but it is recommended to set this to +the GETEM default of 0.75.

  • +
+
+

You may also include the following reV-specific keys:

+
+
    +
  • num_confirmation_wells : Number of confirmation wells that +can also be used as production wells. This number is used to +determine the total number of wells required at each plant, +and therefore the total drilling costs. This value defaults to +2 (to match the SAM GUI as of 8/1/2023). However, the default +value can lead to negative costs if the plant size is small +(e.g. only 1 production well is needed, so the costs equal +-1 * drill_cost_per_well). This is a limitation of the +SAM calculations (as of 8/1/2023), and it is therefore useful +to set num_confirmation_wells=0 when performing reV +runs for small plant sizes.

  • +
  • capital_cost_per_kw : Capital cost values in $/kW. If +this value is specified in the config, reV calculates and +overrides the total capital_cost value based on the +geothermal plant size (capacity) at each location.

  • +
  • fixed_operating_cost : Fixed operating cost values in +$/kW. If this value is specified in the config, reV calculates +and overrides the total fixed_operating_cost value based +on the geothermal plant size (capacity) at each location.

  • +
  • drill_cost_per_well : Drilling cost per well, in $. If +this value is specified in the config, reV calculates the +total drilling costs based on the number of wells that need to +be drilled at each location. The drilling costs are added to +the total capital_cost at each location.

  • +
  • reV_outages : Specification for reV-scheduled +stochastic outage losses. For example:

    +
    outage_info = [
    +    {
    +        'count': 6,
    +        'duration': 24,
    +        'percentage_of_capacity_lost': 100,
    +        'allowed_months': ['January', 'March'],
    +        'allow_outage_overlap': True
    +    },
    +    {
    +        'count': 10,
    +        'duration': 1,
    +        'percentage_of_capacity_lost': 10,
    +        'allowed_months': ['January'],
    +        'allow_outage_overlap': False
    +    },
    +    ...
    +]
    +
    +
    +

    See the description of +add_scheduled_losses() +or the +reV losses demo notebook +for detailed instructions on how to specify this input.

    +
  • +
  • reV_outages_seed : Integer value used to seed the RNG +used to compute stochastic outage losses.

  • +
  • time_index_step : Integer representing the step size +used to sample the time_index in the resource data. +This can be used to reduce temporal resolution (i.e. for +30 minute NSRDB input data, time_index_step=1 yields +the full 30 minute time series as output, while +time_index_step=2 yields hourly output, and so forth).

  • +
+
+

Initialize a SAM generation object.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default PySAM Geothermal object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation and possibly follow-on econ analysis.

set_resource_data(resource, meta)

Generate the weather file and set the path as an input.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

PYSAM_WEATHER_TAG

WF_META_DROP_COLS

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.Geothermal' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/Geothermal.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+static default()[source]
+

Get the executed default PySAM Geothermal object.

+
+
Returns:
+

PySAM.Geothermal

+
+
+
+ +
+
+cf_profile()[source]
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+assign_inputs()[source]
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+set_resource_data(resource, meta)[source]
+

Generate the weather file and set the path as an input.

+

The Geothermal PySAM model requires a data file, not raw data. +This method generates the weather data, writes it to a file on +disk, and then sets the file as an input to the Geothermal +generation module. The function +run_gen_and_econ() +deletes the file on disk after a run is complete.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Time series resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the +required variables to run the respective SAM simulation.

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, +elevation, and timezone.

  • +
+
+
+
+ +
+
+run_gen_and_econ()[source]
+

Run SAM generation and possibly follow-on econ analysis.

+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_mean()
+

Get mean capacity factor (fractional) from SAM.

+
+
Returns:
+

output (float) – Mean capacity factor (fractional).

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day.

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of hourly power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – DataFrame or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.LinearDirectSteam.html b/_autosummary/reV.SAM.generation.LinearDirectSteam.html new file mode 100644 index 000000000..b60248124 --- /dev/null +++ b/_autosummary/reV.SAM.generation.LinearDirectSteam.html @@ -0,0 +1,1204 @@ + + + + + + + reV.SAM.generation.LinearDirectSteam — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.LinearDirectSteam

+
+
+class LinearDirectSteam(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: AbstractSamGenerationFromWeatherFile

+

Process heat linear Fresnel direct steam generation

+

Initialize a SAM generation object.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Calculate mean capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam linear Fresnel object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation and possibility follow-on econ analysis.

set_resource_data(resource, meta)

Generate the weather file and set the path as an input.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

PYSAM_WEATHER_TAG

WF_META_DROP_COLS

attr_dict

Get the heirarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.LinearFresnelDsgIph' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/LinearFresnelDsgIph.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+cf_mean()[source]
+

Calculate mean capacity factor (fractional) from SAM.

+
+
Returns:
+

output (float) – Mean capacity factor (fractional).

+
+
+
+ +
+
+static default()[source]
+

Get the executed default pysam linear Fresnel object.

+
+
Returns:
+

PySAM.LinearFresnelDsgIph

+
+
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_profile()
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of hourly power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation and possibly follow-on econ analysis.

+
+ +
+
+set_resource_data(resource, meta)
+

Generate the weather file and set the path as an input.

+

Some PySAM models require a data file, not raw data. This method +generates the weather data, writes it to a file on disk, and +then sets the file as an input to the generation module. The +function +run_gen_and_econ() +deletes the file on disk after a run is complete.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Time series resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the +required variables to run the respective SAM simulation. +Remapping will be done to convert typical NSRDB/WTK names +into SAM names (e.g. DNI -> dn and wind_speed -> windspeed).

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, +elevation, and timezone.

  • +
+
+
+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Dataframe or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.MhkWave.html b/_autosummary/reV.SAM.generation.MhkWave.html new file mode 100644 index 000000000..c6f75cc25 --- /dev/null +++ b/_autosummary/reV.SAM.generation.MhkWave.html @@ -0,0 +1,1190 @@ + + + + + + + reV.SAM.generation.MhkWave — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.MhkWave

+
+
+class MhkWave(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: AbstractSamGeneration

+

Class for Wave generation from SAM

+

Initialize a SAM generation object.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default PySAM MhkWave object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation with the possibility of follow-on econ analysis.

set_resource_data(resource, meta)

Set Hindcast US Wave resource data arrays.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.MhkWave' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/MhkWave.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_mean()
+

Get mean capacity factor (fractional) from SAM.

+
+
Returns:
+

output (float) – Mean capacity factor (fractional).

+
+
+
+ +
+
+cf_profile()
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of hourly power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation with the possibility of follow-on econ analysis.

+
+ +
+
+set_resource_data(resource, meta)[source]
+

Set Hindcast US Wave resource data arrays.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation.

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Dataframe or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+
+static default()[source]
+

Get the executed default PySAM MhkWave object.

+
+
Returns:
+

PySAM.MhkWave

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.PvSamv1.html b/_autosummary/reV.SAM.generation.PvSamv1.html new file mode 100644 index 000000000..78c1663c9 --- /dev/null +++ b/_autosummary/reV.SAM.generation.PvSamv1.html @@ -0,0 +1,1402 @@ + + + + + + + reV.SAM.generation.PvSamv1 — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.PvSamv1

+
+
+class PvSamv1(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: AbstractSamPv

+

Detailed PV model

+

Initialize a SAM solar object.

+

See the PySAM Pvwattsv8 (or older +version model) documentation for the configuration keys required +in the sam_sys_inputs config. You may also include the +following reV-specific keys:

+
+
    +
  • reV_outages : Specification for reV-scheduled +stochastic outage losses. For example:

    +
    outage_info = [
    +    {
    +        'count': 6,
    +        'duration': 24,
    +        'percentage_of_capacity_lost': 100,
    +        'allowed_months': ['January', 'March'],
    +        'allow_outage_overlap': True
    +    },
    +    {
    +        'count': 10,
    +        'duration': 1,
    +        'percentage_of_capacity_lost': 10,
    +        'allowed_months': ['January'],
    +        'allow_outage_overlap': False
    +    },
    +    ...
    +]
    +
    +
    +

    See the description of +add_scheduled_losses() +or the +reV losses demo notebook +for detailed instructions on how to specify this input.

    +
  • +
  • reV_outages_seed : Integer value used to seed the RNG +used to compute stochastic outage losses.

  • +
  • time_index_step : Integer representing the step size +used to sample the time_index in the resource data. +This can be used to reduce temporal resolution (i.e. for +30 minute NSRDB input data, time_index_step=1 yields +the full 30 minute time series as output, while +time_index_step=2 yields hourly output, and so forth).

    +
    +

    Note

    +

    The reduced data shape (i.e. after applying a +step size of time_index_step) must still be an +integer multiple of 8760, or the execution will +fail.

    +
    +
  • +
  • clearsky : Boolean flag value indicating wether +computation should use clearsky resource data to compute +generation data.

  • +
+
+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ac()

Get AC inverter power generation profile (local timezone) in kW.

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

agg_albedo(time_index, albedo)

Aggregate a timeseries of albedo data to monthly values w len 12 as required by pysam Pvsamv1

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_mean_ac()

Get mean AC capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

cf_profile_ac()

Get hourly AC capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

clipped_power()

Get the clipped DC power generated behind the inverter (local timezone) in kW.

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

dc()

Get DC array power generation profile (local timezone) in kW.

default()

Get the executed default pysam Pvsamv1 object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get AC inverter power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation with possibility for follow on econ analysis.

set_latitude_tilt_az(sam_sys_inputs, meta)

Check if tilt is specified as latitude and set tilt=lat, az=180 or 0

set_resource_data(resource, meta)

Set NSRDB resource data arrays.

system_capacity_ac()

Get AC system capacity from SAM inputs.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.Pvsamv1' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/Pvsamv1.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+ac()[source]
+

Get AC inverter power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of AC inverter power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+dc()[source]
+

Get DC array power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of DC array power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static default()[source]
+

Get the executed default pysam Pvsamv1 object.

+
+
Returns:
+

PySAM.Pvsamv1

+
+
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+static agg_albedo(time_index, albedo)
+

Aggregate a timeseries of albedo data to monthly values w len 12 as +required by pysam Pvsamv1

+

Tech spec from pysam docs: +https://nrel-pysam.readthedocs.io/en/master/modules/Pvsamv1.html +#PySAM.Pvsamv1.Pvsamv1.SolarResource.albedo

+
+
Parameters:
+
    +
  • time_index (pd.DatetimeIndex) – Timeseries solar resource datetimeindex

  • +
  • albedo (list) – Timeseries Albedo data to be aggregated. Should be 0-1 and likely +hourly or less.

  • +
+
+
Returns:
+

monthly_albedo (list) – 1D list of monthly albedo values with length 12

+
+
+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_mean()
+

Get mean capacity factor (fractional) from SAM.

+

NOTE: PV capacity factor is the AC power production / the DC nameplate

+
+
Returns:
+

output (float) – Mean capacity factor (fractional). +PV CF is calculated as AC power / DC nameplate.

+
+
+
+ +
+
+cf_mean_ac()
+

Get mean AC capacity factor (fractional) from SAM.

+

NOTE: This value only available in PVWattsV8 and up.

+
+
Returns:
+

output (float) – Mean AC capacity factor (fractional). +PV AC CF is calculated as AC power / AC nameplate.

+
+
+
+ +
+
+cf_profile()
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+

NOTE: PV capacity factor is the AC power production / the DC nameplate

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval. +PV CF is calculated as AC power / DC nameplate.

+
+
+
+ +
+
+cf_profile_ac()
+

Get hourly AC capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+

NOTE: PV AC capacity factor is the AC power production / the AC +nameplate. AC nameplate = DC nameplate / ILR

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval. +PV AC CF is calculated as AC power / AC nameplate.

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+clipped_power()
+

Get the clipped DC power generated behind the inverter +(local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

clipped (np.ndarray) – 1D array of clipped DC power in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get AC inverter power generation profile (local timezone) in kW. +This is an alias of the “ac” SAM output variable if PySAM version>=3. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of AC inverter power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation with possibility for follow on econ analysis.

+
+ +
+
+static set_latitude_tilt_az(sam_sys_inputs, meta)
+

Check if tilt is specified as latitude and set tilt=lat, az=180 or 0

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments. +If for a pv simulation the “tilt” parameter was originally not +present or set to ‘lat’ or ‘latitude’, the tilt will be set to +the absolute value of the latitude found in meta and the azimuth +will be 180 if lat>0, 0 if lat<0.

+
+
+
+ +
+
+set_resource_data(resource, meta)
+

Set NSRDB resource data arrays.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
+

:raises ValueError : If lat/lon outside of -90 to 90 and -180 to 180,: respectively.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+system_capacity_ac()
+

Get AC system capacity from SAM inputs.

+

NOTE: AC nameplate = DC nameplate / ILR

+
+
Returns:
+

cf_profile (float) – AC nameplate = DC nameplate / ILR

+
+
+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Dataframe or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.PvWattsv5.html b/_autosummary/reV.SAM.generation.PvWattsv5.html new file mode 100644 index 000000000..5661fc152 --- /dev/null +++ b/_autosummary/reV.SAM.generation.PvWattsv5.html @@ -0,0 +1,1402 @@ + + + + + + + reV.SAM.generation.PvWattsv5 — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.PvWattsv5

+
+
+class PvWattsv5(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: AbstractSamPv

+

Photovoltaic (PV) generation with pvwattsv5.

+

Initialize a SAM solar object.

+

See the PySAM Pvwattsv8 (or older +version model) documentation for the configuration keys required +in the sam_sys_inputs config. You may also include the +following reV-specific keys:

+
+
    +
  • reV_outages : Specification for reV-scheduled +stochastic outage losses. For example:

    +
    outage_info = [
    +    {
    +        'count': 6,
    +        'duration': 24,
    +        'percentage_of_capacity_lost': 100,
    +        'allowed_months': ['January', 'March'],
    +        'allow_outage_overlap': True
    +    },
    +    {
    +        'count': 10,
    +        'duration': 1,
    +        'percentage_of_capacity_lost': 10,
    +        'allowed_months': ['January'],
    +        'allow_outage_overlap': False
    +    },
    +    ...
    +]
    +
    +
    +

    See the description of +add_scheduled_losses() +or the +reV losses demo notebook +for detailed instructions on how to specify this input.

    +
  • +
  • reV_outages_seed : Integer value used to seed the RNG +used to compute stochastic outage losses.

  • +
  • time_index_step : Integer representing the step size +used to sample the time_index in the resource data. +This can be used to reduce temporal resolution (i.e. for +30 minute NSRDB input data, time_index_step=1 yields +the full 30 minute time series as output, while +time_index_step=2 yields hourly output, and so forth).

    +
    +

    Note

    +

    The reduced data shape (i.e. after applying a +step size of time_index_step) must still be an +integer multiple of 8760, or the execution will +fail.

    +
    +
  • +
  • clearsky : Boolean flag value indicating whether +computation should use clearsky resource data to compute +generation data.

  • +
+
+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ac()

Get AC inverter power generation profile (local timezone) in kW.

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

agg_albedo(time_index, albedo)

Aggregate a timeseries of albedo data to monthly values w len 12 as required by pysam Pvsamv1

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_mean_ac()

Get mean AC capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

cf_profile_ac()

Get hourly AC capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

clipped_power()

Get the clipped DC power generated behind the inverter (local timezone) in kW.

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

dc()

Get DC array power generation profile (local timezone) in kW.

default()

Get the executed default pysam PVWATTSV5 object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get AC inverter power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation with possibility for follow on econ analysis.

set_latitude_tilt_az(sam_sys_inputs, meta)

Check if tilt is specified as latitude and set tilt=lat, az=180 or 0

set_resource_data(resource, meta)

Set NSRDB resource data arrays.

system_capacity_ac()

Get AC system capacity from SAM inputs.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.Pvwattsv5' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/Pvwattsv5.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+static default()[source]
+

Get the executed default pysam PVWATTSV5 object.

+
+
Returns:
+

PySAM.Pvwattsv5

+
+
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+ac()
+

Get AC inverter power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of AC inverter power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+static agg_albedo(time_index, albedo)
+

Aggregate a timeseries of albedo data to monthly values w len 12 as +required by pysam Pvsamv1

+

Tech spec from pysam docs: +https://nrel-pysam.readthedocs.io/en/master/modules/Pvsamv1.html +#PySAM.Pvsamv1.Pvsamv1.SolarResource.albedo

+
+
Parameters:
+
    +
  • time_index (pd.DatetimeIndex) – Timeseries solar resource datetimeindex

  • +
  • albedo (list) – Timeseries Albedo data to be aggregated. Should be 0-1 and likely +hourly or less.

  • +
+
+
Returns:
+

monthly_albedo (list) – 1D list of monthly albedo values with length 12

+
+
+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_mean()
+

Get mean capacity factor (fractional) from SAM.

+

NOTE: PV capacity factor is the AC power production / the DC nameplate

+
+
Returns:
+

output (float) – Mean capacity factor (fractional). +PV CF is calculated as AC power / DC nameplate.

+
+
+
+ +
+
+cf_mean_ac()
+

Get mean AC capacity factor (fractional) from SAM.

+

NOTE: This value only available in PVWattsV8 and up.

+
+
Returns:
+

output (float) – Mean AC capacity factor (fractional). +PV AC CF is calculated as AC power / AC nameplate.

+
+
+
+ +
+
+cf_profile()
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+

NOTE: PV capacity factor is the AC power production / the DC nameplate

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval. +PV CF is calculated as AC power / DC nameplate.

+
+
+
+ +
+
+cf_profile_ac()
+

Get hourly AC capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+

NOTE: PV AC capacity factor is the AC power production / the AC +nameplate. AC nameplate = DC nameplate / ILR

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval. +PV AC CF is calculated as AC power / AC nameplate.

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+clipped_power()
+

Get the clipped DC power generated behind the inverter +(local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

clipped (np.ndarray) – 1D array of clipped DC power in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+dc()
+

Get DC array power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of DC array power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get AC inverter power generation profile (local timezone) in kW. +This is an alias of the “ac” SAM output variable if PySAM version>=3. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of AC inverter power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation with possibility for follow on econ analysis.

+
+ +
+
+static set_latitude_tilt_az(sam_sys_inputs, meta)
+

Check if tilt is specified as latitude and set tilt=lat, az=180 or 0

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments. +If for a pv simulation the “tilt” parameter was originally not +present or set to ‘lat’ or ‘latitude’, the tilt will be set to +the absolute value of the latitude found in meta and the azimuth +will be 180 if lat>0, 0 if lat<0.

+
+
+
+ +
+
+set_resource_data(resource, meta)
+

Set NSRDB resource data arrays.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
+

:raises ValueError : If lat/lon outside of -90 to 90 and -180 to 180,: respectively.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+system_capacity_ac()
+

Get AC system capacity from SAM inputs.

+

NOTE: AC nameplate = DC nameplate / ILR

+
+
Returns:
+

cf_profile (float) – AC nameplate = DC nameplate / ILR

+
+
+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Datafram or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.PvWattsv7.html b/_autosummary/reV.SAM.generation.PvWattsv7.html new file mode 100644 index 000000000..9d4e54363 --- /dev/null +++ b/_autosummary/reV.SAM.generation.PvWattsv7.html @@ -0,0 +1,1402 @@ + + + + + + + reV.SAM.generation.PvWattsv7 — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.PvWattsv7

+
+
+class PvWattsv7(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: AbstractSamPv

+

Photovoltaic (PV) generation with pvwattsv7.

+

Initialize a SAM solar object.

+

See the PySAM Pvwattsv8 (or older +version model) documentation for the configuration keys required +in the sam_sys_inputs config. You may also include the +following reV-specific keys:

+
+
    +
  • reV_outages : Specification for reV-scheduled +stochastic outage losses. For example:

    +
    outage_info = [
    +    {
    +        'count': 6,
    +        'duration': 24,
    +        'percentage_of_capacity_lost': 100,
    +        'allowed_months': ['January', 'March'],
    +        'allow_outage_overlap': True
    +    },
    +    {
    +        'count': 10,
    +        'duration': 1,
    +        'percentage_of_capacity_lost': 10,
    +        'allowed_months': ['January'],
    +        'allow_outage_overlap': False
    +    },
    +    ...
    +]
    +
    +
    +

    See the description of +add_scheduled_losses() +or the +reV losses demo notebook +for detailed instructions on how to specify this input.

    +
  • +
  • reV_outages_seed : Integer value used to seed the RNG +used to compute stochastic outage losses.

  • +
  • time_index_step : Integer representing the step size +used to sample the time_index in the resource data. +This can be used to reduce temporal resolution (i.e. for +30 minute NSRDB input data, time_index_step=1 yields +the full 30 minute time series as output, while +time_index_step=2 yields hourly output, and so forth).

    +
    +

    Note

    +

    The reduced data shape (i.e. after applying a +step size of time_index_step) must still be an +integer multiple of 8760, or the execution will +fail.

    +
    +
  • +
  • clearsky : Boolean flag value indicating wether +computation should use clearsky resource data to compute +generation data.

  • +
+
+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ac()

Get AC inverter power generation profile (local timezone) in kW.

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

agg_albedo(time_index, albedo)

Aggregate a timeseries of albedo data to monthly values w len 12 as required by pysam Pvsamv1

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_mean_ac()

Get mean AC capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

cf_profile_ac()

Get hourly AC capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

clipped_power()

Get the clipped DC power generated behind the inverter (local timezone) in kW.

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

dc()

Get DC array power generation profile (local timezone) in kW.

default()

Get the executed default pysam PVWATTSV7 object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get AC inverter power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation with possibility for follow on econ analysis.

set_latitude_tilt_az(sam_sys_inputs, meta)

Check if tilt is specified as latitude and set tilt=lat, az=180 or 0

set_resource_data(resource, meta)

Set NSRDB resource data arrays.

system_capacity_ac()

Get AC system capacity from SAM inputs.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

attr_dict

Get the heirarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.Pvwattsv7' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/Pvwattsv7.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+static default()[source]
+

Get the executed default pysam PVWATTSV7 object.

+
+
Returns:
+

PySAM.Pvwattsv7

+
+
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+ac()
+

Get AC inverter power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of AC inverter power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+static agg_albedo(time_index, albedo)
+

Aggregate a timeseries of albedo data to monthly values w len 12 as +required by pysam Pvsamv1

+

Tech spec from pysam docs: +https://nrel-pysam.readthedocs.io/en/master/modules/Pvsamv1.html +#PySAM.Pvsamv1.Pvsamv1.SolarResource.albedo

+
+
Parameters:
+
    +
  • time_index (pd.DatetimeIndex) – Timeseries solar resource datetimeindex

  • +
  • albedo (list) – Timeseries Albedo data to be aggregated. Should be 0-1 and likely +hourly or less.

  • +
+
+
Returns:
+

monthly_albedo (list) – 1D list of monthly albedo values with length 12

+
+
+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the heirarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_mean()
+

Get mean capacity factor (fractional) from SAM.

+

NOTE: PV capacity factor is the AC power production / the DC nameplate

+
+
Returns:
+

output (float) – Mean capacity factor (fractional). +PV CF is calculated as AC power / DC nameplate.

+
+
+
+ +
+
+cf_mean_ac()
+

Get mean AC capacity factor (fractional) from SAM.

+

NOTE: This value only available in PVWattsV8 and up.

+
+
Returns:
+

output (float) – Mean AC capacity factor (fractional). +PV AC CF is calculated as AC power / AC nameplate.

+
+
+
+ +
+
+cf_profile()
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+

NOTE: PV capacity factor is the AC power production / the DC nameplate

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval. +PV CF is calculated as AC power / DC nameplate.

+
+
+
+ +
+
+cf_profile_ac()
+

Get hourly AC capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+

NOTE: PV AC capacity factor is the AC power production / the AC +nameplate. AC nameplate = DC nameplate / ILR

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval. +PV AC CF is calculated as AC power / AC nameplate.

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+clipped_power()
+

Get the clipped DC power generated behind the inverter +(local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

clipped (np.ndarray) – 1D array of clipped DC power in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+dc()
+

Get DC array power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of DC array power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not remove last day

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatatimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get AC inverter power generation profile (local timezone) in kW. +This is an alias of the “ac” SAM output variable if PySAM version>=3. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of AC inverter power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation with possibility for follow on econ analysis.

+
+ +
+
+static set_latitude_tilt_az(sam_sys_inputs, meta)
+

Check if tilt is specified as latitude and set tilt=lat, az=180 or 0

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments. +If for a pv simulation the “tilt” parameter was originally not +present or set to ‘lat’ or ‘latitude’, the tilt will be set to +the absolute value of the latitude found in meta and the azimuth +will be 180 if lat>0, 0 if lat<0.

+
+
+
+ +
+
+set_resource_data(resource, meta)
+

Set NSRDB resource data arrays.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
+

:raises ValueError : If lat/lon outside of -90 to 90 and -180 to 180,: respectively.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+system_capacity_ac()
+

Get AC system capacity from SAM inputs.

+

NOTE: AC nameplate = DC nameplate / ILR

+
+
Returns:
+

cf_profile (float) – AC nameplate = DC nameplate / ILR

+
+
+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Datafram or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.PvWattsv8.html b/_autosummary/reV.SAM.generation.PvWattsv8.html new file mode 100644 index 000000000..597784895 --- /dev/null +++ b/_autosummary/reV.SAM.generation.PvWattsv8.html @@ -0,0 +1,1402 @@ + + + + + + + reV.SAM.generation.PvWattsv8 — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.PvWattsv8

+
+
+class PvWattsv8(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: AbstractSamPv

+

Photovoltaic (PV) generation with pvwattsv8.

+

Initialize a SAM solar object.

+

See the PySAM Pvwattsv8 (or older +version model) documentation for the configuration keys required +in the sam_sys_inputs config. You may also include the +following reV-specific keys:

+
+
    +
  • reV_outages : Specification for reV-scheduled +stochastic outage losses. For example:

    +
    outage_info = [
    +    {
    +        'count': 6,
    +        'duration': 24,
    +        'percentage_of_capacity_lost': 100,
    +        'allowed_months': ['January', 'March'],
    +        'allow_outage_overlap': True
    +    },
    +    {
    +        'count': 10,
    +        'duration': 1,
    +        'percentage_of_capacity_lost': 10,
    +        'allowed_months': ['January'],
    +        'allow_outage_overlap': False
    +    },
    +    ...
    +]
    +
    +
    +

    See the description of +add_scheduled_losses() +or the +reV losses demo notebook +for detailed instructions on how to specify this input.

    +
  • +
  • reV_outages_seed : Integer value used to seed the RNG +used to compute stochastic outage losses.

  • +
  • time_index_step : Integer representing the step size +used to sample the time_index in the resource data. +This can be used to reduce temporal resolution (i.e. for +30 minute NSRDB input data, time_index_step=1 yields +the full 30 minute time series as output, while +time_index_step=2 yields hourly output, and so forth).

    +
    +

    Note

    +

    The reduced data shape (i.e. after applying a +step size of time_index_step) must still be an +integer multiple of 8760, or the execution will +fail.

    +
    +
  • +
  • clearsky : Boolean flag value indicating wether +computation should use clearsky resource data to compute +generation data.

  • +
+
+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ac()

Get AC inverter power generation profile (local timezone) in kW.

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

agg_albedo(time_index, albedo)

Aggregate a timeseries of albedo data to monthly values w len 12 as required by pysam Pvsamv1

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_mean_ac()

Get mean AC capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

cf_profile_ac()

Get hourly AC capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

clipped_power()

Get the clipped DC power generated behind the inverter (local timezone) in kW.

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

dc()

Get DC array power generation profile (local timezone) in kW.

default()

Get the executed default pysam PVWATTSV8 object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get AC inverter power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation with possibility for follow on econ analysis.

set_latitude_tilt_az(sam_sys_inputs, meta)

Check if tilt is specified as latitude and set tilt=lat, az=180 or 0

set_resource_data(resource, meta)

Set NSRDB resource data arrays.

system_capacity_ac()

Get AC system capacity from SAM inputs.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.Pvwattsv8' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/Pvwattsv8.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+static default()[source]
+

Get the executed default pysam PVWATTSV8 object.

+
+
Returns:
+

PySAM.Pvwattsv8

+
+
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+ac()
+

Get AC inverter power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of AC inverter power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% of the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+static agg_albedo(time_index, albedo)
+

Aggregate a timeseries of albedo data to monthly values w len 12 as +required by pysam Pvsamv1

+

Tech spec from pysam docs: +https://nrel-pysam.readthedocs.io/en/master/modules/Pvsamv1.html +#PySAM.Pvsamv1.Pvsamv1.SolarResource.albedo

+
+
Parameters:
+
    +
  • time_index (pd.DatetimeIndex) – Timeseries solar resource datetimeindex

  • +
  • albedo (list) – Timeseries Albedo data to be aggregated. Should be 0-1 and likely +hourly or less.

  • +
+
+
Returns:
+

monthly_albedo (list) – 1D list of monthly albedo values with length 12

+
+
+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_mean()
+

Get mean capacity factor (fractional) from SAM.

+

NOTE: PV capacity factor is the AC power production / the DC nameplate

+
+
Returns:
+

output (float) – Mean capacity factor (fractional). +PV CF is calculated as AC power / DC nameplate.

+
+
+
+ +
+
+cf_mean_ac()
+

Get mean AC capacity factor (fractional) from SAM.

+

NOTE: This value only available in PVWattsV8 and up.

+
+
Returns:
+

output (float) – Mean AC capacity factor (fractional). +PV AC CF is calculated as AC power / AC nameplate.

+
+
+
+ +
+
+cf_profile()
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+

NOTE: PV capacity factor is the AC power production / the DC nameplate

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval. +PV CF is calculated as AC power / DC nameplate.

+
+
+
+ +
+
+cf_profile_ac()
+

Get hourly AC capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+

NOTE: PV AC capacity factor is the AC power production / the AC +nameplate. AC nameplate = DC nameplate / ILR

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval. +PV AC CF is calculated as AC power / AC nameplate.

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+clipped_power()
+

Get the clipped DC power generated behind the inverter +(local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

clipped (np.ndarray) – 1D array of clipped DC power in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+dc()
+

Get DC array power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of DC array power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day.

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get AC inverter power generation profile (local timezone) in kW. +This is an alias of the “ac” SAM output variable if PySAM version>=3. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of AC inverter power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation with possibility for follow on econ analysis.

+
+ +
+
+static set_latitude_tilt_az(sam_sys_inputs, meta)
+

Check if tilt is specified as latitude and set tilt=lat, az=180 or 0

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments. +If for a pv simulation the “tilt” parameter was originally not +present or set to ‘lat’ or ‘latitude’, the tilt will be set to +the absolute value of the latitude found in meta and the azimuth +will be 180 if lat>0, 0 if lat<0.

+
+
+
+ +
+
+set_resource_data(resource, meta)
+

Set NSRDB resource data arrays.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
+

Raises ValueError if lat/lon are outside of -90 to 90 and -180 to 180, respectively.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+system_capacity_ac()
+

Get AC system capacity from SAM inputs.

+

NOTE: AC nameplate = DC nameplate / ILR

+
+
Returns:
+

cf_profile (float) – AC nameplate = DC nameplate / ILR

+
+
+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Dataframe or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.SolarWaterHeat.html b/_autosummary/reV.SAM.generation.SolarWaterHeat.html new file mode 100644 index 000000000..8cb9d33ed --- /dev/null +++ b/_autosummary/reV.SAM.generation.SolarWaterHeat.html @@ -0,0 +1,1204 @@ + + + + + + + reV.SAM.generation.SolarWaterHeat — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.SolarWaterHeat

+
+
+class SolarWaterHeat(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: AbstractSamGenerationFromWeatherFile

+

Solar Water Heater generation

+

Initialize a SAM generation object.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam swh object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation with the possibility of follow-on econ analysis.

set_resource_data(resource, meta)

Generate the weather file and set the path as an input.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

PYSAM_WEATHER_TAG

WF_META_DROP_COLS

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.Swh' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/Swh.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+static default()[source]
+

Get the executed default pysam swh object.

+
+
Returns:
+

PySAM.Swh

+
+
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% of the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_mean()
+

Get mean capacity factor (fractional) from SAM.

+
+
Returns:
+

output (float) – Mean capacity factor (fractional).

+
+
+
+ +
+
+cf_profile()
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove the last day.

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of hourly power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation with the possibility of follow-on econ analysis.

+
+ +
+
+set_resource_data(resource, meta)
+

Generate the weather file and set the path as an input.

+

Some PySAM models require a data file, not raw data. This method +generates the weather data, writes it to a file on disk, and +then sets the file as an input to the generation module. The +function +run_gen_and_econ() +deletes the file on disk after a run is complete.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Time series resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the +required variables to run the respective SAM simulation. +Remapping will be done to convert typical NSRDB/WTK names +into SAM names (e.g. DNI -> dn and wind_speed -> windspeed).

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, +elevation, and timezone.

  • +
+
+
+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Dataframe or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.TcsMoltenSalt.html b/_autosummary/reV.SAM.generation.TcsMoltenSalt.html new file mode 100644 index 000000000..67f6309b4 --- /dev/null +++ b/_autosummary/reV.SAM.generation.TcsMoltenSalt.html @@ -0,0 +1,1218 @@ + + + + + + + reV.SAM.generation.TcsMoltenSalt — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.TcsMoltenSalt

+
+
+class TcsMoltenSalt(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: AbstractSamSolar

+

Concentrated Solar Power (CSP) generation with tower molten salt

+

Initialize a SAM generation object.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

agg_albedo(time_index, albedo)

Aggregate a timeseries of albedo data to monthly values w len 12 as required by pysam Pvsamv1

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_profile()

Get absolute value hourly capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam CSP object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation with the possibility of follow-on econ analysis.

set_resource_data(resource, meta)

Set NSRDB resource data arrays.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.TcsmoltenSalt' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/TcsmoltenSalt.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+cf_profile()[source]
+

Get absolute value hourly capacity factor (frac) profile in +local timezone. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static default()[source]
+

Get the executed default pysam CSP object.

+
+
Returns:
+

PySAM.TcsmoltenSalt

+
+
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+static agg_albedo(time_index, albedo)
+

Aggregate a timeseries of albedo data to monthly values w len 12 as +required by pysam Pvsamv1

+

Tech spec from pysam docs: +https://nrel-pysam.readthedocs.io/en/master/modules/Pvsamv1.html +#PySAM.Pvsamv1.Pvsamv1.SolarResource.albedo

+
+
Parameters:
+
    +
  • time_index (pd.DatetimeIndex) – Timeseries solar resource datetimeindex

  • +
  • albedo (list) – Timeseries Albedo data to be aggregated. Should be 0-1 and likely +hourly or less.

  • +
+
+
Returns:
+

monthly_albedo (list) – 1D list of monthly albedo values with length 12

+
+
+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_mean()
+

Get mean capacity factor (fractional) from SAM.

+
+
Returns:
+

output (float) – Mean capacity factor (fractional).

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not remove last day

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of hourly power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation with the possibility of follow-on econ analysis.

+
+ +
+
+set_resource_data(resource, meta)
+

Set NSRDB resource data arrays.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Dataframe or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.TroughPhysicalHeat.html b/_autosummary/reV.SAM.generation.TroughPhysicalHeat.html new file mode 100644 index 000000000..3fc2d02a9 --- /dev/null +++ b/_autosummary/reV.SAM.generation.TroughPhysicalHeat.html @@ -0,0 +1,1204 @@ + + + + + + + reV.SAM.generation.TroughPhysicalHeat — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.TroughPhysicalHeat

+
+
+class TroughPhysicalHeat(resource, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None, drop_leap=False)[source]
+

Bases: AbstractSamGenerationFromWeatherFile

+

Trough Physical Process Heat generation

+

Initialize a SAM generation object.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Calculate mean capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam trough object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation and possibly follow-on econ analysis.

set_resource_data(resource, meta)

Generate the weather file and set the path as an input.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

PYSAM_WEATHER_TAG

WF_META_DROP_COLS

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.TroughPhysicalProcessHeat' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/TroughPhysicalProcessHeat.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+cf_mean()[source]
+

Calculate mean capacity factor (fractional) from SAM.

+
+
Returns:
+

output (float) – Mean capacity factor (fractional).

+
+
+
+ +
+
+static default()[source]
+

Get the executed default pysam trough object.

+
+
Returns:
+

PySAM.TroughPhysicalProcessHeat

+
+
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_profile()
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not remove last day

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of hourly power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation and possibly follow-on econ analysis.

+
+ +
+
+set_resource_data(resource, meta)
+

Generate the weather file and set the path as an input.

+

Some PySAM models require a data file, not raw data. This method +generates the weather data, writes it to a file on disk, and +then sets the file as an input to the generation module. The +function +run_gen_and_econ() +deletes the file on disk after a run is complete.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Time series resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the +required variables to run the respective SAM simulation. +Remapping will be done to convert typical NSRDB/WTK names +into SAM names (e.g. DNI -> dn and wind_speed -> windspeed).

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, +elevation, and timezone.

  • +
+
+
+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Dataframe or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.WindPower.html b/_autosummary/reV.SAM.generation.WindPower.html new file mode 100644 index 000000000..a8547bf83 --- /dev/null +++ b/_autosummary/reV.SAM.generation.WindPower.html @@ -0,0 +1,1320 @@ + + + + + + + reV.SAM.generation.WindPower — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.WindPower

+
+
+class WindPower(*args, **kwargs)[source]
+

Bases: AbstractSamWind

+

Class for Wind generation from SAM

+

Wind generation from SAM.

+

See the PySAM Windpower +documentation for the configuration keys required in the +sam_sys_inputs config. You may also include the following +reV-specific keys:

+
+
    +
  • reV_power_curve_losses : A dictionary that can be used +to initialize +PowerCurveLossesInput. +For example:

    +
    reV_power_curve_losses = {
    +    'target_losses_percent': 9.8,
    +    'transformation': 'exponential_stretching'
    +}
    +
    +
    +

    See the description of the class mentioned above or the +reV losses demo notebook +for detailed instructions on how to specify this input.

    +
  • +
  • reV_outages : Specification for reV-scheduled +stochastic outage losses. For example:

    +
    outage_info = [
    +    {
    +        'count': 6,
    +        'duration': 24,
    +        'percentage_of_capacity_lost': 100,
    +        'allowed_months': ['January', 'March'],
    +        'allow_outage_overlap': True
    +    },
    +    {
    +        'count': 10,
    +        'duration': 1,
    +        'percentage_of_capacity_lost': 10,
    +        'allowed_months': ['January'],
    +        'allow_outage_overlap': False
    +    },
    +    ...
    +]
    +
    +
    +

    See the description of +add_scheduled_losses() +or the +reV losses demo notebook +for detailed instructions on how to specify this input.

    +
  • +
  • reV_outages_seed : Integer value used to seed the RNG +used to compute stochastic outage losses.

  • +
  • time_index_step : Integer representing the step size +used to sample the time_index in the resource data. +This can be used to reduce temporal resolution (i.e. for +30 minute input data, time_index_step=1 yields the +full 30 minute time series as output, while +time_index_step=2 yields hourly output, and so forth).

    +
    +

    Note

    +

    The reduced data shape (i.e. after applying a +step size of time_index_step) must still be +an integer multiple of 8760, or the execution +will fail.

    +
    +
  • +
+
+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_power_curve_losses()

Adjust power curve in SAM config file to account for losses.

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam WindPower object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation with possibility for follow on econ analysis.

set_resource_data(resource, meta)

Set WTK resource data arrays.

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

wind_resource_from_input()

Collect wind resource and weights from inputs.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

POWER_CURVE_CONFIG_KEY

Specify power curve loss target in the config file using this key.

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

input_power_curve

Original power curve for site.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.Windpower' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/Windpower.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+set_resource_data(resource, meta)[source]
+

Set WTK resource data arrays.

+
+
Parameters:
+
    +
  • resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

  • +
  • meta (pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
+
+ +
+
+static default()[source]
+

Get the executed default pysam WindPower object.

+
+
Returns:
+

PySAM.Windpower

+
+
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+POWER_CURVE_CONFIG_KEY = 'reV_power_curve_losses'
+

Specify power curve loss target in the config file using this key.

+
+ +
+
+add_power_curve_losses()
+

Adjust power curve in SAM config file to account for losses.

+

This function reads the information in the +reV_power_curve_losses key of the sam_sys_inputs +dictionary and computes a new power curve that accounts for the +loss percentage specified from that input. If no power curve +loss info is specified in sam_sys_inputs, the power curve +will not be adjusted.

+
+

See also

+
+
adjust_power_curve()

Power curve shift calculation.

+
+
+
+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the hierarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_mean()
+

Get mean capacity factor (fractional) from SAM.

+
+
Returns:
+

output (float) – Mean capacity factor (fractional).

+
+
+
+ +
+
+cf_profile()
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove last day

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of hourly power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+property input_power_curve
+

Original power curve for site.

+
+
Type:
+

PowerCurve

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation with possibility for follow on econ analysis.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Dataframe or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+
+wind_resource_from_input()
+

Collect wind resource and weights from inputs.

+
+
Returns:
+

PowerCurveWindResource – Wind resource used to compute power curve shift.

+
+
Raises:
+

reVLossesValueError – If power curve losses are not compatible with the + ‘wind_resource_model_choice’.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.WindPowerPD.html b/_autosummary/reV.SAM.generation.WindPowerPD.html new file mode 100644 index 000000000..8b48f2b26 --- /dev/null +++ b/_autosummary/reV.SAM.generation.WindPowerPD.html @@ -0,0 +1,1252 @@ + + + + + + + reV.SAM.generation.WindPowerPD — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation.WindPowerPD

+
+
+class WindPowerPD(ws_edges, wd_edges, wind_dist, meta, sam_sys_inputs, site_sys_inputs=None, output_request=None)[source]
+

Bases: AbstractSamGeneration, PowerCurveLossesMixin

+

WindPower analysis with wind speed/direction joint probability +distribution input

+

Initialize a SAM generation object for windpower with a +speed/direction joint probability distribution.

+
+
Parameters:
+
    +
  • ws_edges (np.ndarray) – 1D array of windspeed (m/s) values that set the bin edges for the +wind probability distribution. Same len as wind_dist.shape[0] + 1

  • +
  • wd_edges (np.ndarray) – 1D array of winddirections (deg) values that set the bin edges +for the wind probability dist. Same len as wind_dist.shape[1] + 1

  • +
  • wind_dist (np.ndarray) – 2D array probability distribution of (windspeed, winddirection).

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • output_request (list) – Requested SAM outputs (e.g., ‘cf_mean’, ‘annual_energy’, +‘cf_profile’, ‘gen_profile’, ‘energy_yield’, ‘ppa_price’, +‘lcoe_fcr’).

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_power_curve_losses()

Adjust power curve in SAM config file to account for losses.

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

annual_energy()

Get annual energy generation value in kWh from SAM.

assign_inputs()

Assign the self.sam_sys_inputs attribute to the PySAM object.

cf_mean()

Get mean capacity factor (fractional) from SAM.

cf_profile()

Get hourly capacity factor (frac) profile in local timezone.

check_resource_data(resource)

Check resource dataframe for NaN values

collect_outputs([output_lookup])

Collect SAM output_request, convert timeseries outputs to UTC, and save outputs to self.outputs property.

default()

Get the executed default pysam object.

drop_leap(resource)

Drop Feb 29th from resource df with time index.

energy_yield()

Get annual energy yield value in kwh/kw from SAM.

ensure_res_len(arr, time_index)

Ensure time_index has a constant time-step and only covers 365 days (no leap days).

execute()

Call the PySAM execute method.

gen_profile()

Get power generation profile (local timezone) in kW.

get_sam_res(*args, **kwargs)

Get the SAM resource iterator object (single year, single file).

get_time_interval(time_index)

Get the time interval.

make_datetime(series)

Ensure that pd series is a datetime series with dt accessor

outputs_to_utc_arr()

Convert array-like SAM outputs to UTC np.ndarrays

reV_run(points_control, res_file, site_df[, ...])

Execute SAM generation based on a reV points control instance.

run()

Run a reV-SAM generation object by assigning inputs, executing the SAM simulation, collecting outputs, and converting all arrays to UTC.

run_gen_and_econ()

Run SAM generation with possibility for follow on econ analysis.

set_resource_data(ws_edges, wd_edges, wind_dist)

Send wind PD to pysam

tz_elev_check(sam_sys_inputs, ...)

Check timezone+elevation input and use json config timezone+elevation if not in resource meta.

wind_resource_from_input()

Collect wind resource and weights from inputs.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DIR

IGNORE_ATTRS

MODULE

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

POWER_CURVE_CONFIG_KEY

Specify power curve loss target in the config file using this key.

attr_dict

Get the hierarchical PySAM object attribute dictionary.

has_timezone

Returns true if instance has a timezone set

input_list

Get the list of lowest level input attribute/variable names.

input_power_curve

Original power curve for site.

meta

Get meta data property.

module

Get module property.

outage_seed

A value to use as the seed for the outage losses.

pysam

Get the pysam object.

site

Get the site number for this SAM simulation.

+
+
+PYSAM = <module 'PySAM.Windpower' from '/opt/hostedtoolcache/Python/3.8.18/x64/lib/python3.8/site-packages/PySAM/Windpower.cpython-38-x86_64-linux-gnu.so'>
+
+ +
+
+set_resource_data(ws_edges, wd_edges, wind_dist)[source]
+

Send wind PD to pysam

+
+
Parameters:
+
    +
  • ws_edges (np.ndarray) – 1D array of windspeed (m/s) values that set the bin edges for the +wind probability distribution. Same len as wind_dist.shape[0] + 1

  • +
  • wd_edges (np.ndarray) – 1D array of winddirections (deg) values that set the bin edges +for the wind probability dist. Same len as wind_dist.shape[1] + 1

  • +
  • wind_dist (np.ndarray) – 2D array probability distribution of (windspeed, winddirection).

  • +
+
+
+
+ +
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+POWER_CURVE_CONFIG_KEY = 'reV_power_curve_losses'
+

Specify power curve loss target in the config file using this key.

+
+ +
+
+add_power_curve_losses()
+

Adjust power curve in SAM config file to account for losses.

+

This function reads the information in the +reV_power_curve_losses key of the sam_sys_inputs +dictionary and computes a new power curve that accounts for the +loss percentage specified from that input. If no power curve +loss info is specified in sam_sys_inputs, the power curve +will not be adjusted.

+
+

See also

+
+
adjust_power_curve()

Power curve shift calculation.

+
+
+
+
+ +
+
+add_scheduled_losses(resource=None)
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+annual_energy()
+

Get annual energy generation value in kWh from SAM.

+
+
Returns:
+

output (float) – Annual energy generation (kWh).

+
+
+
+ +
+
+assign_inputs()
+

Assign the self.sam_sys_inputs attribute to the PySAM object.

+
+ +
+
+property attr_dict
+

Get the heirarchical PySAM object attribute dictionary.

+
+
Returns:
+

_attr_dict (dict) –

+
+
Dictionary with:

keys: variable groups +values: lowest level attribute/variable names

+
+
+

+
+
+
+ +
+
+cf_mean()
+

Get mean capacity factor (fractional) from SAM.

+
+
Returns:
+

output (float) – Mean capacity factor (fractional).

+
+
+
+ +
+
+cf_profile()
+

Get hourly capacity factor (frac) profile in local timezone. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

cf_profile (np.ndarray) – 1D numpy array of capacity factor profile. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+check_resource_data(resource)
+

Check resource dataframe for NaN values

+
+
Parameters:
+

resource (pd.DataFrame) – Timeseries solar or wind resource data for a single location with a +pandas DatetimeIndex. There must be columns for all the required +variables to run the respective SAM simulation. Remapping will be +done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> +dn and wind_speed -> windspeed)

+
+
+
+ +
+
+collect_outputs(output_lookup=None)
+

Collect SAM output_request, convert timeseries outputs to UTC, and +save outputs to self.outputs property.

+
+
Parameters:
+

output_lookup (dict | None) – Lookup dictionary mapping output keys to special output methods. +None defaults to generation default outputs.

+
+
+
+ +
+
+classmethod default()
+

Get the executed default pysam object.

+
+
Returns:
+

PySAM.GenericSystem

+
+
+
+ +
+
+static drop_leap(resource)
+

Drop Feb 29th from resource df with time index.

+
+
Parameters:
+

resource (pd.DataFrame) – Resource dataframe with an index containing a pandas +time index object with month and day attributes.

+
+
Returns:
+

resource (pd.DataFrame) – Resource dataframe with all February 29th timesteps removed.

+
+
+
+ +
+
+energy_yield()
+

Get annual energy yield value in kwh/kw from SAM.

+
+
Returns:
+

output (float) – Annual energy yield (kwh/kw).

+
+
+
+ +
+
+static ensure_res_len(arr, time_index)
+

Ensure time_index has a constant time-step and only covers 365 days +(no leap days). If not, remove last day

+
+
Parameters:
+
    +
  • arr (ndarray) – Array to truncate if time_index has a leap day

  • +
  • time_index (pandas.DatetimeIndex) – Time index associated with arr, used to check time-series +frequency and number of days

  • +
+
+
Returns:
+

arr (ndarray) – Truncated array of data such that there are 365 days

+
+
+
+ +
+
+execute()
+

Call the PySAM execute method. Raise SAMExecutionError if error. +Include the site index if available.

+
+ +
+
+gen_profile()
+

Get power generation profile (local timezone) in kW. +See self.outputs attribute for collected output data in UTC.

+
+
Returns:
+

output (np.ndarray) – 1D array of hourly power generation in kW. +Datatype is float32 and array length is 8760*time_interval.

+
+
+
+ +
+
+static get_sam_res(*args, **kwargs)
+

Get the SAM resource iterator object (single year, single file).

+
+ +
+
+classmethod get_time_interval(time_index)
+

Get the time interval.

+
+
Parameters:
+

time_index (pd.series) – Datetime series. Must have a dt attribute to access datetime +properties (added using make_datetime method).

+
+
Returns:
+

time_interval (int:) – This value is the number of indices over which an hour is counted. +So if the timestep is 0.5 hours, time_interval is 2.

+
+
+
+ +
+
+property has_timezone
+

Returns true if instance has a timezone set

+
+ +
+
+property input_list
+

Get the list of lowest level input attribute/variable names.

+
+
Returns:
+

_inputs (list) – List of lowest level input attributes.

+
+
+
+ +
+
+property input_power_curve
+

Original power curve for site.

+
+
Type:
+

PowerCurve

+
+
+
+ +
+
+static make_datetime(series)
+

Ensure that pd series is a datetime series with dt accessor

+
+ +
+
+property meta
+

Get meta data property.

+
+ +
+
+property module
+

Get module property.

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+
+outputs_to_utc_arr()
+

Convert array-like SAM outputs to UTC np.ndarrays

+
+ +
+
+property pysam
+

Get the pysam object.

+
+ +
+
+classmethod reV_run(points_control, res_file, site_df, lr_res_file=None, output_request=('cf_mean',), drop_leap=False, gid_map=None, nn_map=None, bias_correct=None)
+

Execute SAM generation based on a reV points control instance.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • res_file (str) – Resource file with full path.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • lr_res_file (str | None) – Optional low resolution resource file that will be dynamically +mapped+interpolated to the nominal-resolution res_file. This +needs to be of the same format as resource_file, e.g. they both +need to be handled by the same rex Resource handler such as +WindResource

  • +
  • output_request (list | tuple) – Outputs to retrieve from SAM.

  • +
  • drop_leap (bool) – Drops February 29th from the resource data. If False, December +31st is dropped from leap years.

  • +
  • gid_map (None | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This enables the user to input unique +generation gids in the project points that map to non-unique +resource gids. This can be None or a pre-extracted dict.

  • +
  • nn_map (np.ndarray) – Optional 1D array of nearest neighbor mappings associated with the +res_file to lr_res_file spatial mapping. For details on this +argument, see the rex.MultiResolutionResource docstring.

  • +
  • bias_correct (None | pd.DataFrame) – None if not provided or extracted DataFrame with wind or solar +resource bias correction table. This has columns: gid (can be index +name), adder, scalar. The gid field should match the true resource +gid regardless of the optional gid_map input. If both adder and +scalar are present, the wind or solar resource is corrected by +(res*scalar)+adder. If either adder or scalar is not present, +scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are +corrected depending on the technology. GHI and DNI are corrected +with the same correction factors.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+
+run()
+

Run a reV-SAM generation object by assigning inputs, executing the +SAM simulation, collecting outputs, and converting all arrays to UTC.

+
+ +
+
+run_gen_and_econ()
+

Run SAM generation with possibility for follow on econ analysis.

+
+ +
+
+property site
+

Get the site number for this SAM simulation.

+
+ +
+
+static tz_elev_check(sam_sys_inputs, site_sys_inputs, meta)
+

Check timezone+elevation input and use json config +timezone+elevation if not in resource meta.

+
+
Parameters:
+
    +
  • sam_sys_inputs (dict) – Site-agnostic SAM system model inputs arguments.

  • +
  • site_sys_inputs (dict) – Optional set of site-specific SAM system inputs to complement the +site-agnostic inputs.

  • +
  • meta (pd.DataFrame | pd.Series) – Meta data corresponding to the resource input for the single +location. Should include values for latitude, longitude, elevation, +and timezone.

  • +
+
+
Returns:
+

meta (pd.DataFrame | pd.Series) – Dataframe or series for a single site. Will include “timezone” +and “elevation” from the sam and site system inputs if found.

+
+
+
+ +
+
+wind_resource_from_input()
+

Collect wind resource and weights from inputs.

+
+
Returns:
+

PowerCurveWindResource – Wind resource used to compute power curve shift.

+
+
Raises:
+

reVLossesValueError – If power curve losses are not compatible with the + ‘wind_resource_model_choice’.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.generation.html b/_autosummary/reV.SAM.generation.html new file mode 100644 index 000000000..e00513aa8 --- /dev/null +++ b/_autosummary/reV.SAM.generation.html @@ -0,0 +1,689 @@ + + + + + + + reV.SAM.generation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.generation

+

reV-to-SAM generation interface module.

+

Wraps the NREL-PySAM pvwattsv5, windpower, and tcsmoltensalt modules with +additional reV features.

+

Classes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

AbstractSamGeneration(resource, meta, ...[, ...])

Base class for SAM generation simulations.

AbstractSamGenerationFromWeatherFile(...[, ...])

Base class for running sam generation with a weather file on disk.

AbstractSamPv(resource, meta, sam_sys_inputs)

Photovoltaic (PV) generation with either pvwatts or detailed pv.

AbstractSamSolar(resource, meta, sam_sys_inputs)

Base Class for Solar generation from SAM

AbstractSamWind(*args, **kwargs)

Wind generation from SAM.

Geothermal(resource, meta, sam_sys_inputs[, ...])

reV-SAM geothermal generation.

LinearDirectSteam(resource, meta, sam_sys_inputs)

Process heat linear Fresnel direct steam generation

MhkWave(resource, meta, sam_sys_inputs[, ...])

Class for Wave generation from SAM

PvSamv1(resource, meta, sam_sys_inputs[, ...])

Detailed PV model

PvWattsv5(resource, meta, sam_sys_inputs[, ...])

Photovoltaic (PV) generation with pvwattsv5.

PvWattsv7(resource, meta, sam_sys_inputs[, ...])

Photovoltaic (PV) generation with pvwattsv7.

PvWattsv8(resource, meta, sam_sys_inputs[, ...])

Photovoltaic (PV) generation with pvwattsv8.

SolarWaterHeat(resource, meta, sam_sys_inputs)

Solar Water Heater generation

TcsMoltenSalt(resource, meta, sam_sys_inputs)

Concentrated Solar Power (CSP) generation with tower molten salt

TroughPhysicalHeat(resource, meta, ...[, ...])

Trough Physical Process Heat generation

WindPower(*args, **kwargs)

Class for Wind generation from SAM

WindPowerPD(ws_edges, wd_edges, wind_dist, ...)

WindPower analysis with wind speed/direction joint probability distribution input

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.html b/_autosummary/reV.SAM.html new file mode 100644 index 000000000..92fd4bf8f --- /dev/null +++ b/_autosummary/reV.SAM.html @@ -0,0 +1,652 @@ + + + + + + + reV.SAM — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM

+

reV-SAM interface module

+ + + + + + + + + + + + + + + + + + + + + +

reV.SAM.SAM

reV-to-SAM interface module.

reV.SAM.defaults

PySAM default implementations.

reV.SAM.econ

reV-to-SAM econ interface module.

reV.SAM.generation

reV-to-SAM generation interface module.

reV.SAM.version_checker

Module to check PySAM versions and correct input keys to new SAM 2 keys.

reV.SAM.windbos

SAM Wind Balance of System Cost Model

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.version_checker.PySamVersionChecker.html b/_autosummary/reV.SAM.version_checker.PySamVersionChecker.html new file mode 100644 index 000000000..b8e1c4320 --- /dev/null +++ b/_autosummary/reV.SAM.version_checker.PySamVersionChecker.html @@ -0,0 +1,690 @@ + + + + + + + reV.SAM.version_checker.PySamVersionChecker — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.version_checker.PySamVersionChecker

+
+
+class PySamVersionChecker(requirement='2')[source]
+

Bases: object

+

Check the PySAM version and modify input keys if required.

+
+
Parameters:
+

requirement (str) – PySAM version requirement.

+
+
+

Methods

+ + + + + + +

run(tech, parameters)

Run PySAM version and inputs checker and modify keys to reflect PySAM 2 updates.

+

Attributes

+ + + + + + + + + + + + +

V2_CORRECTION_KEYS

WIND

pysam_version

Get the PySAM distribution version

+
+
+property pysam_version
+

Get the PySAM distribution version

+
+ +
+
+classmethod run(tech, parameters)[source]
+

Run PySAM version and inputs checker and modify keys to reflect +PySAM 2 updates.

+
+
Parameters:
+
    +
  • tech (str) – reV-SAM technology string and key to the V2_CORRECTION_KEYS dict

  • +
  • parameters (dict) – SAM input dictionary. Will be checked for valid keys if +PySAM version > 2.

  • +
+
+
Returns:
+

parameters (dict) – Updated input parameters dictionary

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.version_checker.html b/_autosummary/reV.SAM.version_checker.html new file mode 100644 index 000000000..641b96d41 --- /dev/null +++ b/_autosummary/reV.SAM.version_checker.html @@ -0,0 +1,641 @@ + + + + + + + reV.SAM.version_checker — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.version_checker

+

Module to check PySAM versions and correct input keys to new SAM 2 keys.

+

Created on Mon Feb 3 14:40:42 2020

+

@author: gbuster

+

Classes

+ + + + + + +

PySamVersionChecker([requirement])

Check the PySAM version and modify input keys if required.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.windbos.WindBos.html b/_autosummary/reV.SAM.windbos.WindBos.html new file mode 100644 index 000000000..0a7e95a49 --- /dev/null +++ b/_autosummary/reV.SAM.windbos.WindBos.html @@ -0,0 +1,789 @@ + + + + + + + reV.SAM.windbos.WindBos — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.windbos.WindBos

+
+
+class WindBos(inputs)[source]
+

Bases: object

+

Wind Balance of System Cost Model.

+
+
Parameters:
+

inputs (dict) – SAM key value pair inputs.

+
+
+

Methods

+ + + + + + +

reV_run(points_control, site_df[, ...])

Execute SAM SingleOwner simulations based on reV points control.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

KEYS

MODULE

bos_cost

Get the balance of system cost ($).

hub_height

Turbine hub height.

machine_rating

Single turbine machine rating either from input or power curve.

number_of_turbines

Number of turbines either based on input or system (farm) capacity and machine rating

output

Get a dictionary containing the cost breakdown.

rotor_diameter

Turbine rotor diameter.

sales_tax_cost

Get the cost of sales tax ($).

sales_tax_mult

Get a sales tax multiplier (frac of the total installed cost).

total_installed_cost

Get the total installed cost ($) (bos + turbine).

turbine_capital_cost

Returns zero (no turbine capital cost for WindBOS input, and assigns any input turbine_capital_cost to an attr

turbine_cost

Get the turbine cost ($).

+
+
+property machine_rating
+

Single turbine machine rating either from input or power curve.

+
+ +
+
+property hub_height
+

Turbine hub height.

+
+ +
+
+property rotor_diameter
+

Turbine rotor diameter.

+
+ +
+
+property number_of_turbines
+

Number of turbines either based on input or system (farm) capacity +and machine rating

+
+ +
+
+property turbine_capital_cost
+

Returns zero (no turbine capital cost for WindBOS input, +and assigns any input turbine_capital_cost to an attr

+
+ +
+
+property bos_cost
+

Get the balance of system cost ($).

+
+ +
+
+property turbine_cost
+

Get the turbine cost ($).

+
+ +
+
+property sales_tax_mult
+

Get a sales tax multiplier (frac of the total installed cost).

+
+ +
+
+property sales_tax_cost
+

Get the cost of sales tax ($).

+
+ +
+
+property total_installed_cost
+

Get the total installed cost ($) (bos + turbine).

+
+ +
+
+property output
+

Get a dictionary containing the cost breakdown.

+
+ +
+
+classmethod reV_run(points_control, site_df, output_request=('total_installed_cost',), **kwargs)[source]
+

Execute SAM SingleOwner simulations based on reV points control.

+
+
Parameters:
+
    +
  • points_control (config.PointsControl) – PointsControl instance containing project points site and SAM +config info.

  • +
  • site_df (pd.DataFrame) – Dataframe of site-specific input variables. Row index corresponds +to site number/gid (via df.loc not df.iloc), column labels are the +variable keys that will be passed forward as SAM parameters.

  • +
  • output_request (list | tuple | str) – Output(s) to retrieve from SAM.

  • +
  • kwargs (dict) – Not used but maintained for polymorphic calls with other +SAM econ reV_run() methods (lcoe and single owner). +Breaks pylint error W0613: unused argument.

  • +
+
+
Returns:
+

out (dict) – Nested dictionaries where the top level key is the site index, +the second level key is the variable name, second level value is +the output variable value.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.SAM.windbos.html b/_autosummary/reV.SAM.windbos.html new file mode 100644 index 000000000..55cc44521 --- /dev/null +++ b/_autosummary/reV.SAM.windbos.html @@ -0,0 +1,639 @@ + + + + + + + reV.SAM.windbos — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.SAM.windbos

+

SAM Wind Balance of System Cost Model

+

Classes

+ + + + + + +

WindBos(inputs)

Wind Balance of System Cost Model.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.bespoke.BespokeMultiPlantData.html b/_autosummary/reV.bespoke.bespoke.BespokeMultiPlantData.html new file mode 100644 index 000000000..a144b7004 --- /dev/null +++ b/_autosummary/reV.bespoke.bespoke.BespokeMultiPlantData.html @@ -0,0 +1,679 @@ + + + + + + + reV.bespoke.bespoke.BespokeMultiPlantData — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.bespoke.BespokeMultiPlantData

+
+
+class BespokeMultiPlantData(res_fpath, sc_gid_to_hh, sc_gid_to_res_gid)[source]
+

Bases: object

+

Multi-plant preloaded data.

+

This object is intended to facilitate the use of pre-loaded data for +running BespokeWindPlants on systems with slow parallel +reads to a single HDF5 file.

+

Initialize BespokeMultiPlantData

+
+
Parameters:
+
    +
  • res_fpath (str) – Path to resource h5 file.

  • +
  • sc_gid_to_hh (dict) – Dictionary mapping SC GID values to hub-heights. Data for +each SC GID will be pulled for the corresponding hub-height +given in this dictionary.

  • +
  • sc_gid_to_res_gid (dict) – Dictionary mapping SC GID values to an iterable of resource +GID values. Resource GID values should correspond to GID +values in the HDF5 file, so any GID map must be applied +before initializing BespokeMultiPlantData.

  • +
+
+
+

Methods

+ + + + + + +

get_preloaded_data_for_gid(sc_gid)

Get the pre-loaded data for a single SC GID.

+
+
+get_preloaded_data_for_gid(sc_gid)[source]
+

Get the pre-loaded data for a single SC GID.

+
+
Parameters:
+

sc_gid (int) – SC GID to load resource data for.

+
+
Returns:
+

BespokeSinglePlantData – A loaded BespokeSinglePlantData object that can act as +an HDF5 handler stand-in for this SC GID only.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.bespoke.BespokeSinglePlant.html b/_autosummary/reV.bespoke.bespoke.BespokeSinglePlant.html new file mode 100644 index 000000000..366e99ca1 --- /dev/null +++ b/_autosummary/reV.bespoke.bespoke.BespokeSinglePlant.html @@ -0,0 +1,1189 @@ + + + + + + + reV.bespoke.bespoke.BespokeSinglePlant — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.bespoke.BespokeSinglePlant

+
+
+class BespokeSinglePlant(gid, excl, res, tm_dset, sam_sys_inputs, objective_function, capital_cost_function, fixed_operating_cost_function, variable_operating_cost_function, min_spacing='5x', wake_loss_multiplier=1, ga_kwargs=None, output_request=('system_capacity', 'cf_mean'), ws_bins=(0.0, 20.0, 5.0), wd_bins=(0.0, 360.0, 45.0), excl_dict=None, inclusion_mask=None, data_layers=None, resolution=64, excl_area=None, exclusion_shape=None, eos_mult_baseline_cap_mw=200, prior_meta=None, gid_map=None, bias_correct=None, pre_loaded_data=None, close=True)[source]
+

Bases: object

+

Framework for analyzing and optimized a wind plant layout specific to +the local wind resource and exclusions for a single reV supply curve point.

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • excl (str | ExclusionMask) – Filepath to exclusions h5 or ExclusionMask file handler.

  • +
  • res (str | Resource) – Filepath to .h5 wind resource file or pre-initialized Resource +handler

  • +
  • tm_dset (str) – Dataset name in the exclusions file containing the +exclusions-to-resource mapping data.

  • +
  • sam_sys_inputs (dict) – SAM windpower compute module system inputs not including the +wind resource data.

  • +
  • objective_function (str) – The objective function of the optimization as a string, should +return the objective to be minimized during layout optimization. +Variables available are:

    +
    +
      +
    • n_turbines: the number of turbines

    • +
    • system_capacity: wind plant capacity

    • +
    • aep: annual energy production

    • +
    • fixed_charge_rate: user input fixed_charge_rate if included +as part of the sam system config.

    • +
    • self.wind_plant: the SAM wind plant object, through which +all SAM variables can be accessed

    • +
    • capital_cost: plant capital cost as evaluated +by capital_cost_function

    • +
    • fixed_operating_cost: plant fixed annual operating cost as +evaluated by fixed_operating_cost_function

    • +
    • variable_operating_cost: plant variable annual operating cost +as evaluated by variable_operating_cost_function

    • +
    +
    +
  • +
  • capital_cost_function (str) – The plant capital cost function as a string, must return the total +capital cost in $. Has access to the same variables as the +objective_function.

  • +
  • fixed_operating_cost_function (str) – The plant annual fixed operating cost function as a string, must +return the fixed operating cost in $/year. Has access to the same +variables as the objective_function.

  • +
  • variable_operating_cost_function (str) – The plant annual variable operating cost function as a string, must +return the variable operating cost in $/kWh. Has access to the same +variables as the objective_function.

  • +
  • min_spacing (float | int | str) – Minimum spacing between turbines in meters. Can also be a string +like “5x” (default) which is interpreted as 5 times the turbine +rotor diameter.

  • +
  • wake_loss_multiplier (float, optional) – A multiplier used to scale the annual energy lost due to +wake losses. +.. WARNING:: This multiplier will ONLY be applied during the +optimization process and will NOT come through in output +values such as the hourly profiles, +aep, any of the cost functions, or even the output objective.

  • +
  • ga_kwargs (dict | None) – Dictionary of keyword arguments to pass to GA initialization. +If None, default initialization values are used. +See GeneticAlgorithm for +a description of the allowed keyword arguments.

  • +
  • output_request (list | tuple) – Outputs requested from the SAM windpower simulation after the +bespoke plant layout optimization. Can also request resource means +like ws_mean, windspeed_mean, temperature_mean, pressure_mean.

  • +
  • ws_bins (tuple) – 3-entry tuple with (start, stop, step) for the windspeed binning of +the wind joint probability distribution. The stop value is +inclusive, so ws_bins=(0, 20, 5) would result in four bins with bin +edges (0, 5, 10, 15, 20).

  • +
  • wd_bins (tuple) – 3-entry tuple with (start, stop, step) for the winddirection +binning of the wind joint probability distribution. The stop value +is inclusive, so ws_bins=(0, 360, 90) would result in four bins +with bin edges (0, 90, 180, 270, 360).

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +None if excl input is pre-initialized.

  • +
  • inclusion_mask (np.ndarray) – 2D array pre-extracted inclusion mask where 1 is included and 0 is +excluded. The shape of this will be checked against the input +resolution.

  • +
  • data_layers (None | dict) – Aggregation data layers. Must be a dictionary keyed by data label +name. Each value must be another dictionary with “dset”, “method”, +and “fpath”.

  • +
  • resolution (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

  • +
  • excl_area (float | None, optional) – Area of an exclusion pixel in km2. None will try to infer the area +from the profile transform attribute in excl_fpath, by default None

  • +
  • exclusion_shape (tuple) – Shape of the full exclusions extent (rows, cols). Inputing this +will speed things up considerably.

  • +
  • eos_mult_baseline_cap_mw (int | float, optional) – Baseline plant capacity (MW) used to calculate economies of +scale (EOS) multiplier from the capital_cost_function. EOS +multiplier is calculated as the $-per-kW of the wind plant +divided by the $-per-kW of a plant with this baseline +capacity. By default, 200 (MW), which aligns the baseline +with ATB assumptions. See here: https://tinyurl.com/y85hnu6h.

  • +
  • prior_meta (pd.DataFrame | None) – Optional meta dataframe belonging to a prior run. This will only +run the timeseries power generation step and assume that all of the +wind plant layouts are fixed given the prior run. The meta data +needs columns “capacity”, “turbine_x_coords”, and +“turbine_y_coords”.

  • +
  • gid_map (None | str | dict) – Mapping of unique integer generation gids (keys) to single integer +resource gids (values). This can be None, a pre-extracted dict, or +a filepath to json or csv. If this is a csv, it must have the +columns “gid” (which matches the techmap) and “gid_map” (gids to +extract from the resource input). This is useful if you’re running +forecasted resource data (e.g., ECMWF) to complement historical +meteorology (e.g., WTK).

  • +
  • bias_correct (str | pd.DataFrame | None) – Optional DataFrame or csv filepath to a wind bias correction table. +This has columns: gid (can be index name), adder, scalar. If both +adder and scalar are present, the wind is corrected by +(res*scalar)+adder. If either is not present, scalar defaults to 1 +and adder to 0. Only windspeed is corrected. Note that if gid_map +is provided, the bias_correct gid corresponds to the actual +resource data gid and not the techmap gid.

  • +
  • pre_loaded_data (BespokeSinglePlantData, optional) – A pre-loaded BespokeSinglePlantData object, or +None. Can be useful to speed up execution on file +systems with slow parallel reads.

  • +
  • close (bool) – Flag to close object file handlers on exit.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

agg_data_layers()

Aggregate optional data layers if requested and save to self.meta

check_dependencies()

Check special dependencies for bespoke

close()

Close any open file handlers via the sc point attribute.

get_lcoe_kwargs()

Get a namespace of arguments for calculating LCOE based on the bespoke optimized wind plant capacity

get_weighted_res_dir()

Special method for calculating the exclusion-weighted mean wind direction for the BespokeSinglePlant

get_weighted_res_ts(dset)

Special method for calculating the exclusion-weighted mean resource timeseries data for the BespokeSinglePlant.

get_wind_handler(res)

Get a wind resource handler for a resource filepath.

initialize_wind_plant_ts()

Initialize the annual wind plant timeseries analysis object(s) using the annual resource data and the sam system inputs from the optimized plant.

recalc_lcoe()

Recalculate the multi-year mean LCOE based on the multi-year mean annual energy production (AEP)

run(*args, **kwargs)

Run the bespoke optimization for a single wind plant.

run_plant_optimization()

Run the wind plant layout optimization and export outputs to outputs property.

run_wind_plant_ts()

Run the wind plant multi-year timeseries analysis and export output requests to outputs property.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

DEPENDENCIES

OUT_ATTRS

annual_time_indexes

Get an ordered list of single-year time index objects that matches the profile outputs from the wind_plant_ts object.

gid

SC point gid for this bespoke plant.

hub_height

Get the integer SAM system config turbine hub height (meters)

include_mask

Get the supply curve point 2D inclusion mask (included is 1, excluded is 0)

meta

Get the basic supply curve point meta data

original_sam_sys_inputs

Get the original (pre-optimized) SAM windpower system inputs.

outputs

Saved outputs for the single wind plant bespoke optimization.

pixel_side_length

Get the length of a single exclusion pixel side (meters)

plant_optimizer

Bespoke plant turbine placement optimizer object.

res_df

Get the reV compliant wind resource dataframe representing the aggregated and included wind resource in the current reV supply curve point at the turbine hub height.

sam_sys_inputs

Get the SAM windpower system inputs.

sc_point

Get the reV supply curve point object.

wind_dist

Get the wind joint probability distribution and corresponding bin edges

wind_plant_pd

reV WindPowerPD compute object for plant layout optimization based on wind joint probability distribution

wind_plant_ts

reV WindPower compute object(s) based on wind resource timeseries data keyed by year

years

Get the sorted list of analysis years.

+
+
+close()[source]
+

Close any open file handlers via the sc point attribute. If this +class was initialized with close=False, this will not close any +handlers.

+
+ +
+
+get_weighted_res_ts(dset)[source]
+

Special method for calculating the exclusion-weighted mean resource +timeseries data for the BespokeSinglePlant.

+
+
Returns:
+

data (np.ndarray) – Timeseries data of shape (n_time,) for the wind plant weighted by +the plant inclusions mask.

+
+
+
+ +
+
+get_weighted_res_dir()[source]
+

Special method for calculating the exclusion-weighted mean wind +direction for the BespokeSinglePlant

+
+
Returns:
+

mean_wind_dirs (np.ndarray) – Timeseries array of winddirection data in shape (n_time,) in units +of degrees from north.

+
+
+
+ +
+
+property gid
+

SC point gid for this bespoke plant.

+
+
Returns:
+

int

+
+
+
+ +
+
+property include_mask
+

Get the supply curve point 2D inclusion mask (included is 1, +excluded is 0)

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+property pixel_side_length
+

Get the length of a single exclusion pixel side (meters)

+
+
Returns:
+

float

+
+
+
+ +
+
+property original_sam_sys_inputs
+

Get the original (pre-optimized) SAM windpower system inputs.

+
+
Returns:
+

dict

+
+
+
+ +
+
+property sam_sys_inputs
+

Get the SAM windpower system inputs. If the wind plant has not yet +been optimized, this returns the initial SAM config. If the wind plant +has been optimized using the wind_plant_pd object, this returns the +final optimized SAM plant config.

+
+
Returns:
+

dict

+
+
+
+ +
+
+property sc_point
+

Get the reV supply curve point object.

+
+
Returns:
+

AggSCPoint

+
+
+
+ +
+
+property meta
+

Get the basic supply curve point meta data

+
+
Returns:
+

pd.DataFrame

+
+
+
+ +
+
+property hub_height
+

Get the integer SAM system config turbine hub height (meters)

+
+
Returns:
+

int

+
+
+
+ +
+
+property res_df
+

Get the reV compliant wind resource dataframe representing the +aggregated and included wind resource in the current reV supply curve +point at the turbine hub height. Includes a DatetimeIndex and columns +for temperature, pressure, windspeed, and winddirection.

+
+
Returns:
+

pd.DataFrame

+
+
+
+ +
+
+property years
+

Get the sorted list of analysis years.

+
+
Returns:
+

list

+
+
+
+ +
+
+property annual_time_indexes
+

Get an ordered list of single-year time index objects that matches +the profile outputs from the wind_plant_ts object.

+
+
Returns:
+

list

+
+
+
+ +
+
+property wind_dist
+

Get the wind joint probability distribution and corresponding bin +edges

+
+
Returns:
+

    +
  • wind_dist (np.ndarray) – 2D array probability distribution of (windspeed, winddirection) +normalized so the sum of all values = 1.

  • +
  • ws_edges (np.ndarray) – 1D array of windspeed (m/s) values that set the bin edges for the +wind probability distribution. Same len as wind_dist.shape[0] + 1

  • +
  • wd_edges (np.ndarray) – 1D array of winddirections (deg) values that set the bin edges +for the wind probability dist. Same len as wind_dist.shape[1] + 1

  • +
+

+
+
+
+ +
+
+initialize_wind_plant_ts()[source]
+

Initialize the annual wind plant timeseries analysis object(s) using +the annual resource data and the sam system inputs from the optimized +plant.

+
+
Returns:
+

wind_plant_ts (dict) – Annual reV.SAM.generation.WindPower object(s) keyed by year.

+
+
+
+ +
+
+property wind_plant_pd
+

reV WindPowerPD compute object for plant layout optimization based +on wind joint probability distribution

+
+
Returns:
+

reV.SAM.generation.WindPowerPD

+
+
+
+ +
+
+property wind_plant_ts
+

reV WindPower compute object(s) based on wind resource timeseries +data keyed by year

+
+
Returns:
+

dict

+
+
+
+ +
+
+property plant_optimizer
+

Bespoke plant turbine placement optimizer object.

+
+
Returns:
+

PlaceTurbines

+
+
+
+ +
+
+recalc_lcoe()[source]
+

Recalculate the multi-year mean LCOE based on the multi-year mean +annual energy production (AEP)

+
+ +
+
+get_lcoe_kwargs()[source]
+

Get a namespace of arguments for calculating LCOE based on the +bespoke optimized wind plant capacity

+
+
Returns:
+

lcoe_kwargs (dict) – kwargs for the SAM lcoe model. These are based on the original +sam_sys_inputs, normalized to the original system_capacity, and +updated based on the bespoke optimized system_capacity, includes +fixed_charge_rate, system_capacity (kW), capital_cost ($), +fixed_operating_cost ($), variable_operating_cost ($/kWh)

+
+
+
+ +
+
+static get_wind_handler(res)[source]
+

Get a wind resource handler for a resource filepath.

+
+
Parameters:
+

res (str) – Resource filepath to wtk .h5 file. Can include * wildcards +for multi year resource.

+
+
Returns:
+

handler (WindResource | MultiYearWindResource) – Wind resource handler or multi year handler

+
+
+
+ +
+
+classmethod check_dependencies()[source]
+

Check special dependencies for bespoke

+
+ +
+
+run_wind_plant_ts()[source]
+

Run the wind plant multi-year timeseries analysis and export output +requests to outputs property.

+
+
Returns:
+

outputs (dict) – Output dictionary for the full BespokeSinglePlant object. The +multi-year timeseries data is also exported to the +BespokeSinglePlant.outputs property.

+
+
+
+ +
+
+run_plant_optimization()[source]
+

Run the wind plant layout optimization and export outputs +to outputs property.

+
+
Returns:
+

outputs (dict) – Output dictionary for the full BespokeSinglePlant object. The +layout optimization output data is also exported to the +BespokeSinglePlant.outputs property.

+
+
+
+ +
+
+agg_data_layers()[source]
+

Aggregate optional data layers if requested and save to self.meta

+
+ +
+
+property outputs
+

Saved outputs for the single wind plant bespoke optimization.

+
+
Returns:
+

dict

+
+
+
+ +
+
+classmethod run(*args, **kwargs)[source]
+

Run the bespoke optimization for a single wind plant.

+
+
Parameters:
+

See the class initialization parameters.

+
+
Returns:
+

bsp (dict) – Bespoke single plant outputs namespace keyed by dataset name +including a dataset “meta” for the BespokeSinglePlant meta data.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.bespoke.BespokeSinglePlantData.html b/_autosummary/reV.bespoke.bespoke.BespokeSinglePlantData.html new file mode 100644 index 000000000..2eb84f53d --- /dev/null +++ b/_autosummary/reV.bespoke.bespoke.BespokeSinglePlantData.html @@ -0,0 +1,663 @@ + + + + + + + reV.bespoke.bespoke.BespokeSinglePlantData — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.bespoke.BespokeSinglePlantData

+
+
+class BespokeSinglePlantData(data_inds, wind_dirs, wind_speeds, temps, pressures, time_index)[source]
+

Bases: object

+

Single-plant preloaded data.

+

This object is intended to facilitate the use of pre-loaded data for +running BespokeSinglePlant on systems with slow parallel +reads to a single HDF5 file.

+

Initialize BespokeSinglePlantData

+
+
Parameters:
+
    +
  • data_inds (1D np.array) – Array of res GIDs. This array should be the same length as +the second dimension of wind_dirs, wind_speeds, temps, +and pressures. The GID value of data_inds[0] should +correspond to the wind_dirs[:, 0] data, etc.

  • +
  • wind_dirs, wind_speeds, temps, pressures (2D np.array) – Array of wind directions, wind speeds, temperatures, and +pressures, respectively. Dimensions should be correspond to +[time, location]. See documentation for data_inds for +required spatial mapping of GID values.

  • +
  • time_index (1D np.array) – Time index array corresponding to the temporal dimension of +the 2D data. Will be exposed directly to user.

  • +
+
+
+

Methods

+ + + +
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.bespoke.BespokeWindPlants.html b/_autosummary/reV.bespoke.bespoke.BespokeWindPlants.html new file mode 100644 index 000000000..83a5e5eb1 --- /dev/null +++ b/_autosummary/reV.bespoke.bespoke.BespokeWindPlants.html @@ -0,0 +1,1238 @@ + + + + + + + reV.bespoke.bespoke.BespokeWindPlants — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.bespoke.BespokeWindPlants

+
+
+class BespokeWindPlants(excl_fpath, res_fpath, tm_dset, objective_function, capital_cost_function, fixed_operating_cost_function, variable_operating_cost_function, project_points, sam_files, min_spacing='5x', wake_loss_multiplier=1, ga_kwargs=None, output_request=('system_capacity', 'cf_mean'), ws_bins=(0.0, 20.0, 5.0), wd_bins=(0.0, 360.0, 45.0), excl_dict=None, area_filter_kernel='queen', min_area=None, resolution=64, excl_area=None, data_layers=None, pre_extract_inclusions=False, prior_run=None, gid_map=None, bias_correct=None, pre_load_data=False)[source]
+

Bases: BaseAggregation

+

reV bespoke analysis class.

+

Much like generation, reV bespoke analysis runs SAM +simulations by piping in renewable energy resource data (usually +from the WTK), loading the SAM config, and then executing the +PySAM.Windpower.Windpower compute module. +However, unlike reV generation, bespoke analysis is +performed on the supply-curve grid resolution, and the plant +layout is optimized for every supply-curve point based on an +optimization objective specified by the user. See the NREL +publication on the bespoke methodology for more information.

+

See the documentation for the reV SAM class (e.g. +reV.SAM.generation.WindPower, +reV.SAM.generation.PvWattsv8, +reV.SAM.generation.Geothermal, etc.) for info on the +allowed and/or required SAM config file inputs.

+
+
Parameters:
+
    +
  • excl_fpath (str | list | tuple) – Filepath to exclusions data HDF5 file. The exclusions HDF5 +file should contain the layers specified in excl_dict +and data_layers. These layers may also be spread out +across multiple HDF5 files, in which case this input should +be a list or tuple of filepaths pointing to the files +containing the layers. Note that each data layer must be +uniquely defined (i.e. only appear once and in a single +input file).

  • +
  • res_fpath (str) – Filepath to wind resource data in NREL WTK format. This +input can be a path to a single resource HDF5 file or a path +including a wildcard input like /h5_dir/prefix*suffix to +run bespoke on multiple years of resource data. The former +must be readable by +rex.renewable_resource.WindResource while the +latter must be readable by +rex.multi_year_resource.MultiYearWindResource +(i.e. the resource data conform to the +rex data format). This +means the data file(s) must contain a 1D time_index +dataset indicating the UTC time of observation, a 1D +meta dataset represented by a DataFrame with +site-specific columns, and 2D resource datasets that match +the dimensions of (time_index, meta). The time index must +start at 00:00 of January 1st of the year under +consideration, and its shape must be a multiple of 8760.

  • +
  • tm_dset (str) – Dataset name in the excl_fpath file containing the +techmap (exclusions-to-resource mapping data). This data +layer links the supply curve GID’s to the generation GID’s +that are used to evaluate the performance metrics of each +wind plant. By default, the generation GID’s are assumed to +match the resource GID’s, but this mapping can be customized +via the gid_map input (see the documentation for gid_map +for more details).

    +
    +

    Important

    +

    This dataset uniquely couples the (typically +high-resolution) exclusion layers to the (typically +lower-resolution) resource data. Therefore, a separate +techmap must be used for every unique combination of +resource and exclusion coordinates.

    +
    +
  • +
  • objective_function (str) – The objective function of the optimization written out as a +string. This expression should compute the objective to be +minimized during layout optimization. Variables available +for computation are:

    +
    +
      +
    • n_turbines: the number of turbines

    • +
    • system_capacity: wind plant capacity

    • +
    • aep: annual energy production

    • +
    • fixed_charge_rate: user input fixed_charge_rate if +included as part of the sam system config.

    • +
    • self.wind_plant: the SAM wind plant object, +through which all SAM variables can be accessed

    • +
    • capital_cost: plant capital cost as evaluated +by capital_cost_function

    • +
    • fixed_operating_cost: plant fixed annual operating +cost as evaluated by fixed_operating_cost_function

    • +
    • variable_operating_cost: plant variable annual +operating cost, as evaluated by +variable_operating_cost_function

    • +
    +
    +
  • +
  • capital_cost_function (str) – The plant capital cost function written out as a string. +This expression must return the total plant capital cost in +$. This expression has access to the same variables as the +objective_function argument above.

  • +
  • fixed_operating_cost_function (str) – The plant annual fixed operating cost function written out +as a string. This expression must return the fixed operating +cost in $/year. This expression has access to the same +variables as the objective_function argument above.

  • +
  • variable_operating_cost_function (str) – The plant annual variable operating cost function written +out as a string. This expression must return the variable +operating cost in $/kWh. This expression has access to the +same variables as the objective_function argument above.

  • +
  • project_points (int | list | tuple | str | dict | pd.DataFrame | slice) – Input specifying which sites to process. A single integer +representing the supply curve GID of a site may be specified +to evaluate reV at a supply curve point. A list or tuple +of integers (or slice) representing the supply curve GIDs of +multiple sites can be specified to evaluate reV at +multiple specific locations. A string pointing to a project +points CSV file may also be specified. Typically, the CSV +contains two columns:

    +
    +
      +
    • gid: Integer specifying the supply curve GID of +each site.

    • +
    • config: Key in the sam_files input dictionary +(see below) corresponding to the SAM configuration to +use for each particular site. This value can also be +None (or left out completely) if you specify only +a single SAM configuration file as the sam_files +input.

    • +
    +
    +

    The CSV file may also contain site-specific inputs by +including a column named after a config keyword (e.g. a +column called capital_cost may be included to specify a +site-specific capital cost value for each location). Columns +that do not correspond to a config key may also be included, +but they will be ignored. The CSV file input can also have +these extra columns:

    +
    +
      +
    • capital_cost_multiplier

    • +
    • fixed_operating_cost_multiplier

    • +
    • variable_operating_cost_multiplier

    • +
    +
    +

    These particular inputs are treated as multipliers to be +applied to the respective cost curves +(capital_cost_function, fixed_operating_cost_function, +and variable_operating_cost_function) both during and +after the optimization. A DataFrame following the same +guidelines as the CSV input (or a dictionary that can be +used to initialize such a DataFrame) may be used for this +input as well. If you would like to obtain all available +reV supply curve points to run, you can use the +reV.supply_curve.extent.SupplyCurveExtent class +like so:

    +
    import pandas as pd
    +from reV.supply_curve.extent import SupplyCurveExtent
    +
    +excl_fpath = "..."
    +resolution = ...
    +with SupplyCurveExtent(excl_fpath, resolution) as sc:
    +    points = sc.valid_sc_points(tm_dset).tolist()
    +    points = pd.DataFrame({"gid": points})
    +    points["config"] = "default"  # or a list of config choices
    +
    +# Use the points directly or save them to csv for CLI usage
    +points.to_csv("project_points.csv", index=False)
    +
    +
    +
  • +
  • sam_files (dict | str) – A dictionary mapping SAM input configuration ID(s) to SAM +configuration(s). Keys are the SAM config ID(s) which +correspond to the config column in the project points +CSV. Values for each key are either a path to a +corresponding SAM config file or a full dictionary +of SAM config inputs. For example:

    +
    sam_files = {
    +    "default": "/path/to/default/sam.json",
    +    "onshore": "/path/to/onshore/sam_config.yaml",
    +    "offshore": {
    +        "sam_key_1": "sam_value_1",
    +        "sam_key_2": "sam_value_2",
    +        ...
    +    },
    +    ...
    +}
    +
    +
    +

    This input can also be a string pointing to a single SAM +config file. In this case, the config column of the +CSV points input should be set to None or left out +completely. See the documentation for the reV SAM class +(e.g. reV.SAM.generation.WindPower, +reV.SAM.generation.PvWattsv8, +reV.SAM.generation.Geothermal, etc.) for +info on the allowed and/or required SAM config file inputs.

    +
  • +
  • min_spacing (float | int | str, optional) – Minimum spacing between turbines (in meters). This input can +also be a string like “5x”, which is interpreted as 5 times +the turbine rotor diameter. By default, "5x".

  • +
  • wake_loss_multiplier (float, optional) – A multiplier used to scale the annual energy lost due to +wake losses.

    +
    +

    Warning

    +

    This multiplier will ONLY be applied during the +optimization process and will NOT come through in output +values such as the hourly profiles, aep, any of the cost +functions, or even the output objective.

    +
    +

    By default, 1.

    +
  • +
  • ga_kwargs (dict, optional) – Dictionary of keyword arguments to pass to GA +initialization. If None, default initialization values +are used. See +GeneticAlgorithm for +a description of the allowed keyword arguments. +By default, None.

  • +
  • output_request (list | tuple, optional) – Outputs requested from the SAM windpower simulation after +the bespoke plant layout optimization. Can be any of the +parameters in the “Outputs” group of the PySAM module +PySAM.Windpower.Windpower.Outputs, PySAM module. +This list can also include a select number of SAM +config/resource parameters to include in the output: +any key in any of the +output attribute JSON files +may be requested. Time-series profiles requested via this +input are output in UTC. This input can also be used to +request resource means like "ws_mean", +"windspeed_mean", "temperature_mean", and +"pressure_mean". By default, +('system_capacity', 'cf_mean').

  • +
  • ws_bins (tuple, optional) – A 3-entry tuple with (start, stop, step) for the +windspeed binning of the wind joint probability +distribution. The stop value is inclusive, so +ws_bins=(0, 20, 5) would result in four bins with bin +edges (0, 5, 10, 15, 20). By default, (0.0, 20.0, 5.0).

  • +
  • wd_bins (tuple, optional) – A 3-entry tuple with (start, stop, step) for the wind +direction binning of the wind joint probability +distribution. The stop value is inclusive, so +wd_bins=(0, 360, 90) would result in four bins with bin +edges (0, 90, 180, 270, 360). +By default, (0.0, 360.0, 45.0).

  • +
  • excl_dict (dict, optional) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}}, where +layer_dset_name is a dataset in the exclusion h5 file +and the kwarg: value pair is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +For example:

    +
    excl_dict = {
    +    "typical_exclusion": {
    +        "exclude_values": 255,
    +    },
    +    "another_exclusion": {
    +        "exclude_values": [2, 3],
    +        "weight": 0.5
    +    },
    +    "exclusion_with_nodata": {
    +        "exclude_range": [10, 100],
    +        "exclude_nodata": True,
    +        "nodata_value": -1
    +    },
    +    "partial_setback": {
    +        "use_as_weights": True
    +    },
    +    "height_limit": {
    +        "exclude_range": [0, 200]
    +    },
    +    "slope": {
    +        "include_range": [0, 20]
    +    },
    +    "developable_land": {
    +        "force_include_values": 42
    +    },
    +    "more_developable_land": {
    +        "force_include_range": [5, 10]
    +    },
    +    ...
    +}
    +
    +
    +

    Note that all the keys given in this dictionary should be +datasets of the excl_fpath file. If None or empty +dictionary, no exclusions are applied. By default, None.

    +
  • +
  • area_filter_kernel ({“queen”, “rook”}, optional) – Contiguous area filter method to use on final exclusions +mask. The filters are defined as:

    +
    # Queen:     # Rook:
    +[[1,1,1],    [[0,1,0],
    + [1,1,1],     [1,1,1],
    + [1,1,1]]     [0,1,0]]
    +
    +
    +

    These filters define how neighboring pixels are “connected”. +Once pixels in the final exclusion layer are connected, the +area of each resulting cluster is computed and compared +against the min_area input. Any cluster with an area +less than min_area is excluded from the final mask. +This argument has no effect if min_area is None. +By default, "queen".

    +
  • +
  • min_area (float, optional) – Minimum area (in km2) required to keep an isolated +cluster of (included) land within the resulting exclusions +mask. Any clusters of land with areas less than this value +will be marked as exclusions. See the documentation for +area_filter_kernel for an explanation of how the area of +each land cluster is computed. If None, no area +filtering is performed. By default, None.

  • +
  • resolution (int, optional) – Supply Curve resolution. This value defines how many pixels +are in a single side of a supply curve cell. For example, +a value of 64 would generate a supply curve where the +side of each supply curve cell is 64x64 exclusion +pixels. By default, 64.

  • +
  • excl_area (float, optional) – Area of a single exclusion mask pixel (in km2). +If None, this value will be inferred from the profile +transform attribute in excl_fpath. By default, None.

  • +
  • data_layers (dict, optional) –

    +

    Dictionary of aggregation data layers of the format:

    +
    data_layers = {
    +    "output_layer_name": {
    +        "dset": "layer_name",
    +        "method": "mean",
    +        "fpath": "/path/to/data.h5"
    +    },
    +    "another_output_layer_name": {
    +        "dset": "input_layer_name",
    +        "method": "mode",
    +        # optional "fpath" key omitted
    +    },
    +    ...
    +}
    +
    +
    +

    The "output_layer_name" is the column name under which +the aggregated data will appear in the meta DataFrame of the +output file. The "output_layer_name" does not have to +match the dset input value. The latter should match +the layer name in the HDF5 from which the data to aggregate +should be pulled. The method should be one of +{"mode", "mean", "min", "max", "sum", "category"}, +describing how the high-resolution data should be aggregated +for each supply curve point. fpath is an optional key +that can point to an HDF5 file containing the layer data. If +left out, the data is assumed to exist in the file(s) +specified by the excl_fpath input. If None, no data +layer aggregation is performed. By default, None.

    +
  • +
  • pre_extract_inclusions (bool, optional) – Optional flag to pre-extract/compute the inclusion mask from +the excl_dict input. It is typically faster to compute +the inclusion mask on the fly with parallel workers. +By default, False.

  • +
  • prior_run (str, optional) – Optional filepath to a bespoke output HDF5 file belonging to +a prior run. If specified, this module will only run the +timeseries power generation step and assume that all of the +wind plant layouts are fixed from the prior run. The meta +data of this file must contain the following columns +(automatically satisfied if the HDF5 file was generated by +reV bespoke):

    +
    +
      +
    • capacity : Capacity of the plant, in MW.

    • +
    • turbine_x_coords: A string representation of a +python list containing the X coordinates (in m; origin +of cell at bottom left) of the turbines within the +plant (supply curve cell).

    • +
    • turbine_y_coords : A string representation of a +python list containing the Y coordinates (in m; origin +of cell at bottom left) of the turbines within the +plant (supply curve cell).

    • +
    +
    +

    If None, no previous run data is considered. +By default, None

    +
  • +
  • gid_map (str | dict, optional) – Mapping of unique integer generation gids (keys) to single +integer resource gids (values). This enables unique +generation gids in the project points to map to non-unique +resource gids, which can be useful when evaluating multiple +resource datasets in reV (e.g., forecasted ECMWF +resource data to complement historical WTK meteorology). +This input can be a pre-extracted dictionary or a path to a +JSON or CSV file. If this input points to a CSV file, the +file must have the columns gid (which matches the +project points) and gid_map (gids to extract from the +resource input). If None, the GID values in the project +points are assumed to match the resource GID values. +By default, None.

  • +
  • bias_correct (str | pd.DataFrame, optional) – Optional DataFrame or CSV filepath to a wind or solar +resource bias correction table. This has columns:

    +
    +
      +
    • gid: GID of site (can be index name)

    • +
    • adder: Value to add to resource at each site

    • +
    • scalar: Value to scale resource at each site by

    • +
    +
    +

    The gid field should match the true resource gid +regardless of the optional gid_map input. If both +adder and scalar are present, the wind or solar +resource is corrected by \((res*scalar)+adder\). If +either is missing, scalar defaults to 1 and adder +to 0. Only windspeed or GHI + DNI are corrected, +depending on the technology (wind for the former, solar +for the latter). GHI and DNI are corrected with the +same correction factors. If None, no corrections are +applied. By default, None.

    +
  • +
  • pre_load_data (bool, optional) – Option to pre-load resource data. This step can be +time-consuming up front, but it drastically reduces the +number of parallel reads to the res_fpath HDF5 file(s), +and can have a significant overall speedup on systems with +slow parallel I/O capabilities. Pre-loaded data can use a +significant amount of RAM, so be sure to split execution +across many nodes (e.g. 100 nodes, 36 workers each for +CONUS) or request large amounts of memory for a smaller +number of nodes. By default, False.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + +

run([out_fpath, max_workers])

Run the bespoke wind plant optimization in serial or parallel.

run_parallel([max_workers])

Run the bespoke optimization for many supply curve points in parallel.

run_serial(excl_fpath, res_fpath, tm_dset, ...)

Standalone serial method to run bespoke optimization.

sam_sys_inputs_with_site_data(gid)

Update the sam_sys_inputs with site data for the given GID.

save_outputs(out_fpath)

Save Bespoke Wind Plant optimization outputs to disk.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + +

completed_gids

Get a sorted list of completed BespokeSinglePlant gids

gids

1D array of supply curve point gids to aggregate

meta

Meta data for all completed BespokeSinglePlant objects.

outputs

Saved outputs for the multi wind plant bespoke optimization.

shape

Get the shape of the full exclusions raster.

slice_lookup

Lookup mapping sc_point_gid to exclusion slice.

+
+
+property outputs
+

Saved outputs for the multi wind plant bespoke optimization. Keys +are reV supply curve gids and values are BespokeSinglePlant.outputs +dictionaries.

+
+
Returns:
+

dict

+
+
+
+ +
+
+property completed_gids
+

Get a sorted list of completed BespokeSinglePlant gids

+
+
Returns:
+

list

+
+
+
+ +
+
+property meta
+

Meta data for all completed BespokeSinglePlant objects.

+
+
Returns:
+

pd.DataFrame

+
+
+
+ +
+
+property slice_lookup
+

Lookup mapping sc_point_gid to exclusion slice.

+
+
Type:
+

dict | None

+
+
+
+ +
+
+sam_sys_inputs_with_site_data(gid)[source]
+

Update the sam_sys_inputs with site data for the given GID.

+

Site data is extracted from the project points DataFrame. Every +column in the project DataFrame becomes a key in the site_data +output dictionary.

+
+
Parameters:
+

gid (int) – SC point gid for site to pull site data for.

+
+
Returns:
+

dictionary (dict) – SAM system config with extra keys from the project points +DataFrame.

+
+
+
+ +
+
+save_outputs(out_fpath)[source]
+

Save Bespoke Wind Plant optimization outputs to disk.

+
+
Parameters:
+

out_fpath (str) – Full filepath to an output .h5 file to save Bespoke data to. The +parent directories will be created if they do not already exist.

+
+
Returns:
+

out_fpath (str) – Full filepath to desired .h5 output file, the .h5 extension has +been added if it was not already present.

+
+
+
+ +
+
+classmethod run_serial(excl_fpath, res_fpath, tm_dset, sam_sys_inputs, objective_function, capital_cost_function, fixed_operating_cost_function, variable_operating_cost_function, min_spacing='5x', wake_loss_multiplier=1, ga_kwargs=None, output_request=('system_capacity', 'cf_mean'), ws_bins=(0.0, 20.0, 5.0), wd_bins=(0.0, 360.0, 45.0), excl_dict=None, inclusion_mask=None, area_filter_kernel='queen', min_area=None, resolution=64, excl_area=0.0081, data_layers=None, gids=None, exclusion_shape=None, slice_lookup=None, prior_meta=None, gid_map=None, bias_correct=None, pre_loaded_data=None)[source]
+

Standalone serial method to run bespoke optimization. +See BespokeWindPlants docstring for parameter description.

+

This method can only take a single sam_sys_inputs… For a spatially +variant gid-to-config mapping, see the BespokeWindPlants class methods.

+
+
Returns:
+

out (dict) – Bespoke outputs keyed by sc point gid

+
+
+
+ +
+
+run_parallel(max_workers=None)[source]
+

Run the bespoke optimization for many supply curve points in +parallel.

+
+
Parameters:
+

max_workers (int | None, optional) – Number of cores to run summary on. None is all +available cpus, by default None

+
+
Returns:
+

out (dict) – Bespoke outputs keyed by sc point gid

+
+
+
+ +
+
+run(out_fpath=None, max_workers=None)[source]
+

Run the bespoke wind plant optimization in serial or parallel.

+
+
Parameters:
+
    +
  • out_fpath (str, optional) – Path to output file. If None, no output file will +be written. If the filepath is specified but the module name +(bespoke) is not included, the module name will get added to +the output file name. By default, None.

  • +
  • max_workers (int, optional) – Number of local workers to run on. If None, uses all +available cores (typically 36). By default, None.

  • +
+
+
Returns:
+

str | None – Path to output HDF5 file, or None if results were not +written to disk.

+
+
+
+ +
+
+property gids
+

1D array of supply curve point gids to aggregate

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property shape
+

Get the shape of the full exclusions raster.

+
+
Returns:
+

tuple

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.bespoke.html b/_autosummary/reV.bespoke.bespoke.html new file mode 100644 index 000000000..f3bbd7c93 --- /dev/null +++ b/_autosummary/reV.bespoke.bespoke.html @@ -0,0 +1,648 @@ + + + + + + + reV.bespoke.bespoke — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.bespoke

+

reV bespoke wind plant analysis tools

+

Classes

+ + + + + + + + + + + + + + + +

BespokeMultiPlantData(res_fpath, ...)

Multi-plant preloaded data.

BespokeSinglePlant(gid, excl, res, tm_dset, ...)

Framework for analyzing and optimizing a wind plant layout specific to the local wind resource and exclusions for a single reV supply curve point.

BespokeSinglePlantData(data_inds, wind_dirs, ...)

Single-plant preloaded data.

BespokeWindPlants(excl_fpath, res_fpath, ...)

reV bespoke analysis class.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.cli_bespoke.html b/_autosummary/reV.bespoke.cli_bespoke.html new file mode 100644 index 000000000..72dade0ea --- /dev/null +++ b/_autosummary/reV.bespoke.cli_bespoke.html @@ -0,0 +1,631 @@ + + + + + + + reV.bespoke.cli_bespoke — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.cli_bespoke

+

Bespoke wind plant optimization CLI utility functions.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.gradient_free.GeneticAlgorithm.html b/_autosummary/reV.bespoke.gradient_free.GeneticAlgorithm.html new file mode 100644 index 000000000..a41a69ee8 --- /dev/null +++ b/_autosummary/reV.bespoke.gradient_free.GeneticAlgorithm.html @@ -0,0 +1,740 @@ + + + + + + + reV.bespoke.gradient_free.GeneticAlgorithm — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.gradient_free.GeneticAlgorithm

+
+
+class GeneticAlgorithm(bits, bounds, variable_type, objective_function, max_generation=100, population_size=0, crossover_rate=0.1, mutation_rate=0.01, tol=1e-06, convergence_iters=5, max_time=3600)[source]
+

Bases: object

+

a simple genetic algorithm used to select bespoke turbine locations

+
+
Parameters:
+
    +
  • bits (array of ints) – The number of bits assigned to each of the design variables. +The number of discretizations for each design variables will be +2^n where n is the number of bits assigned to that variable.

  • +
  • bounds (array of tuples) – The bounds for each design variable. This parameter looks like: +np.array([(lower, upper), (lower, upper)…])

  • +
  • variable_type (array of strings (‘int’ or ‘float’)) – The type of each design variable (int or float).

  • +
  • objective_function (callable) – The function handle for the objective that is to be minimized. Should take a single variable as an input which is a +list/array of the design variables.

  • +
  • max_generation (int, optional) – The maximum number of generations that will be run in the genetic +algorithm.

  • +
  • population_size (int, optional) – The population size in the genetic algorithm.

  • +
  • crossover_rate (float, optional) – The probability of crossover for a single bit during the crossover +phase of the genetic algorithm.

  • +
  • mutation_rate (float, optional) – The probability of a single bit mutating during the mutation phase +of the genetic algorithm.

  • +
  • tol (float, optional) – The absolute tolerance to determine convergence.

  • +
  • convergence_iters (int, optional) – The number of generations to determine convergence.

  • +
  • max_time (float) – The maximum time (in seconds) to run the genetic algorithm.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

chromosome_2_variables(chromosome)

convert the binary chromosomes to design variable values

crossover()

perform crossover between individual parents

initialize_bits()

determine the total number of bits

initialize_design_variables()

initialize the design variables from the randomly initialized population

initialize_fitness()

initialize the fitness of each member of the parent population

initialize_population()

randomly initialize the parent and offspring populations

mutate()

randomly mutate bits of each chromosome

optimize_ga()

run the genetic algorithm

+
+
+initialize_design_variables()[source]
+

initialize the design variables from the randomly initialized +population

+
+ +
+
+initialize_bits()[source]
+

determine the total number of bits

+
+ +
+
+initialize_population()[source]
+

randomly initialize the parent and offspring populations

+
+ +
+
+initialize_fitness()[source]
+

initialize the fitness of each member of the parent population

+
+ +
+
+chromosome_2_variables(chromosome)[source]
+

convert the binary chromosomes to design variable values

+
+ +
+
+crossover()[source]
+

perform crossover between individual parents

+
+ +
+
+mutate()[source]
+

randomly mutate bits of each chromosome

+
+ +
+
+optimize_ga()[source]
+

run the genetic algorithm

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.gradient_free.html b/_autosummary/reV.bespoke.gradient_free.html new file mode 100644 index 000000000..f9a9fc06e --- /dev/null +++ b/_autosummary/reV.bespoke.gradient_free.html @@ -0,0 +1,639 @@ + + + + + + + reV.bespoke.gradient_free — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.gradient_free

+

a simple genetic algorithm

+

Classes

+ + + + + + +

GeneticAlgorithm(bits, bounds, ...[, ...])

a simple genetic algorithm used to select bespoke turbine locations

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.html b/_autosummary/reV.bespoke.html new file mode 100644 index 000000000..6ff919fc1 --- /dev/null +++ b/_autosummary/reV.bespoke.html @@ -0,0 +1,652 @@ + + + + + + + reV.bespoke — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke

+

reV bespoke wind plant analysis tools

+ + + + + + + + + + + + + + + + + + + + + +

reV.bespoke.bespoke

reV bespoke wind plant analysis tools

reV.bespoke.cli_bespoke

Bespoke wind plant optimization CLI utility functions.

reV.bespoke.gradient_free

a simple genetic algorithm

reV.bespoke.pack_turbs

turbine packing module.

reV.bespoke.place_turbines

place turbines for bespoke wind plants

reV.bespoke.plotting_functions

functions to plot turbine layouts and boundary polygons

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.pack_turbs.PackTurbines.html b/_autosummary/reV.bespoke.pack_turbs.PackTurbines.html new file mode 100644 index 000000000..c4b0aee77 --- /dev/null +++ b/_autosummary/reV.bespoke.pack_turbs.PackTurbines.html @@ -0,0 +1,673 @@ + + + + + + + reV.bespoke.pack_turbs.PackTurbines — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.pack_turbs.PackTurbines

+
+
+class PackTurbines(min_spacing, safe_polygons, weight_x=0.0013547)[source]
+

Bases: object

+

Framework to maximize plant capacity in a provided wind plant area.

+
+
Parameters:
+
    +
  • min_spacing (float) – The minimum allowed spacing between wind turbines.

  • +
  • safe_polygons (Polygon | MultiPolygon) – The “safe” area(s) where turbines can be placed without +violating boundary, setback, exclusion, or other constraints.

  • +
  • weight_x (float, optional)

  • +
+
+
+

Methods

+ + + + + + + + + +

clear()

Reset the packing algorithm by clearing the x and y turbine arrays

pack_turbines_poly()

Fast packing algorithm that maximizes plant capacity in a provided wind plant area.

+
+
+pack_turbines_poly()[source]
+

Fast packing algorithm that maximizes plant capacity in a +provided wind plant area. Sets the optimal locations to +self.turbine_x and self.turbine_y

+
+ +
+
+clear()[source]
+

Reset the packing algorithm by clearing the x and y turbine arrays

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.pack_turbs.html b/_autosummary/reV.bespoke.pack_turbs.html new file mode 100644 index 000000000..7e120f8d7 --- /dev/null +++ b/_autosummary/reV.bespoke.pack_turbs.html @@ -0,0 +1,647 @@ + + + + + + + reV.bespoke.pack_turbs — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.pack_turbs

+

turbine packing module.

+

Functions

+ + + + + + +

smallest_area_with_tiebreakers(g)

Compute a sort key from a geometry's area, breaking ties with exterior coordinates

+

Classes

+ + + + + + +

PackTurbines(min_spacing, safe_polygons[, ...])

Framework to maximize plant capacity in a provided wind plant area.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.pack_turbs.smallest_area_with_tiebreakers.html b/_autosummary/reV.bespoke.pack_turbs.smallest_area_with_tiebreakers.html new file mode 100644 index 000000000..c58805f50 --- /dev/null +++ b/_autosummary/reV.bespoke.pack_turbs.smallest_area_with_tiebreakers.html @@ -0,0 +1,657 @@ + + + + + + + reV.bespoke.pack_turbs.smallest_area_with_tiebreakers — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.pack_turbs.smallest_area_with_tiebreakers

+
+
+smallest_area_with_tiebreakers(g)[source]
+

Compute a sort key from a geometry's area, breaking ties with exterior coordinates.

+

This function helps break ties in the area of two different +geometries using their exterior coordinate values.

+
+
Parameters:
+

g (_type_) – A geometry object with an area and an +exterior.coords attribute.

+
+
Returns:
+

tuple

+
+
Tuple with the following elements:
    +
  • area of the geometry

  • +
  • minimum exterior coordinate (southwest)

  • +
  • maximum exterior coordinate (northeast)

  • +
+
+
+

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.place_turbines.PlaceTurbines.html b/_autosummary/reV.bespoke.place_turbines.PlaceTurbines.html new file mode 100644 index 000000000..ae1e7eec9 --- /dev/null +++ b/_autosummary/reV.bespoke.place_turbines.PlaceTurbines.html @@ -0,0 +1,951 @@ + + + + + + + reV.bespoke.place_turbines.PlaceTurbines — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.place_turbines.PlaceTurbines

+
+
+class PlaceTurbines(wind_plant, objective_function, capital_cost_function, fixed_operating_cost_function, variable_operating_cost_function, include_mask, pixel_side_length, min_spacing, wake_loss_multiplier=1)[source]
+

Bases: object

+

Framework for optimizing turbine locations for site specific +exclusions, wind resources, and objective

+
+
Parameters:
+
    +
  • wind_plant (WindPowerPD) – wind plant object to analyze wind plant performance. This object +should have everything in the plant defined, such that only the +turbine coordinates and plant capacity need to be defined during +the optimization.

  • +
  • objective_function (str) – The objective function of the optimization as a string, should +return the objective to be minimized during layout optimization. +Variables available are:

    +
    +
      +
    • n_turbines: the number of turbines

    • +
    • system_capacity: wind plant capacity

    • +
    • aep: annual energy production

    • +
    • fixed_charge_rate: user input fixed_charge_rate if included +as part of the sam system config.

    • +
    • capital_cost: plant capital cost as evaluated +by capital_cost_function

    • +
    • fixed_operating_cost: plant fixed annual operating cost as +evaluated by fixed_operating_cost_function

    • +
    • variable_operating_cost: plant variable annual operating cost +as evaluated by variable_operating_cost_function

    • +
    • self.wind_plant: the SAM wind plant object, through which +all SAM variables can be accessed

    • +
    • cost: the annual cost of the wind plant (from cost_function)

    • +
    +
    +
  • +
  • capital_cost_function (str) – The plant capital cost function as a string, must return the total +capital cost in $. Has access to the same variables as the +objective_function.

  • +
  • fixed_operating_cost_function (str) – The plant annual fixed operating cost function as a string, must +return the fixed operating cost in $/year. Has access to the same +variables as the objective_function.

  • +
  • variable_operating_cost_function (str) – The plant annual variable operating cost function as a string, must +return the variable operating cost in $/kWh. Has access to the same +variables as the objective_function.

  • +
  • exclusions (ExclusionMaskFromDict) – The exclusions that define where turbines can be placed. Contains +exclusions.latitude, exclusions.longitude, and exclusions.mask

  • +
  • min_spacing (float) – The minimum spacing between turbines (in meters).

  • +
  • wake_loss_multiplier (float, optional) – A multiplier used to scale the annual energy lost due to +wake losses. IMPORTANT: This multiplier will ONLY be +applied during the optimization process and will NOT +come through in output values such as aep, any of the cost +functions, or even the output objective.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + +

capital_cost_per_kw(capacity_mw)

Capital cost function ($ per kW) evaluated for a given capacity.

define_exclusions()

From the exclusions data, create a shapely MultiPolygon as self.safe_polygons that defines where turbines can be placed.

initialize_packing()

run the turbine packing algorithm (maximizing plant capacity) to define potential turbine locations that will be used as design variables in the genetic algorithm.

optimization_objective(x)

The optimization objective used in the bespoke optimization

optimize(**kwargs)

Optimize wind farm layout.

place_turbines(**kwargs)

Define bespoke wind plant turbine layouts.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

aep

Wrapper to return None if PlaceTurbines is not optimized

area

Wrapper to return None if PlaceTurbines is not optimized

capacity

Wrapper to return None if PlaceTurbines is not optimized

capacity_density

Wrapper to return None if PlaceTurbines is not optimized

capital_cost

Wrapper to return None if PlaceTurbines is not optimized

convex_hull

Wrapper to return None if PlaceTurbines is not optimized

convex_hull_area

Wrapper to return None if PlaceTurbines is not optimized

convex_hull_capacity_density

Wrapper to return None if PlaceTurbines is not optimized

fixed_charge_rate

Fixed charge rate if input to the SAM WindPowerPD object, None if not found in inputs.

fixed_operating_cost

Wrapper to return None if PlaceTurbines is not optimized

full_cell_area

Wrapper to return None if PlaceTurbines is not optimized

full_cell_capacity_density

Wrapper to return None if PlaceTurbines is not optimized

nturbs

Wrapper to return None if PlaceTurbines is not optimized

objective

Wrapper to return None if PlaceTurbines is not optimized

turbine_x

Wrapper to return None if PlaceTurbines is not optimized

turbine_y

Wrapper to return None if PlaceTurbines is not optimized

variable_operating_cost

Wrapper to return None if PlaceTurbines is not optimized

+
+
+define_exclusions()[source]
+

From the exclusions data, create a shapely MultiPolygon as +self.safe_polygons that defines where turbines can be placed.

+
+ +
+
+initialize_packing()[source]
+

run the turbine packing algorithm (maximizing plant capacity) to +define potential turbine locations that will be used as design +variables in the genetic algorithm.

+
+ +
+
+optimization_objective(x)[source]
+

The optimization objective used in the bespoke optimization

+
+ +
+
+optimize(**kwargs)[source]
+

Optimize wind farm layout.

+

Use a genetic algorithm to optimize wind plant layout for the +user-defined objective function.

+
+
Parameters:
+

**kwargs – Keyword arguments to pass to GA initialization.

+
+
+
+

See also

+
+
GeneticAlgorithm

GA Algorithm.

+
+
+
+
+ +
+
+place_turbines(**kwargs)[source]
+

Define bespoke wind plant turbine layouts.

+

Run all functions to define bespoke wind plant turbine layouts.

+
+
Parameters:
+

**kwargs – Keyword arguments to pass to GA initialization.

+
+
+
+

See also

+
+
GeneticAlgorithm

GA Algorithm.

+
+
+
+
+ +
+
+capital_cost_per_kw(capacity_mw)[source]
+

Capital cost function ($ per kW) evaluated for a given capacity.

+

The capacity will be adjusted to be an exact multiple of the +turbine rating in order to yield an integer number of +turbines.

+
+
Parameters:
+

capacity_mw (float) – The desired capacity (MW) to sample the cost curve at. Note +as mentioned above, the capacity will be adjusted to be an +exact multiple of the turbine rating in order to yield an +integer number of turbines. For best results, set this +value to be an integer multiple of the turbine rating.

+
+
Returns:
+

capital_cost (float) – Capital cost ($ per kW) for the (adjusted) plant capacity.

+
+
+
+ +
+
+property fixed_charge_rate
+

Fixed charge rate if input to the SAM WindPowerPD object, None if +not found in inputs.

+
+ +
+
+property turbine_x
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property turbine_y
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property nturbs
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property capacity
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property convex_hull
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property area
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property convex_hull_area
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property full_cell_area
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property capacity_density
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property convex_hull_capacity_density
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property full_cell_capacity_density
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property aep
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property capital_cost
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property fixed_operating_cost
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property variable_operating_cost
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+
+property objective
+

Wrapper to return None if PlaceTurbines is not optimized

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.place_turbines.html b/_autosummary/reV.bespoke.place_turbines.html new file mode 100644 index 000000000..1a8a0b6ff --- /dev/null +++ b/_autosummary/reV.bespoke.place_turbines.html @@ -0,0 +1,647 @@ + + + + + + + reV.bespoke.place_turbines — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.place_turbines

+

place turbines for bespoke wind plants

+

Functions

+ + + + + + +

none_until_optimized(func)

Decorator that returns None until PlaceTurbines is optimized.

+

Classes

+ + + + + + +

PlaceTurbines(wind_plant, ...[, ...])

Framework for optimizing turbine locations for site specific exclusions, wind resources, and objective

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.place_turbines.none_until_optimized.html b/_autosummary/reV.bespoke.place_turbines.none_until_optimized.html new file mode 100644 index 000000000..51db4df1b --- /dev/null +++ b/_autosummary/reV.bespoke.place_turbines.none_until_optimized.html @@ -0,0 +1,650 @@ + + + + + + + reV.bespoke.place_turbines.none_until_optimized — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.place_turbines.none_until_optimized

+
+
+none_until_optimized(func)[source]
+

Decorator that returns None until PlaceTurbines is optimized.

+

Meant for exclusive use in PlaceTurbines and its subclasses. +PlaceTurbines is considered optimized when its +optimized_design_variables attribute is not None.

+
+
Parameters:
+

func (callable) – A callable function that should return None until +PlaceTurbines is optimized.

+
+
Returns:
+

callable – New function that returns None until PlaceTurbines is +optimized.

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.plotting_functions.get_xy.html b/_autosummary/reV.bespoke.plotting_functions.get_xy.html new file mode 100644 index 000000000..def2f09fa --- /dev/null +++ b/_autosummary/reV.bespoke.plotting_functions.get_xy.html @@ -0,0 +1,647 @@ + + + + + + + reV.bespoke.plotting_functions.get_xy — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.plotting_functions.get_xy

+
+
+get_xy(A)[source]
+

separate polygon exterior coordinates to x and y

+
+
Parameters:
+
    +
  • A (Polygon.exterior.coords) – Exterior coordinates from a shapely Polygon

  • +
  • Outputs

  • +
  • ———-

  • +
  • x, y (array) – Boundary polygon x and y coordinates

  • +
+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.plotting_functions.html b/_autosummary/reV.bespoke.plotting_functions.html new file mode 100644 index 000000000..550ae8d04 --- /dev/null +++ b/_autosummary/reV.bespoke.plotting_functions.html @@ -0,0 +1,648 @@ + + + + + + + reV.bespoke.plotting_functions — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.plotting_functions

+

functions to plot turbine layouts and boundary polygons

+

Functions

+ + + + + + + + + + + + + + + +

get_xy(A)

separate polygon exterior coordinates to x and y

plot_poly(geom[, ax, color, linestyle, ...])

plot the wind plant boundaries

plot_turbines(x, y, r[, ax, color, nums])

plot wind turbine locations

plot_windrose(wind_directions, wind_speeds, ...)

plot windrose

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.plotting_functions.plot_poly.html b/_autosummary/reV.bespoke.plotting_functions.plot_poly.html new file mode 100644 index 000000000..9d07712e8 --- /dev/null +++ b/_autosummary/reV.bespoke.plotting_functions.plot_poly.html @@ -0,0 +1,650 @@ + + + + + + + reV.bespoke.plotting_functions.plot_poly — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.plotting_functions.plot_poly

+
+
+plot_poly(geom, ax=None, color='black', linestyle='--', linewidth=0.5)[source]
+

plot the wind plant boundaries

+
+
Parameters:
+
    +
  • geom (Polygon | MultiPolygon) – The shapely.Polygon or shapely.MultiPolygon that define the wind +plant boundary(ies).

  • +
  • ax (matplotlib.pyplot.axes, optional) – The figure axes on which the wind rose is plotted. +Defaults to None.

  • +
  • color (string, optional) – The color for the wind plant boundaries

  • +
  • linestyle (string, optional) – Style to plot the boundary lines

  • +
  • linewidth (float, optional) – The width of the boundary lines

  • +
+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.plotting_functions.plot_turbines.html b/_autosummary/reV.bespoke.plotting_functions.plot_turbines.html new file mode 100644 index 000000000..eda7a1351 --- /dev/null +++ b/_autosummary/reV.bespoke.plotting_functions.plot_turbines.html @@ -0,0 +1,649 @@ + + + + + + + reV.bespoke.plotting_functions.plot_turbines — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.plotting_functions.plot_turbines

+
+
+plot_turbines(x, y, r, ax=None, color='C0', nums=False)[source]
+

plot wind turbine locations

+
+
Parameters:
+
    +
  • x, y (array) – Wind turbine x and y locations

  • +
  • r (float) – Wind turbine radius

  • +
  • ax :py:class:`matplotlib.pyplot.axes`, optional – The figure axes on which the wind rose is plotted. +Defaults to None.

  • +
  • color (string, optional) – The color for the wind plant boundaries

  • +
  • nums (bool, optional) – Option to show the turbine numbers next to each turbine

  • +
+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.bespoke.plotting_functions.plot_windrose.html b/_autosummary/reV.bespoke.plotting_functions.plot_windrose.html new file mode 100644 index 000000000..88426e2f5 --- /dev/null +++ b/_autosummary/reV.bespoke.plotting_functions.plot_windrose.html @@ -0,0 +1,649 @@ + + + + + + + reV.bespoke.plotting_functions.plot_windrose — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.bespoke.plotting_functions.plot_windrose

+
+
+plot_windrose(wind_directions, wind_speeds, wind_frequencies, ax=None, colors=None)[source]
+

plot windrose

+
+
Parameters:
+
    +
  • wind_directions (1D array) – Wind direction samples

  • +
  • wind_speeds (1D array) – Wind speed samples

  • +
  • wind_frequencies (2D array) – Frequency of wind direction and speed samples

  • +
  • ax :py:class:`matplotlib.pyplot.axes`, optional – The figure axes on which the wind rose is plotted. +Defaults to None.

  • +
  • color (array, optional) – The color for the different wind speed bins

  • +
+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.cli.html b/_autosummary/reV.cli.html new file mode 100644 index 000000000..a310c0b4c --- /dev/null +++ b/_autosummary/reV.cli.html @@ -0,0 +1,630 @@ + + + + + + + reV.cli — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.cli

+

reV command line interface (CLI).

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.base_analysis_config.AnalysisConfig.html b/_autosummary/reV.config.base_analysis_config.AnalysisConfig.html new file mode 100644 index 000000000..28bdd6150 --- /dev/null +++ b/_autosummary/reV.config.base_analysis_config.AnalysisConfig.html @@ -0,0 +1,970 @@ + + + + + + + reV.config.base_analysis_config.AnalysisConfig — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.base_analysis_config.AnalysisConfig

+
+
+class AnalysisConfig(config, run_preflight=True, check_keys=True)[source]
+

Bases: BaseConfig

+

Base analysis config (generation, lcoe, etc…).

+
+
Parameters:
+
    +
  • config (str | dict) – File path to config json (str), serialized json object (str), +or dictionary with pre-extracted config.

  • +
  • run_preflight (bool, optional) – Flag to run or disable preflight checks, by default True

  • +
  • check_keys (bool, optional) – Flag to check config keys against Class properties, by default True

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

check_files(flist)

Make sure all files in the input file list exist.

check_overwrite_keys(primary_key, ...)

Check for overwrite keys and raise a ConfigError if present

clear()

copy()

fromkeys([value])

Create a new dictionary with keys from iterable and values set to value.

get(key[, default])

Return the value for key if key is in the dictionary, else default.

items()

keys()

pop(k[,d])

If key is not found, d is returned if given, otherwise KeyError is raised

popitem()

Remove and return a (key, value) pair as a 2-tuple.

resolve_path(path)

Resolve a file path represented by the input string.

set_self_dict(dictlike)

Save a dict-like variable as object instance dictionary items.

setdefault(key[, default])

Insert key with a value of default if key is not in the dictionary.

str_replace_and_resolve(d, str_rep)

Perform a deep string replacement and path resolve in d.

update([E, ]**F)

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k]

values()

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

NAME

REQUIREMENTS

Required keys for config

STR_REP

Mapping of config inputs (keys) to desired replacements (values) in addition to relative file paths as demarcated by ./ and ../

analysis_years

Get the analysis years.

config_dir

Get the directory that the config file is in.

config_keys

List of valid config keys

execution_control

Get the execution control object.

log_directory

Get the logging directory, look for key "log_directory" in the config.

log_level

Get user-specified "log_level" (DEBUG, INFO, WARNING, etc...).

name

Get the job name, defaults to the output directory name.

+
+
+property analysis_years
+

Get the analysis years.

+
+
Returns:
+

analysis_years (list) – List of years to analyze. If this is a single year run, this return +value is a single entry list. If no analysis_years are specified, +the code will look for a year in the input files.

+
+
+
+ +
+
+property log_directory
+

Get the logging directory, look for key “log_directory” in the +config. +:returns: log_directory (str) – Target path for reV log files.

+
+ +
+
+property execution_control
+

Get the execution control object.

+
+
Returns:
+

_ec (BaseExecutionConfig | EagleConfig) – reV execution config object specific to the execution_control +option.

+
+
+
+ +
+
+property name
+

Get the job name, defaults to the output directory name. +:returns: _name (str) – reV job name.

+
+ +
+
+REQUIREMENTS = ()
+

Required keys for config

+
+ +
+
+STR_REP = {'REVDIR': '/home/runner/work/reV/reV/reV', 'TESTDATADIR': '/home/runner/work/reV/reV/tests/data'}
+

Mapping of config inputs (keys) to desired replacements (values) in +addition to relative file paths as demarcated by ./ and ../

+
+ +
+
+static check_files(flist)
+

Make sure all files in the input file list exist.

+
+
Parameters:
+

flist (list) – List of files (with paths) to check existence of.

+
+
+
+ +
+
+check_overwrite_keys(primary_key, *overwrite_keys)
+

Check for overwrite keys and raise a ConfigError if present

+
+
Parameters:
+
    +
  • primary_key (str) – Primary key that overwrites overwrite_keys, used for error message

  • +
  • overwrite_keys (str) – Key(s) to overwrite

  • +
+
+
+
+ +
+
+clear() None.  Remove all items from D.
+
+ +
+
+property config_dir
+

Get the directory that the config file is in.

+
+
Returns:
+

config_dir (str) – Directory path that the config file is in.

+
+
+
+ +
+
+property config_keys
+

List of valid config keys

+
+
Returns:
+

list

+
+
+
+ +
+
+copy() a shallow copy of D
+
+ +
+
+fromkeys(value=None, /)
+

Create a new dictionary with keys from iterable and values set to value.

+
+ +
+
+get(key, default=None, /)
+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+items() a set-like object providing a view on D's items
+
+ +
+
+keys() a set-like object providing a view on D's keys
+
+ +
+
+property log_level
+

Get user-specified “log_level” (DEBUG, INFO, WARNING, etc…).

+
+
Returns:
+

log_level (int) – Python logging module level (integer format) corresponding to the +config-specified log level string.

+
+
+
+ +
+
+pop(k[, d]) v, remove specified key and return the corresponding value.
+

If key is not found, d is returned if given, otherwise KeyError is raised

+
+ +
+
+popitem()
+

Remove and return a (key, value) pair as a 2-tuple.

+

Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.

+
+ +
+
+resolve_path(path)
+

Resolve a file path represented by the input string.

+

This function resolves the input string if it resembles a path. +Specifically, the string will be resolved if it starts with +“./” or “..”, or if it contains either “./” or +“..” somewhere in the string body. Otherwise, the string +is returned unchanged, so this function is safe to call on any +string, even ones that do not resemble a path.

+

This method delegates the “resolving” logic to +pathlib.Path.resolve(). This means the path is made +absolute, symlinks are resolved, and “..” components are +eliminated. If the path input starts with “./” or +“..”, it is assumed to be w.r.t the config directory, not +the run directory.

+
+
Parameters:
+

path (str) – Input file path.

+
+
Returns:
+

str – The resolved path.

+
+
+
+ +
+
+set_self_dict(dictlike)
+

Save a dict-like variable as object instance dictionary items.

+
+
Parameters:
+

dictlike (dict) – Python namespace object to set to this dictionary-emulating class.

+
+
+
+ +
+
+setdefault(key, default=None, /)
+

Insert key with a value of default if key is not in the dictionary.

+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+str_replace_and_resolve(d, str_rep)
+

Perform a deep string replacement and path resolve in d.

+
+
Parameters:
+
    +
  • d (dict) – Config dictionary potentially containing strings to replace +and/or paths to resolve.

  • +
  • str_rep (dict) – Replacement mapping where keys are strings to search for and +values are the new values.

  • +
+
+
Returns:
+

d (dict) – Config dictionary with updated strings.

+
+
+
+ +
+
+update([E, ]**F) None.  Update D from dict/iterable E and F.
+

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]

+
+ +
+
+values() an object providing a view on D's values
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.base_analysis_config.html b/_autosummary/reV.config.base_analysis_config.html new file mode 100644 index 000000000..4492cd20f --- /dev/null +++ b/_autosummary/reV.config.base_analysis_config.html @@ -0,0 +1,639 @@ + + + + + + + reV.config.base_analysis_config — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.base_analysis_config

+

reV Base analysis Configuration Frameworks

+

Classes

+ + + + + + +

AnalysisConfig(config[, run_preflight, ...])

Base analysis config (generation, lcoe, etc...).

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.base_config.BaseConfig.html b/_autosummary/reV.config.base_config.BaseConfig.html new file mode 100644 index 000000000..2dd4f7e92 --- /dev/null +++ b/_autosummary/reV.config.base_config.BaseConfig.html @@ -0,0 +1,929 @@ + + + + + + + reV.config.base_config.BaseConfig — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.base_config.BaseConfig

+
+
+class BaseConfig(config, check_keys=True, perform_str_rep=True)[source]
+

Bases: dict

+

Base class for configuration frameworks.

+
+
Parameters:
+
    +
  • config (str | dict) – File path to config json (str), serialized json object (str), +or dictionary with pre-extracted config.

  • +
  • check_keys (bool, optional) – Flag to check config keys against Class properties, by default True

  • +
  • perform_str_rep (bool) – Flag to perform string replacement for REVDIR, TESTDATADIR, and ./

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

check_files(flist)

Make sure all files in the input file list exist.

check_overwrite_keys(primary_key, ...)

Check for overwrite keys and raise a ConfigError if present

clear()

copy()

fromkeys([value])

Create a new dictionary with keys from iterable and values set to value.

get(key[, default])

Return the value for key if key is in the dictionary, else default.

items()

keys()

pop(k[,d])

If key is not found, d is returned if given, otherwise KeyError is raised

popitem()

Remove and return a (key, value) pair as a 2-tuple.

resolve_path(path)

Resolve a file path represented by the input string.

set_self_dict(dictlike)

Save a dict-like variable as object instance dictionary items.

setdefault(key[, default])

Insert key with a value of default if key is not in the dictionary.

str_replace_and_resolve(d, str_rep)

Perform a deep string replacement and path resolve in d.

update([E, ]**F)

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k]

values()

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + +

REQUIREMENTS

Required keys for config

STR_REP

Mapping of config inputs (keys) to desired replacements (values) in addition to relative file paths as demarcated by ./ and ../

config_dir

Get the directory that the config file is in.

config_keys

List of valid config keys

log_level

Get user-specified "log_level" (DEBUG, INFO, WARNING, etc...).

name

Get the job name, defaults to 'rev'.

+
+
+REQUIREMENTS = ()
+

Required keys for config

+
+ +
+
+STR_REP = {'REVDIR': '/home/runner/work/reV/reV/reV', 'TESTDATADIR': '/home/runner/work/reV/reV/tests/data'}
+

Mapping of config inputs (keys) to desired replacements (values) in +addition to relative file paths as demarcated by ./ and ../

+
+ +
+
+property config_dir
+

Get the directory that the config file is in.

+
+
Returns:
+

config_dir (str) – Directory path that the config file is in.

+
+
+
+ +
+
+property config_keys
+

List of valid config keys

+
+
Returns:
+

list

+
+
+
+ +
+
+property log_level
+

Get user-specified “log_level” (DEBUG, INFO, WARNING, etc…).

+
+
Returns:
+

log_level (int) – Python logging module level (integer format) corresponding to the +config-specified log level string.

+
+
+
+ +
+
+property name
+

Get the job name, defaults to ‘rev’.

+
+
Returns:
+

name (str) – reV job name.

+
+
+
+ +
+
+check_overwrite_keys(primary_key, *overwrite_keys)[source]
+

Check for overwrite keys and raise a ConfigError if present

+
+
Parameters:
+
    +
  • primary_key (str) – Primary key that overwrites overwrite_keys, used for error message

  • +
  • overwrite_keys (str) – Key(s) to overwrite

  • +
+
+
+
+ +
+
+static check_files(flist)[source]
+

Make sure all files in the input file list exist.

+
+
Parameters:
+

flist (list) – List of files (with paths) to check existence of.

+
+
+
+ +
+
+str_replace_and_resolve(d, str_rep)[source]
+

Perform a deep string replacement and path resolve in d.

+
+
Parameters:
+
    +
  • d (dict) – Config dictionary potentially containing strings to replace +and/or paths to resolve.

  • +
  • str_rep (dict) – Replacement mapping where keys are strings to search for and +values are the new values.

  • +
+
+
Returns:
+

d (dict) – Config dictionary with updated strings.

+
+
+
+ +
+
+clear() None.  Remove all items from D.
+
+ +
+
+copy() a shallow copy of D
+
+ +
+
+fromkeys(value=None, /)
+

Create a new dictionary with keys from iterable and values set to value.

+
+ +
+
+get(key, default=None, /)
+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+items() a set-like object providing a view on D's items
+
+ +
+
+keys() a set-like object providing a view on D's keys
+
+ +
+
+pop(k[, d]) v, remove specified key and return the corresponding value.
+

If key is not found, d is returned if given, otherwise KeyError is raised

+
+ +
+
+popitem()
+

Remove and return a (key, value) pair as a 2-tuple.

+

Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.

+
+ +
+
+set_self_dict(dictlike)[source]
+

Save a dict-like variable as object instance dictionary items.

+
+
Parameters:
+

dictlike (dict) – Python namespace object to set to this dictionary-emulating class.

+
+
+
+ +
+
+setdefault(key, default=None, /)
+

Insert key with a value of default if key is not in the dictionary.

+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+update([E, ]**F) None.  Update D from dict/iterable E and F.
+

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]

+
+ +
+
+values() an object providing a view on D's values
+
+ +
+
+resolve_path(path)[source]
+

Resolve a file path represented by the input string.

+

This function resolves the input string if it resembles a path. +Specifically, the string will be resolved if it starts with +“./” or “..”, or it if it contains either “./” or +“..” somewhere in the string body. Otherwise, the string +is returned unchanged, so this function is safe to call on any +string, even ones that do not resemble a path.

+

This method delegates the “resolving” logic to +pathlib.Path.resolve(). This means the path is made +absolute, symlinks are resolved, and “..” components are +eliminated. If the path input starts with “./” or +“..”, it is assumed to be w.r.t the config directory, not +the run directory.

+
+
Parameters:
+

path (str) – Input file path.

+
+
Returns:
+

str – The resolved path.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.base_config.html b/_autosummary/reV.config.base_config.html new file mode 100644 index 000000000..4df7e3ca8 --- /dev/null +++ b/_autosummary/reV.config.base_config.html @@ -0,0 +1,639 @@ + + + + + + + reV.config.base_config — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.base_config

+

reV Base Configuration Framework

+

Classes

+ + + + + + +

BaseConfig(config[, check_keys, perform_str_rep])

Base class for configuration frameworks.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.cli_project_points.html b/_autosummary/reV.config.cli_project_points.html new file mode 100644 index 000000000..a69cc2e50 --- /dev/null +++ b/_autosummary/reV.config.cli_project_points.html @@ -0,0 +1,631 @@ + + + + + + + reV.config.cli_project_points — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.cli_project_points

+

Project Points CLI

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.curtailment.Curtailment.html b/_autosummary/reV.config.curtailment.Curtailment.html new file mode 100644 index 000000000..cf845c039 --- /dev/null +++ b/_autosummary/reV.config.curtailment.Curtailment.html @@ -0,0 +1,1066 @@ + + + + + + + reV.config.curtailment.Curtailment — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.curtailment.Curtailment

+
+
+class Curtailment(curtailment_parameters)[source]
+

Bases: BaseConfig

+

Config for generation curtailment.

+
+
Parameters:
+

curtailment_parameters (str | dict) – Configuration json file (with path) containing curtailment +information. Could also be a pre-extracted curtailment config +dictionary (the contents of the curtailment json).

+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

check_files(flist)

Make sure all files in the input file list exist.

check_overwrite_keys(primary_key, ...)

Check for overwrite keys and raise a ConfigError if present

clear()

copy()

fromkeys([value])

Create a new dictionary with keys from iterable and values set to value.

get(key[, default])

Return the value for key if key is in the dictionary, else default.

items()

keys()

pop(k[,d])

If key is not found, d is returned if given, otherwise KeyError is raised

popitem()

Remove and return a (key, value) pair as a 2-tuple.

resolve_path(path)

Resolve a file path represented by the input string.

set_self_dict(dictlike)

Save a dict-like variable as object instance dictionary items.

setdefault(key[, default])

Insert key with a value of default if key is not in the dictionary.

str_replace_and_resolve(d, str_rep)

Perform a deep string replacement and path resolve in d.

update([E, ]**F)

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k]

values()

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

REQUIREMENTS

Required keys for config

STR_REP

Mapping of config inputs (keys) to desired replacements (values) in addition to relative file paths as demarcated by ./ and ../

config_dir

Get the directory that the config file is in.

config_keys

List of valid config keys

date_range

Get the date range tuple (start, end) over which curtailment is possible (inclusive, exclusive) ("MMDD", "MMDD").

dawn_dusk

Get the solar zenith angle that signifies dawn and dusk.

equation

Get an equation-based curtailment scenario.

log_level

Get user-specified "log_level" (DEBUG, INFO, WARNING, etc...).

months

Get the months during which curtailment is possible (inclusive).

name

Get the job name, defaults to 'rev'.

precipitation

Get the precip rate (mm/hour) under which curtailment is possible.

probability

Get the probability that curtailment is in-effect if all other screening criteria are met.

random_seed

Random seed to use for curtailment probability

temperature

Get the temperature (C) over which curtailment is possible.

wind_speed

Get the wind speed threshold below which curtailment is possible.

+
+
+property wind_speed
+

Get the wind speed threshold below which curtailment is possible.

+
+
Returns:
+

_wind_speed (float | None) – Wind speed threshold below which curtailment is possible.

+
+
+
+ +
+
+property dawn_dusk
+

Get the solar zenith angle that signifies dawn and dusk.

+
+
Returns:
+

_dawn_dusk (float) – Solar zenith angle at dawn and dusk. Default is nautical, 12 +degrees below the horizon (sza=102).

+
+
+
+ +
+
+property months
+

Get the months during which curtailment is possible (inclusive). +This can be overridden by the date_range input.

+
+
Returns:
+

months (tuple | None) – Tuple of month integers. These are the months during which +curtailment could be in effect. Default is None.

+
+
+
+ +
+
+property date_range
+

Get the date range tuple (start, end) over which curtailment is +possible (inclusive, exclusive) (“MMDD”, “MMDD”). This overrides the +months input.

+
+
Returns:
+

date_range (tuple) – Two-entry tuple of the starting date (inclusive) and ending date +(exclusive) over which curtailment is possible. Input format is a +zero-padded string: “MMDD”.

+
+
+
+ +
+
+property temperature
+

Get the temperature (C) over which curtailment is possible.

+
+
Returns:
+

temperature (float | NoneType) – Temperature over which curtailment is possible. Defaults to None.

+
+
+
+ +
+
+property precipitation
+

Get the precip rate (mm/hour) under which curtailment is possible.

+
+
Returns:
+

precipitation (float | NoneType) – Precipitation rate under which curtailment is possible. This is +compared to the WTK resource dataset “precipitationrate_0m” in +mm/hour. Defaults to None.

+
+
+
+ +
+
+property equation
+

Get an equation-based curtailment scenario.

+
+
Returns:
+

equation (str) – A python equation based on other curtailment variables (wind_speed, +temperature, precipitation_rate, solar_zenith_angle) that returns +a True or False output to signal curtailment.

+
+
+
+ +
+
+property probability
+

Get the probability that curtailment is in-effect if all other +screening criteria are met.

+
+
Returns:
+

probability (float) – Fractional probability that curtailment is in-effect if all other +screening criteria are met. Defaults to 1 (curtailment is always +in effect if all other criteria are met).

+
+
+
+ +
+
+property random_seed
+

Random seed to use for curtailment probability

+
+
Returns:
+

int

+
+
+
+ +
+
+REQUIREMENTS = ()
+

Required keys for config

+
+ +
+
+STR_REP = {'REVDIR': '/home/runner/work/reV/reV/reV', 'TESTDATADIR': '/home/runner/work/reV/reV/tests/data'}
+

Mapping of config inputs (keys) to desired replacements (values) in +addition to relative file paths as demarcated by ./ and ../

+
+ +
+
+static check_files(flist)
+

Make sure all files in the input file list exist.

+
+
Parameters:
+

flist (list) – List of files (with paths) to check existance of.

+
+
+
+ +
+
+check_overwrite_keys(primary_key, *overwrite_keys)
+

Check for overwrite keys and raise a ConfigError if present

+
+
Parameters:
+
    +
  • primary_key (str) – Primary key that overwrites overwrite_keys, used for error message

  • +
  • overwrite_keys (str) – Key(s) to overwrite

  • +
+
+
+
+ +
+
+clear() None.  Remove all items from D.
+
+ +
+
+property config_dir
+

Get the directory that the config file is in.

+
+
Returns:
+

config_dir (str) – Directory path that the config file is in.

+
+
+
+ +
+
+property config_keys
+

List of valid config keys

+
+
Returns:
+

list

+
+
+
+ +
+
+copy() a shallow copy of D
+
+ +
+
+fromkeys(value=None, /)
+

Create a new dictionary with keys from iterable and values set to value.

+
+ +
+
+get(key, default=None, /)
+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+items() a set-like object providing a view on D's items
+
+ +
+
+keys() a set-like object providing a view on D's keys
+
+ +
+
+property log_level
+

Get user-specified “log_level” (DEBUG, INFO, WARNING, etc…).

+
+
Returns:
+

log_level (int) – Python logging module level (integer format) corresponding to the +config-specified log level string.

+
+
+
+ +
+
+property name
+

Get the job name, defaults to ‘rev’.

+
+
Returns:
+

name (str) – reV job name.

+
+
+
+ +
+
+pop(k[, d]) v, remove specified key and return the corresponding value.
+

If key is not found, d is returned if given, otherwise KeyError is raised

+
+ +
+
+popitem()
+

Remove and return a (key, value) pair as a 2-tuple.

+

Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.

+
+ +
+
+resolve_path(path)
+

Resolve a file path represented by the input string.

+

This function resolves the input string if it resembles a path. +Specifically, the string will be resolved if it starts with +“./” or “..”, or it if it contains either “./” or +“..” somewhere in the string body. Otherwise, the string +is returned unchanged, so this function is safe to call on any +string, even ones that do not resemble a path.

+

This method delegates the “resolving” logic to +pathlib.Path.resolve(). This means the path is made +absolute, symlinks are resolved, and “..” components are +eliminated. If the path input starts with “./” or +“..”, it is assumed to be w.r.t the config directory, not +the run directory.

+
+
Parameters:
+

path (str) – Input file path.

+
+
Returns:
+

str – The resolved path.

+
+
+
+ +
+
+set_self_dict(dictlike)
+

Save a dict-like variable as object instance dictionary items.

+
+
Parameters:
+

dictlike (dict) – Python namespace object to set to this dictionary-emulating class.

+
+
+
+ +
+
+setdefault(key, default=None, /)
+

Insert key with a value of default if key is not in the dictionary.

+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+str_replace_and_resolve(d, str_rep)
+

Perform a deep string replacement and path resolve in d.

+
+
Parameters:
+
    +
  • d (dict) – Config dictionary potentially containing strings to replace +and/or paths to resolve.

  • +
  • str_rep (dict) – Replacement mapping where keys are strings to search for and +values are the new values.

  • +
+
+
Returns:
+

d (dict) – Config dictionary with updated strings.

+
+
+
+ +
+
+update([E, ]**F) None.  Update D from dict/iterable E and F.
+

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]

+
+ +
+
+values() an object providing a view on D's values
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.curtailment.html b/_autosummary/reV.config.curtailment.html new file mode 100644 index 000000000..cd476cf86 --- /dev/null +++ b/_autosummary/reV.config.curtailment.html @@ -0,0 +1,641 @@ + + + + + + + reV.config.curtailment — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.curtailment

+

reV config for curtailment inputs.

+

Created on Mon Jan 28 11:43:27 2019

+

@author: gbuster

+

Classes

+ + + + + + +

Curtailment(curtailment_parameters)

Config for generation curtailment.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.execution.BaseExecutionConfig.html b/_autosummary/reV.config.execution.BaseExecutionConfig.html new file mode 100644 index 000000000..57d75bf4d --- /dev/null +++ b/_autosummary/reV.config.execution.BaseExecutionConfig.html @@ -0,0 +1,1012 @@ + + + + + + + reV.config.execution.BaseExecutionConfig — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.execution.BaseExecutionConfig

+
+
+class BaseExecutionConfig(config_dict)[source]
+

Bases: BaseConfig

+

Base class to handle execution configuration

+
+
Parameters:
+

config (str | dict) – File path to config json (str), serialized json object (str), +or dictionary with pre-extracted config.

+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

check_files(flist)

Make sure all files in the input file list exist.

check_overwrite_keys(primary_key, ...)

Check for overwrite keys and raise a ConfigError if present

clear()

copy()

fromkeys([value])

Create a new dictionary with keys from iterable and values set to value.

get(key[, default])

Return the value for key if key is in the dictionary, else default.

items()

keys()

pop(k[,d])

If key is not found, d is returned if given, otherwise KeyError is raised

popitem()

Remove and return a (key, value) pair as a 2-tuple.

resolve_path(path)

Resolve a file path represented by the input string.

set_self_dict(dictlike)

Save a dict-like variable as object instance dictionary items.

setdefault(key[, default])

Insert key with a value of default if key is not in the dictionary.

str_replace_and_resolve(d, str_rep)

Perform a deep string replacement and path resolve in d.

update([E, ]**F)

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k]

values()

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

REQUIREMENTS

Required keys for config

STR_REP

Mapping of config inputs (keys) to desired replacements (values) in addition to relative file paths as demarcated by ./ and ../

config_dir

Get the directory that the config file is in.

config_keys

List of valid config keys

log_level

Get user-specified "log_level" (DEBUG, INFO, WARNING, etc...).

max_workers

Get the max_workers property (1 runs in serial, None is all workers)

memory_utilization_limit

Get the node memory utilization limit property.

name

Get the job name, defaults to 'rev'.

nodes

Get the number of nodes property.

option

Get the hardware run option.

sh_script

Get the "sh_script" entry which is a string that contains extra shell script commands to run before the reV commands.

sites_per_worker

Get the number of sites to run per worker.

+
+
+property option
+

Get the hardware run option.

+
+
Returns:
+

option (str) – Execution control option, e.g. local, peregrine, eagle…

+
+
+
+ +
+
+property nodes
+

Get the number of nodes property.

+
+
Returns:
+

nodes (int) – Number of available nodes. Default is 1 node.

+
+
+
+ +
+
+property max_workers
+

Get the max_workers property (1 runs in serial, None is all workers)

+
+
Returns:
+

max_workers (int | None) – Processes per node. Default is None max_workers (all available).

+
+
+
+ +
+
+property sites_per_worker
+

Get the number of sites to run per worker.

+
+
Returns:
+

sites_per_worker (int | None) – Number of sites to run per worker in a parallel scheme.

+
+
+
+ +
+
+property memory_utilization_limit
+

Get the node memory utilization limit property. Key in the config +json is “memory_utilization_limit”.

+
+
Returns:
+

mem_util_lim (float) – Memory utilization limit (fractional). Key in the config json is +“memory_utilization_limit”.

+
+
+
+ +
+
+property sh_script
+

Get the “sh_script” entry which is a string that contains extra +shell script commands to run before the reV commands.

+
+
Returns:
+

str

+
+
+
+ +
+
+REQUIREMENTS = ()
+

Required keys for config

+
+ +
+
+STR_REP = {'REVDIR': '/home/runner/work/reV/reV/reV', 'TESTDATADIR': '/home/runner/work/reV/reV/tests/data'}
+

Mapping of config inputs (keys) to desired replacements (values) in +addition to relative file paths as demarcated by ./ and ../

+
+ +
+
+static check_files(flist)
+

Make sure all files in the input file list exist.

+
+
Parameters:
+

flist (list) – List of files (with paths) to check existance of.

+
+
+
+ +
+
+check_overwrite_keys(primary_key, *overwrite_keys)
+

Check for overwrite keys and raise a ConfigError if present

+
+
Parameters:
+
    +
  • primary_key (str) – Primary key that overwrites overwrite_keys, used for error message

  • +
  • overwrite_keys (str) – Key(s) to overwrite

  • +
+
+
+
+ +
+
+clear() None.  Remove all items from D.
+
+ +
+
+property config_dir
+

Get the directory that the config file is in.

+
+
Returns:
+

config_dir (str) – Directory path that the config file is in.

+
+
+
+ +
+
+property config_keys
+

List of valid config keys

+
+
Returns:
+

list

+
+
+
+ +
+
+copy() a shallow copy of D
+
+ +
+
+fromkeys(value=None, /)
+

Create a new dictionary with keys from iterable and values set to value.

+
+ +
+
+get(key, default=None, /)
+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+items() a set-like object providing a view on D's items
+
+ +
+
+keys() a set-like object providing a view on D's keys
+
+ +
+
+property log_level
+

Get user-specified “log_level” (DEBUG, INFO, WARNING, etc…).

+
+
Returns:
+

log_level (int) – Python logging module level (integer format) corresponding to the +config-specified log level string.

+
+
+
+ +
+
+property name
+

Get the job name, defaults to ‘rev’.

+
+
Returns:
+

name (str) – reV job name.

+
+
+
+ +
+
+pop(k[, d]) v, remove specified key and return the corresponding value.
+

If key is not found, d is returned if given, otherwise KeyError is raised

+
+ +
+
+popitem()
+

Remove and return a (key, value) pair as a 2-tuple.

+

Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.

+
+ +
+
+resolve_path(path)
+

Resolve a file path represented by the input string.

+

This function resolves the input string if it resembles a path. +Specifically, the string will be resolved if it starts with +“./” or “..”, or it if it contains either “./” or +“..” somewhere in the string body. Otherwise, the string +is returned unchanged, so this function is safe to call on any +string, even ones that do not resemble a path.

+

This method delegates the “resolving” logic to +pathlib.Path.resolve(). This means the path is made +absolute, symlinks are resolved, and “..” components are +eliminated. If the path input starts with “./” or +“..”, it is assumed to be w.r.t the config directory, not +the run directory.

+
+
Parameters:
+

path (str) – Input file path.

+
+
Returns:
+

str – The resolved path.

+
+
+
+ +
+
+set_self_dict(dictlike)
+

Save a dict-like variable as object instance dictionary items.

+
+
Parameters:
+

dictlike (dict) – Python namespace object to set to this dictionary-emulating class.

+
+
+
+ +
+
+setdefault(key, default=None, /)
+

Insert key with a value of default if key is not in the dictionary.

+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+str_replace_and_resolve(d, str_rep)
+

Perform a deep string replacement and path resolve in d.

+
+
Parameters:
+
    +
  • d (dict) – Config dictionary potentially containing strings to replace +and/or paths to resolve.

  • +
  • str_rep (dict) – Replacement mapping where keys are strings to search for and +values are the new values.

  • +
+
+
Returns:
+

d (dict) – Config dictionary with updated strings.

+
+
+
+ +
+
+update([E, ]**F) None.  Update D from dict/iterable E and F.
+

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]

+
+ +
+
+values() an object providing a view on D's values
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.execution.HPCConfig.html b/_autosummary/reV.config.execution.HPCConfig.html new file mode 100644 index 000000000..aab1b4986 --- /dev/null +++ b/_autosummary/reV.config.execution.HPCConfig.html @@ -0,0 +1,1070 @@ + + + + + + + reV.config.execution.HPCConfig — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.execution.HPCConfig

+
+
+class HPCConfig(config_dict)[source]
+

Bases: BaseExecutionConfig

+

Class to handle HPC configuration inputs.

+
+
Parameters:
+

config (str | dict) – File path to config json (str), serialized json object (str), +or dictionary with pre-extracted config.

+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

check_files(flist)

Make sure all files in the input file list exist.

check_overwrite_keys(primary_key, ...)

Check for overwrite keys and raise a ConfigError if present

clear()

copy()

fromkeys([value])

Create a new dictionary with keys from iterable and values set to value.

get(key[, default])

Return the value for key if key is in the dictionary, else default.

items()

keys()

pop(k[,d])

If key is not found, d is returned if given, otherwise KeyError is raised

popitem()

Remove and return a (key, value) pair as a 2-tuple.

resolve_path(path)

Resolve a file path represented by the input string.

set_self_dict(dictlike)

Save a dict-like variable as object instance dictionary items.

setdefault(key[, default])

Insert key with a value of default if key is not in the dictionary.

str_replace_and_resolve(d, str_rep)

Perform a deep string replacement and path resolve in d.

update([E, ]**F)

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k]

values()

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

REQUIREMENTS

Required keys for config

STR_REP

Mapping of config inputs (keys) to desired replacements (values) in addition to relative file paths as demarcated by ./ and ../

allocation

Get the HPC allocation property.

conda_env

Get conda environment to activate

config_dir

Get the directory that the config file is in.

config_keys

List of valid config keys

feature

Get feature request str.

log_level

Get user-specified "log_level" (DEBUG, INFO, WARNING, etc...).

max_workers

Get the max_workers property (1 runs in serial, None is all workers)

memory_utilization_limit

Get the node memory utilization limit property.

module

Get module to load if given

name

Get the job name, defaults to 'rev'.

nodes

Get the number of nodes property.

option

Get the hardware run option.

sh_script

Get the "sh_script" entry which is a string that contains extra shell script commands to run before the reV commands.

sites_per_worker

Get the number of sites to run per worker.

+
+
+property allocation
+

Get the HPC allocation property.

+
+
Returns:
+

hpc_alloc (str) – Name of the HPC allocation account for the specified job.

+
+
+
+ +
+
+property feature
+

Get feature request str.

+
+
Returns:
+

feature (str | NoneType) – Feature request string. For EAGLE, a full additional flag. +Config should look like: +"feature": "--depend=[state:job_id]"

+
+
+
+ +
+
+property module
+

Get module to load if given

+
+
Returns:
+

module (str) – Module to load on node

+
+
+
+ +
+
+property conda_env
+

Get conda environment to activate

+
+
Returns:
+

conda_env (str) – Conda environment to activate

+
+
+
+ +
+
+REQUIREMENTS = ()
+

Required keys for config

+
+ +
+
+STR_REP = {'REVDIR': '/home/runner/work/reV/reV/reV', 'TESTDATADIR': '/home/runner/work/reV/reV/tests/data'}
+

Mapping of config inputs (keys) to desired replacements (values) in +addition to relative file paths as demarcated by ./ and ../

+
+ +
+
+static check_files(flist)
+

Make sure all files in the input file list exist.

+
+
Parameters:
+

flist (list) – List of files (with paths) to check existance of.

+
+
+
+ +
+
+check_overwrite_keys(primary_key, *overwrite_keys)
+

Check for overwrite keys and raise a ConfigError if present

+
+
Parameters:
+
    +
  • primary_key (str) – Primary key that overwrites overwrite_keys, used for error message

  • +
  • overwrite_keys (str) – Key(s) to overwrite

  • +
+
+
+
+ +
+
+clear() None.  Remove all items from D.
+
+ +
+
+property config_dir
+

Get the directory that the config file is in.

+
+
Returns:
+

config_dir (str) – Directory path that the config file is in.

+
+
+
+ +
+
+property config_keys
+

List of valid config keys

+
+
Returns:
+

list

+
+
+
+ +
+
+copy() a shallow copy of D
+
+ +
+
+fromkeys(value=None, /)
+

Create a new dictionary with keys from iterable and values set to value.

+
+ +
+
+get(key, default=None, /)
+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+items() a set-like object providing a view on D's items
+
+ +
+
+keys() a set-like object providing a view on D's keys
+
+ +
+
+property log_level
+

Get user-specified “log_level” (DEBUG, INFO, WARNING, etc…).

+
+
Returns:
+

log_level (int) – Python logging module level (integer format) corresponding to the +config-specified log level string.

+
+
+
+ +
+
+property max_workers
+

Get the max_workers property (1 runs in serial, None is all workers)

+
+
Returns:
+

max_workers (int | None) – Processes per node. Default is None max_workers (all available).

+
+
+
+ +
+
+property memory_utilization_limit
+

Get the node memory utilization limit property. Key in the config +json is “memory_utilization_limit”.

+
+
Returns:
+

mem_util_lim (float) – Memory utilization limit (fractional). Key in the config json is +“memory_utilization_limit”.

+
+
+
+ +
+
+property name
+

Get the job name, defaults to ‘rev’.

+
+
Returns:
+

name (str) – reV job name.

+
+
+
+ +
+
+property nodes
+

Get the number of nodes property.

+
+
Returns:
+

nodes (int) – Number of available nodes. Default is 1 node.

+
+
+
+ +
+
+property option
+

Get the hardware run option.

+
+
Returns:
+

option (str) – Execution control option, e.g. local, peregrine, eagle…

+
+
+
+ +
+
+pop(k[, d]) v, remove specified key and return the corresponding value.
+

If key is not found, d is returned if given, otherwise KeyError is raised

+
+ +
+
+popitem()
+

Remove and return a (key, value) pair as a 2-tuple.

+

Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.

+
+ +
+
+resolve_path(path)
+

Resolve a file path represented by the input string.

+

This function resolves the input string if it resembles a path. +Specifically, the string will be resolved if it starts with +“./” or “..”, or it if it contains either “./” or +“..” somewhere in the string body. Otherwise, the string +is returned unchanged, so this function is safe to call on any +string, even ones that do not resemble a path.

+

This method delegates the “resolving” logic to +pathlib.Path.resolve(). This means the path is made +absolute, symlinks are resolved, and “..” components are +eliminated. If the path input starts with “./” or +“..”, it is assumed to be w.r.t the config directory, not +the run directory.

+
+
Parameters:
+

path (str) – Input file path.

+
+
Returns:
+

str – The resolved path.

+
+
+
+ +
+
+set_self_dict(dictlike)
+

Save a dict-like variable as object instance dictionary items.

+
+
Parameters:
+

dictlike (dict) – Python namespace object to set to this dictionary-emulating class.

+
+
+
+ +
+
+setdefault(key, default=None, /)
+

Insert key with a value of default if key is not in the dictionary.

+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+property sh_script
+

Get the “sh_script” entry which is a string that contains extra +shell script commands to run before the reV commands.

+
+
Returns:
+

str

+
+
+
+ +
+
+property sites_per_worker
+

Get the number of sites to run per worker.

+
+
Returns:
+

sites_per_worker (int | None) – Number of sites to run per worker in a parallel scheme.

+
+
+
+ +
+
+str_replace_and_resolve(d, str_rep)
+

Perform a deep string replacement and path resolve in d.

+
+
Parameters:
+
    +
  • d (dict) – Config dictionary potentially containing strings to replace +and/or paths to resolve.

  • +
  • str_rep (dict) – Replacement mapping where keys are strings to search for and +values are the new values.

  • +
+
+
Returns:
+

d (dict) – Config dictionary with updated strings.

+
+
+
+ +
+
+update([E, ]**F) None.  Update D from dict/iterable E and F.
+

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]

+
+ +
+
+values() an object providing a view on D's values
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.execution.SlurmConfig.html b/_autosummary/reV.config.execution.SlurmConfig.html new file mode 100644 index 000000000..9a205cc64 --- /dev/null +++ b/_autosummary/reV.config.execution.SlurmConfig.html @@ -0,0 +1,1098 @@ + + + + + + + reV.config.execution.SlurmConfig — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.execution.SlurmConfig

+
+
+class SlurmConfig(config_dict)[source]
+

Bases: HPCConfig

+

Class to handle SLURM (Eagle) configuration inputs.

+
+
Parameters:
+

config (str | dict) – File path to config json (str), serialized json object (str), +or dictionary with pre-extracted config.

+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

check_files(flist)

Make sure all files in the input file list exist.

check_overwrite_keys(primary_key, ...)

Check for overwrite keys and raise a ConfigError if present

clear()

copy()

fromkeys([value])

Create a new dictionary with keys from iterable and values set to value.

get(key[, default])

Return the value for key if key is in the dictionary, else default.

items()

keys()

pop(k[,d])

If key is not found, d is returned if given, otherwise KeyError is raised

popitem()

Remove and return a (key, value) pair as a 2-tuple.

resolve_path(path)

Resolve a file path represented by the input string.

set_self_dict(dictlike)

Save a dict-like variable as object instance dictionary items.

setdefault(key[, default])

Insert key with a value of default if key is not in the dictionary.

str_replace_and_resolve(d, str_rep)

Perform a deep string replacement and path resolve in d.

update([E, ]**F)

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k]

values()

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

REQUIREMENTS

Required keys for config

STR_REP

Mapping of config inputs (keys) to desired replacements (values) in addition to relative file paths as demarcated by ./ and ../

allocation

Get the HPC allocation property.

conda_env

Get conda environment to activate

config_dir

Get the directory that the config file is in.

config_keys

List of valid config keys

feature

Get feature request str.

log_level

Get user-specified "log_level" (DEBUG, INFO, WARNING, etc...).

max_workers

Get the max_workers property (1 runs in serial, None is all workers)

memory

Get the requested Eagle node "memory" value in GB or can be None.

memory_utilization_limit

Get the node memory utilization limit property.

module

Get module to load if given

name

Get the job name, defaults to 'rev'.

nodes

Get the number of nodes property.

option

Get the hardware run option.

sh_script

Get the "sh_script" entry which is a string that contains extra shell script commands to run before the reV commands.

sites_per_worker

Get the number of sites to run per worker.

walltime

Get the requested Eagle node "walltime" value.

+
+
+property memory
+

Get the requested Eagle node “memory” value in GB or can be None.

+
+
Returns:
+

_hpc_node_mem (int | None) – Requested node memory in GB.

+
+
+
+ +
+
+property walltime
+

Get the requested Eagle node “walltime” value.

+
+
Returns:
+

_hpc_walltime (int) – Requested single node job time in hours.

+
+
+
+ +
+
+REQUIREMENTS = ()
+

Required keys for config

+
+ +
+
+STR_REP = {'REVDIR': '/home/runner/work/reV/reV/reV', 'TESTDATADIR': '/home/runner/work/reV/reV/tests/data'}
+

Mapping of config inputs (keys) to desired replacements (values) in +addition to relative file paths as demarcated by ./ and ../

+
+ +
+
+property allocation
+

Get the HPC allocation property.

+
+
Returns:
+

hpc_alloc (str) – Name of the HPC allocation account for the specified job.

+
+
+
+ +
+
+static check_files(flist)
+

Make sure all files in the input file list exist.

+
+
Parameters:
+

flist (list) – List of files (with paths) to check existance of.

+
+
+
+ +
+
+check_overwrite_keys(primary_key, *overwrite_keys)
+

Check for overwrite keys and raise a ConfigError if present

+
+
Parameters:
+
    +
  • primary_key (str) – Primary key that overwrites overwrite_keys, used for error message

  • +
  • overwrite_keys (str) – Key(s) to overwrite

  • +
+
+
+
+ +
+
+clear() None.  Remove all items from D.
+
+ +
+
+property conda_env
+

Get conda environment to activate

+
+
Returns:
+

conda_env (str) – Conda environment to activate

+
+
+
+ +
+
+property config_dir
+

Get the directory that the config file is in.

+
+
Returns:
+

config_dir (str) – Directory path that the config file is in.

+
+
+
+ +
+
+property config_keys
+

List of valid config keys

+
+
Returns:
+

list

+
+
+
+ +
+
+copy() a shallow copy of D
+
+ +
+
+property feature
+

Get feature request str.

+
+
Returns:
+

feature (str | NoneType) – Feature request string. For EAGLE, a full additional flag. +Config should look like: +"feature": "--depend=[state:job_id]"

+
+
+
+ +
+
+fromkeys(value=None, /)
+

Create a new dictionary with keys from iterable and values set to value.

+
+ +
+
+get(key, default=None, /)
+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+items() a set-like object providing a view on D's items
+
+ +
+
+keys() a set-like object providing a view on D's keys
+
+ +
+
+property log_level
+

Get user-specified “log_level” (DEBUG, INFO, WARNING, etc…).

+
+
Returns:
+

log_level (int) – Python logging module level (integer format) corresponding to the +config-specified log level string.

+
+
+
+ +
+
+property max_workers
+

Get the max_workers property (1 runs in serial, None is all workers)

+
+
Returns:
+

max_workers (int | None) – Processes per node. Default is None max_workers (all available).

+
+
+
+ +
+
+property memory_utilization_limit
+

Get the node memory utilization limit property. Key in the config +json is “memory_utilization_limit”.

+
+
Returns:
+

mem_util_lim (float) – Memory utilization limit (fractional). Key in the config json is +“memory_utilization_limit”.

+
+
+
+ +
+
+property module
+

Get module to load if given

+
+
Returns:
+

module (str) – Module to load on node

+
+
+
+ +
+
+property name
+

Get the job name, defaults to ‘rev’.

+
+
Returns:
+

name (str) – reV job name.

+
+
+
+ +
+
+property nodes
+

Get the number of nodes property.

+
+
Returns:
+

nodes (int) – Number of available nodes. Default is 1 node.

+
+
+
+ +
+
+property option
+

Get the hardware run option.

+
+
Returns:
+

option (str) – Execution control option, e.g. local, peregrine, eagle…

+
+
+
+ +
+
+pop(k[, d]) v, remove specified key and return the corresponding value.
+

If key is not found, d is returned if given, otherwise KeyError is raised

+
+ +
+
+popitem()
+

Remove and return a (key, value) pair as a 2-tuple.

+

Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.

+
+ +
+
+resolve_path(path)
+

Resolve a file path represented by the input string.

+

This function resolves the input string if it resembles a path. +Specifically, the string will be resolved if it starts with +“./” or “..”, or it if it contains either “./” or +“..” somewhere in the string body. Otherwise, the string +is returned unchanged, so this function is safe to call on any +string, even ones that do not resemble a path.

+

This method delegates the “resolving” logic to +pathlib.Path.resolve(). This means the path is made +absolute, symlinks are resolved, and “..” components are +eliminated. If the path input starts with “./” or +“..”, it is assumed to be w.r.t the config directory, not +the run directory.

+
+
Parameters:
+

path (str) – Input file path.

+
+
Returns:
+

str – The resolved path.

+
+
+
+ +
+
+set_self_dict(dictlike)
+

Save a dict-like variable as object instance dictionary items.

+
+
Parameters:
+

dictlike (dict) – Python namespace object to set to this dictionary-emulating class.

+
+
+
+ +
+
+setdefault(key, default=None, /)
+

Insert key with a value of default if key is not in the dictionary.

+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+property sh_script
+

Get the “sh_script” entry which is a string that contains extra +shell script commands to run before the reV commands.

+
+
Returns:
+

str

+
+
+
+ +
+
+property sites_per_worker
+

Get the number of sites to run per worker.

+
+
Returns:
+

sites_per_worker (int | None) – Number of sites to run per worker in a parallel scheme.

+
+
+
+ +
+
+str_replace_and_resolve(d, str_rep)
+

Perform a deep string replacement and path resolve in d.

+
+
Parameters:
+
    +
  • d (dict) – Config dictionary potentially containing strings to replace +and/or paths to resolve.

  • +
  • str_rep (dict) – Replacement mapping where keys are strings to search for and +values are the new values.

  • +
+
+
Returns:
+

d (dict) – Config dictionary with updated strings.

+
+
+
+ +
+
+update([E, ]**F) None.  Update D from dict/iterable E and F.
+

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]

+
+ +
+
+values() an object providing a view on D's values
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.execution.html b/_autosummary/reV.config.execution.html new file mode 100644 index 000000000..703d18f4f --- /dev/null +++ b/_autosummary/reV.config.execution.html @@ -0,0 +1,645 @@ + + + + + + + reV.config.execution — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.execution

+

reV Configuration for Execution Options

+

Classes

+ + + + + + + + + + + + +

BaseExecutionConfig(config_dict)

Base class to handle execution configuration

HPCConfig(config_dict)

Class to handle HPC configuration inputs.

SlurmConfig(config_dict)

Class to handle SLURM (Eagle) configuration inputs.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.html b/_autosummary/reV.config.html new file mode 100644 index 000000000..f1390d162 --- /dev/null +++ b/_autosummary/reV.config.html @@ -0,0 +1,658 @@ + + + + + + + reV.config — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config

+

reV Configuration

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

reV.config.base_analysis_config

reV Base analysis Configuration Frameworks

reV.config.base_config

reV Base Configuration Framework

reV.config.cli_project_points

Project Points CLI

reV.config.curtailment

reV config for curtailment inputs.

reV.config.execution

reV Configuration for Execution Options

reV.config.output_request

Output request config to handle user output requests.

reV.config.project_points

reV Project Points Configuration

reV.config.sam_config

reV configuration framework for SAM config inputs.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.output_request.OutputRequest.html b/_autosummary/reV.config.output_request.OutputRequest.html new file mode 100644 index 000000000..f1b0169db --- /dev/null +++ b/_autosummary/reV.config.output_request.OutputRequest.html @@ -0,0 +1,775 @@ + + + + + + + reV.config.output_request.OutputRequest — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.output_request.OutputRequest

+
+
+class OutputRequest(inp)[source]
+

Bases: list

+

Base output request list framework with request key correction logic.

+
+
Parameters:
+

inp (list | tuple | str) – List of requested reV output variables.

+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

append(object, /)

Append object to the end of the list.

clear()

Remove all items from list.

copy()

Return a shallow copy of the list.

count(value, /)

Return number of occurrences of value.

extend(iterable, /)

Extend list by appending elements from the iterable.

index(value[, start, stop])

Return first index of value.

insert(index, object, /)

Insert object before index.

pop([index])

Remove and return item at index (default last).

remove(value, /)

Remove first occurrence of value.

reverse()

Reverse IN PLACE.

sort(*[, key, reverse])

Sort the list in ascending order and return None.

+

Attributes

+ + + + + + +

CORRECTIONS

+
+
+__add__(value, /)
+

Return self+value.

+
+ +
+
+__mul__(value, /)
+

Return self*value.

+
+ +
+
+append(object, /)
+

Append object to the end of the list.

+
+ +
+
+clear()
+

Remove all items from list.

+
+ +
+
+copy()
+

Return a shallow copy of the list.

+
+ +
+
+count(value, /)
+

Return number of occurrences of value.

+
+ +
+
+extend(iterable, /)
+

Extend list by appending elements from the iterable.

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+insert(index, object, /)
+

Insert object before index.

+
+ +
+
+pop(index=-1, /)
+

Remove and return item at index (default last).

+

Raises IndexError if list is empty or index is out of range.

+
+ +
+
+remove(value, /)
+

Remove first occurrence of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+reverse()
+

Reverse IN PLACE.

+
+ +
+
+sort(*, key=None, reverse=False)
+

Sort the list in ascending order and return None.

+

The sort is in-place (i.e. the list itself is modified) and stable (i.e. the +order of two equal elements is maintained).

+

If a key function is given, apply it once to each list item and sort them, +ascending or descending, according to their function values.

+

The reverse flag can be set to sort in descending order.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.output_request.SAMOutputRequest.html b/_autosummary/reV.config.output_request.SAMOutputRequest.html new file mode 100644 index 000000000..23f8a319a --- /dev/null +++ b/_autosummary/reV.config.output_request.SAMOutputRequest.html @@ -0,0 +1,775 @@ + + + + + + + reV.config.output_request.SAMOutputRequest — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.output_request.SAMOutputRequest

+
+
+class SAMOutputRequest(inp)[source]
+

Bases: OutputRequest

+

SAM output request framework.

+
+
Parameters:
+

inp (list | tuple | str) – List of requested reV output variables.

+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

append(object, /)

Append object to the end of the list.

clear()

Remove all items from list.

copy()

Return a shallow copy of the list.

count(value, /)

Return number of occurrences of value.

extend(iterable, /)

Extend list by appending elements from the iterable.

index(value[, start, stop])

Return first index of value.

insert(index, object, /)

Insert object before index.

pop([index])

Remove and return item at index (default last).

remove(value, /)

Remove first occurrence of value.

reverse()

Reverse IN PLACE.

sort(*[, key, reverse])

Sort the list in ascending order and return None.

+

Attributes

+ + + + + + +

CORRECTIONS

+
+
+__add__(value, /)
+

Return self+value.

+
+ +
+
+__mul__(value, /)
+

Return self*value.

+
+ +
+
+append(object, /)
+

Append object to the end of the list.

+
+ +
+
+clear()
+

Remove all items from list.

+
+ +
+
+copy()
+

Return a shallow copy of the list.

+
+ +
+
+count(value, /)
+

Return number of occurrences of value.

+
+ +
+
+extend(iterable, /)
+

Extend list by appending elements from the iterable.

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+insert(index, object, /)
+

Insert object before index.

+
+ +
+
+pop(index=-1, /)
+

Remove and return item at index (default last).

+

Raises IndexError if list is empty or index is out of range.

+
+ +
+
+remove(value, /)
+

Remove first occurrence of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+reverse()
+

Reverse IN PLACE.

+
+ +
+
+sort(*, key=None, reverse=False)
+

Sort the list in ascending order and return None.

+

The sort is in-place (i.e. the list itself is modified) and stable (i.e. the +order of two equal elements is maintained).

+

If a key function is given, apply it once to each list item and sort them, +ascending or descending, according to their function values.

+

The reverse flag can be set to sort in descending order.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.output_request.html b/_autosummary/reV.config.output_request.html new file mode 100644 index 000000000..3692cd08b --- /dev/null +++ b/_autosummary/reV.config.output_request.html @@ -0,0 +1,645 @@ + + + + + + + reV.config.output_request — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.output_request

+

Output request config to handle user output requests.

+

This module will allow for aliases and fix some typos.

+

Created on Mon Jul 8 09:37:23 2019

+

@author: gbuster

+

Classes

+ + + + + + + + + +

OutputRequest(inp)

Base output request list framework with request key correction logic.

SAMOutputRequest(inp)

SAM output request framework.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.project_points.PointsControl.html b/_autosummary/reV.config.project_points.PointsControl.html new file mode 100644 index 000000000..a7f902b4c --- /dev/null +++ b/_autosummary/reV.config.project_points.PointsControl.html @@ -0,0 +1,758 @@ + + + + + + + reV.config.project_points.PointsControl — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.project_points.PointsControl

+
+
+class PointsControl(project_points, sites_per_split=100)[source]
+

Bases: object

+

Class to manage and split ProjectPoints.

+
+
Parameters:
+
    +
  • project_points (reV.config.ProjectPoints) – ProjectPoints instance to be split between execution workers.

  • +
  • sites_per_split (int) – Sites per project points split instance returned in the __next__ +iterator function.

  • +
+
+
+

Methods

+ + + + + + +

split(i0, i1, project_points[, sites_per_split])

Split this execution by splitting the project points attribute.

+

Attributes

+ + + + + + + + + + + + + + + + + + +

N

Length of current iterator list

project_points

Get the project points property.

sites

Get the project points sites for this instance.

sites_per_split

number of sites per split.

split_range

Get the current split range property.

+
+
+property N
+

Length of current iterator list

+
+
Returns:
+

N (int) – Number of iterators in list

+
+
+
+ +
+
+property sites_per_split
+

number of sites per split.

+
+
Returns:
+

_sites_per_split (int) – Sites per split iter object.

+
+
Type:
+

Get the iterator increment

+
+
+
+ +
+
+property project_points
+

Get the project points property.

+
+
Returns:
+

_project_points (reV.config.project_points.ProjectPoints) – ProjectPoints instance corresponding to this PointsControl +instance.

+
+
+
+ +
+
+property sites
+

Get the project points sites for this instance.

+
+
Returns:
+

sites (list) – List of sites belonging to the _project_points attribute.

+
+
+
+ +
+
+property split_range
+

Get the current split range property.

+
+
Returns:
+

_split_range (list) – Two-entry list that indicates the starting and finishing +(inclusive, exclusive, respectively) indices of a split instance +of the PointsControl object. This is set in the iterator dunder +methods of PointsControl.

+
+
+
+ +
+
+classmethod split(i0, i1, project_points, sites_per_split=100)[source]
+

Split this execution by splitting the project points attribute.

+
+
Parameters:
+
    +
  • i0/i1 (int) – Beginning/end (inclusive/exclusive, respectively) index split +parameters for ProjectPoints.split() method.

  • +
  • project_points (reV.config.ProjectPoints) – Project points instance that will be split.

  • +
  • sites_per_split (int) – Sites per project points split instance returned in the __next__ +iterator function.

  • +
+
+
Returns:
+

sub (PointsControl) – New instance of PointsControl with a subset of the original +project points.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.project_points.ProjectPoints.html b/_autosummary/reV.config.project_points.ProjectPoints.html new file mode 100644 index 000000000..6d5958ceb --- /dev/null +++ b/_autosummary/reV.config.project_points.ProjectPoints.html @@ -0,0 +1,1055 @@ + + + + + + + reV.config.project_points.ProjectPoints — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.project_points.ProjectPoints

+
+
+class ProjectPoints(points, sam_configs, tech=None, res_file=None, curtailment=None)[source]
+

Bases: object

+

Class to manage site and SAM input configuration requests.

+

Examples

+
>>> import os
+>>> from reV import TESTDATADIR
+>>> from reV.config.project_points import ProjectPoints
+>>>
+>>> points = slice(0, 100)
+>>> sam_file = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13.json')
+>>> pp = ProjectPoints(points, sam_file)
+>>>
+>>> config_id_site0, SAM_config_dict_site0 = pp[0]
+>>> site_list_or_slice = pp.sites
+>>> site_list_or_slice = pp.get_sites_from_config(config_id)
+>>> ProjectPoints_sub = pp.split(0, 10, project_points)
+>>> h_list = pp.h
+
+
+
+
Parameters:
+
    +
  • points (int | slice | list | tuple | str | pd.DataFrame | dict) – Slice specifying project points, string pointing to a project +points csv, or a dataframe containing the effective csv contents. +Can also be a single integer site value.

  • +
  • sam_configs (dict | str | SAMConfig) – SAM input configuration ID(s) and file path(s). Keys are the SAM +config ID(s) which map to the config column in the project points +CSV. Values are either a JSON SAM config file or dictionary of SAM +config inputs. Can also be a single config file path or a +pre loaded SAMConfig object.

  • +
  • tech (str, optional) – SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, +solarwaterheat, troughphysicalheat, lineardirectsteam) +The string should be lower-cased with spaces and _ removed, +by default None

  • +
  • res_file (str | NoneType) – Optional resource file to find maximum length of project points if +points slice stop is None.

  • +
  • curtailment (NoneType | dict | str | config.curtailment.Curtailment) – Inputs for curtailment parameters. If not None, curtailment inputs +are expected. Can be:

    +
    +
      +
    • Explicit namespace of curtailment variables (dict)

    • +
    • Pointer to curtailment config json file with path (str)

    • +
    • Instance of curtailment config object +(config.curtailment.Curtailment)

    • +
    +
    +
  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + +

get_sites_from_config(config)

Get a site list that corresponds to a config key.

index(gid)

Get the index location (iloc not loc) for a resource gid found in the project points.

join_df(df2[, key])

Join new df2 to the _df attribute using the _df's gid as pkey.

lat_lon_coords(lat_lons, res_file, sam_configs)

Generate ProjectPoints for gids nearest to given latitude longitudes

regions(regions, res_file, sam_configs[, ...])

Generate ProjectPoints for gids nearest to given latitude longitudes

split(i0, i1, project_points)

Return split instance of a ProjectPoints instance w/ site subset.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

all_sam_input_keys

Get a list of unique input keys from all SAM technology configs.

curtailment

Get the curtailment config object.

d

Get the depths (m) corresponding to the site list.

df

Get the project points dataframe property.

gids

Get the list of gids (resource file index values) belonging to this instance of ProjectPoints.

h

Get the hub heights corresponding to the site list.

sam_config_ids

Get the SAM configs dictionary property.

sam_config_obj

Get the SAM config object.

sam_inputs

Get the SAM configuration inputs dictionary property.

sites

Get the list of sites (resource file gids) belonging to this instance of ProjectPoints.

sites_as_slice

Get the sites in slice format.

tech

Get the tech property from the config.

+
+
+property df
+

Get the project points dataframe property.

+
+
Returns:
+

_df (pd.DataFrame) – Table of sites and corresponding SAM configuration IDs. +Has columns ‘gid’ and ‘config’.

+
+
+
+ +
+
+property sam_config_ids
+

Get the SAM configs dictionary property.

+
+
Returns:
+

dict – Multi-level dictionary containing multiple SAM input config files. +The top level key is the SAM config ID, top level value is the SAM +config file path

+
+
+
+ +
+
+property sam_config_obj
+

Get the SAM config object.

+
+
Returns:
+

_sam_config_obj (reV.config.sam_config.SAMConfig) – SAM configuration object.

+
+
+
+ +
+
+property sam_inputs
+

Get the SAM configuration inputs dictionary property.

+
+
Returns:
+

dict – Multi-level dictionary containing multiple SAM input +configurations. The top level key is the SAM config ID, top level +value is the SAM config. Each SAM config is a dictionary with keys +equal to input names, values equal to the actual inputs.

+
+
+
+ +
+
+property all_sam_input_keys
+

Get a list of unique input keys from all SAM technology configs.

+
+
Returns:
+

all_sam_input_keys (list) – List of unique strings where each string is a input key for the +SAM technology configs. For example, “gcr” or “losses” for PVWatts +or “wind_turbine_hub_ht” for windpower.

+
+
+
+ +
+
+property gids
+

Get the list of gids (resource file index values) belonging to this +instance of ProjectPoints. This is an alias of self.sites.

+
+
Returns:
+

gids (list) – List of integer gids (resource file index values) belonging to this +instance of ProjectPoints. This is an alias of self.sites.

+
+
+
+ +
+
+property sites
+

Get the list of sites (resource file gids) belonging to this +instance of ProjectPoints.

+
+
Returns:
+

sites (list) – List of integer sites (resource file gids) belonging to this +instance of ProjectPoints.

+
+
+
+ +
+
+property sites_as_slice
+

Get the sites in slice format.

+
+
Returns:
+

sites_as_slice (list | slice) – Sites slice belonging to this instance of ProjectPoints. +The type is slice if possible. Will be a list only if sites are +non-sequential.

+
+
+
+ +
+
+property tech
+

Get the tech property from the config.

+
+
Returns:
+

_tech (str) – SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, +solarwaterheat, troughphysicalheat, lineardirectsteam) +The string should be lower-cased with spaces and _ removed.

+
+
+
+ +
+
+property h
+

Get the hub heights corresponding to the site list.

+
+
Returns:
+

_h (list | NoneType) – Hub heights corresponding to each site, taken from the sam config +for each site. This is None if the technology is not wind.

+
+
+
+ +
+
+property d
+

Get the depths (m) corresponding to the site list.

+
+
Returns:
+

_d (list | NoneType) – Resource depths (m) corresponding to each site, taken from +the sam config for each site. This is None if the technology +is not geothermal.

+
+
+
+ +
+
+property curtailment
+

Get the curtailment config object.

+
+
Returns:
+

_curtailment (NoneType | reV.config.curtailment.Curtailment) – None if no curtailment, reV curtailment config object if +curtailment is being assessed.

+
+
+
+ +
+
+index(gid)[source]
+

Get the index location (iloc not loc) for a resource gid found in +the project points.

+
+
Parameters:
+

gid (int) – Resource GID found in the project points gid column.

+
+
Returns:
+

ind (int) – Row index of gid in the project points dataframe.

+
+
+
+ +
+
+join_df(df2, key='gid')[source]
+

Join new df2 to the _df attribute using the _df’s gid as pkey.

+

This can be used to add site-specific data to the project_points, +taking advantage of the points_control iterator/split functions such +that only the relevant site data is passed to the analysis functions.

+
+
Parameters:
+
    +
  • df2 (pd.DataFrame) – Dataframe to be joined to the self._df attribute (this instance +of project points dataframe). This likely contains +site-specific inputs that are to be passed to parallel workers.

  • +
  • key (str) – Primary key of df2 to be joined to the _df attribute (this +instance of the project points dataframe). Primary key +of the self._df attribute is fixed as the gid column.

  • +
+
+
+
+ +
+
+get_sites_from_config(config)[source]
+

Get a site list that corresponds to a config key.

+
+
Parameters:
+

config (str) – SAM configuration ID associated with sites.

+
+
Returns:
+

sites (list) – List of sites associated with the requested configuration ID. If +the configuration ID is not recognized, an empty list is returned.

+
+
+
+ +
+
+classmethod split(i0, i1, project_points)[source]
+

Return split instance of a ProjectPoints instance w/ site subset.

+
+
Parameters:
+
    +
  • i0 (int) – Starting INDEX (not resource gid) (inclusive) of the site property +attribute to include in the split instance. This is not necessarily +the same as the starting site number, for instance if ProjectPoints +is sites 20:100, i0=0 i1=10 will result in sites 20:30.

  • +
  • i1 (int) – Ending INDEX (not resource gid) (exclusive) of the site property +attribute to include in the split instance. This is not necessarily +the same as the final site number, for instance if ProjectPoints is +sites 20:100, i0=0 i1=10 will result in sites 20:30.

  • +
  • project_points (ProjectPoints) – Instance of project points to split.

  • +
+
+
Returns:
+

sub (ProjectPoints) – New instance of ProjectPoints with a subset of the following +attributes: sites, project points df, and the self dictionary data +struct.

+
+
+
+ +
+
+classmethod lat_lon_coords(lat_lons, res_file, sam_configs, tech=None, curtailment=None)[source]
+

Generate ProjectPoints for gids nearest to given latitude longitudes

+
+
Parameters:
+
    +
  • lat_lons (str | tuple | list | ndarray) – Pair or pairs of latitude longitude coordinates

  • +
  • res_file (str) – Resource file, needed to fine nearest neighbors

  • +
  • sam_configs (dict | str | SAMConfig) – SAM input configuration ID(s) and file path(s). Keys are the SAM +config ID(s) which map to the config column in the project points +CSV. Values are either a JSON SAM config file or dictionary of SAM +config inputs. Can also be a single config file path or a +pre loaded SAMConfig object.

  • +
  • tech (str, optional) – SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, +solarwaterheat, troughphysicalheat, lineardirectsteam) +The string should be lower-cased with spaces and _ removed, +by default None

  • +
  • curtailment (NoneType | dict | str | config.curtailment.Curtailment) – Inputs for curtailment parameters. If not None, curtailment inputs +are expected. Can be:

    +
    +
      +
    • Explicit namespace of curtailment variables (dict)

    • +
    • Pointer to curtailment config json file with path (str)

    • +
    • Instance of curtailment config object +(config.curtailment.Curtailment)

    • +
    +
    +
  • +
+
+
Returns:
+

pp (ProjectPoints) – Initialized ProjectPoints object for points nearest to given +lat_lons

+
+
+
+ +
+
+classmethod regions(regions, res_file, sam_configs, tech=None, curtailment=None)[source]
+

Generate ProjectPoints for gids nearest to given latitude longitudes

+
+
Parameters:
+
    +
  • regions (dict) – Dictionary of regions to extract points for in the form: +{‘region’: ‘region_column’}

  • +
  • res_file (str) – Resource file, needed to fine nearest neighbors

  • +
  • sam_configs (dict | str | SAMConfig) – SAM input configuration ID(s) and file path(s). Keys are the SAM +config ID(s) which map to the config column in the project points +CSV. Values are either a JSON SAM config file or dictionary of SAM +config inputs. Can also be a single config file path or a +pre loaded SAMConfig object.

  • +
  • tech (str, optional) – SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, +solarwaterheat, troughphysicalheat, lineardirectsteam) +The string should be lower-cased with spaces and _ removed, +by default None

  • +
  • curtailment (NoneType | dict | str | config.curtailment.Curtailment) – Inputs for curtailment parameters. If not None, curtailment inputs +are expected. Can be:

    +
    +
      +
    • Explicit namespace of curtailment variables (dict)

    • +
    • Pointer to curtailment config json file with path (str)

    • +
    • Instance of curtailment config object +(config.curtailment.Curtailment)

    • +
    +
    +
  • +
+
+
Returns:
+

pp (ProjectPoints) – Initialized ProjectPoints object for points nearest to given +lat_lons

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.project_points.html b/_autosummary/reV.config.project_points.html new file mode 100644 index 000000000..c4de5eda0 --- /dev/null +++ b/_autosummary/reV.config.project_points.html @@ -0,0 +1,642 @@ + + + + + + + reV.config.project_points — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.project_points

+

reV Project Points Configuration

+

Classes

+ + + + + + + + + +

PointsControl(project_points[, sites_per_split])

Class to manage and split ProjectPoints.

ProjectPoints(points, sam_configs[, tech, ...])

Class to manage site and SAM input configuration requests.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.sam_config.SAMConfig.html b/_autosummary/reV.config.sam_config.SAMConfig.html new file mode 100644 index 000000000..6093a7230 --- /dev/null +++ b/_autosummary/reV.config.sam_config.SAMConfig.html @@ -0,0 +1,1021 @@ + + + + + + + reV.config.sam_config.SAMConfig — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.sam_config.SAMConfig

+
+
+class SAMConfig(SAM_configs)[source]
+

Bases: BaseConfig

+

Class to handle the SAM section of config input.

+
+
Parameters:
+

SAM_configs (dict) – Keys are config ID’s, values are filepaths to the SAM configs.

+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

check_files(flist)

Make sure all files in the input file list exist.

check_overwrite_keys(primary_key, ...)

Check for overwrite keys and raise a ConfigError if present

clear()

copy()

fromkeys([value])

Create a new dictionary with keys from iterable and values set to value.

get(key[, default])

Return the value for key if key is in the dictionary, else default.

items()

keys()

pop(k[,d])

If key is not found, d is returned if given, otherwise KeyError is raised

popitem()

Remove and return a (key, value) pair as a 2-tuple.

resolve_path(path)

Resolve a file path represented by the input string.

set_self_dict(dictlike)

Save a dict-like variable as object instance dictionary items.

setdefault(key[, default])

Insert key with a value of default if key is not in the dictionary.

str_replace_and_resolve(d, str_rep)

Perform a deep string replacement and path resolve in d.

update([E, ]**F)

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k]

values()

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

REQUIREMENTS

Required keys for config

STR_REP

Mapping of config inputs (keys) to desired replacements (values) in addition to relative file paths as demarcated by ./ and ../

bifacial

Get a boolean for whether bifacial solar analysis is being run.

clearsky

Get a boolean for whether solar resource requires clearsky irrad.

config_dir

Get the directory that the config file is in.

config_keys

List of valid config keys

downscale

Resolution to downscale NSRDB resource to.

icing

Get a boolean for whether wind generation is considering icing.

inputs

Get the SAM input file(s) (JSON/JSON5/YAML/TOML) and return as a dictionary.

log_level

Get user-specified "log_level" (DEBUG, INFO, WARNING, etc...).

name

Get the job name, defaults to 'rev'.

time_index_step

Step size for time_index for SAM profile output resolution

+
+
+property clearsky
+

Get a boolean for whether solar resource requires clearsky irrad.

+
+
Returns:
+

clearsky (bool) – Flag set in the SAM config input with key “clearsky” for solar +analysis to process generation for clearsky irradiance. +Defaults to False (normal all-sky irradiance).

+
+
+
+ +
+
+property bifacial
+

Get a boolean for whether bifacial solar analysis is being run.

+
+
Returns:
+

bifacial (bool) – Flag set in the SAM config input with key “bifaciality” for solar +analysis to analyze bifacial PV panels. Will require albedo input. +Defaults to False (no bifacial panels is default).

+
+
+
+ +
+
+property icing
+

Get a boolean for whether wind generation is considering icing.

+
+
Returns:
+

_icing (bool) – Flag for whether wind generation is considering icing effects. +Based on whether SAM input json has “en_icing_cutoff” == 1.

+
+
+
+ +
+
+property time_index_step
+

Step size for time_index for SAM profile output resolution

+
+
Returns:
+

int | None – Step size for time_index, used to reduce temporal resolution

+
+
+
+ +
+
+property downscale
+

Resolution to downscale NSRDB resource to.

+
+
Returns:
+

dict | None – Option for NSRDB resource downscaling to higher temporal +resolution. The config expects a str entry in the Pandas +frequency format, e.g. ‘5min’ or a dict of downscaling kwargs +such as {‘frequency’: ‘5min’, ‘variability_kwargs’: +{‘var_frac’: 0.05, ‘distribution’: ‘uniform’}}. +A str entry will be converted to a kwarg dict for the output +of this property e.g. ‘5min’ -> {‘frequency’: ‘5min’}

+
+
+
+ +
+
+property inputs
+

Get the SAM input file(s) (JSON/JSON5/YAML/TOML) and return +as a dictionary.

+
+
Parameters:
+

_inputs (dict) – The keys of this dictionary are the “configuration ID’s”. +The values are the imported json SAM input dictionaries.

+
+
+
+ +
+
+REQUIREMENTS = ()
+

Required keys for config

+
+ +
+
+STR_REP = {'REVDIR': '/home/runner/work/reV/reV/reV', 'TESTDATADIR': '/home/runner/work/reV/reV/tests/data'}
+

Mapping of config inputs (keys) to desired replacements (values) in +addition to relative file paths as demarcated by ./ and ../

+
+ +
+
+static check_files(flist)
+

Make sure all files in the input file list exist.

+
+
Parameters:
+

flist (list) – List of files (with paths) to check existence of.

+
+
+
+ +
+
+check_overwrite_keys(primary_key, *overwrite_keys)
+

Check for overwrite keys and raise a ConfigError if present

+
+
Parameters:
+
    +
  • primary_key (str) – Primary key that overwrites overwrite_keys, used for error message

  • +
  • overwrite_keys (str) – Key(s) to overwrite

  • +
+
+
+
+ +
+
+clear() None.  Remove all items from D.
+
+ +
+
+property config_dir
+

Get the directory that the config file is in.

+
+
Returns:
+

config_dir (str) – Directory path that the config file is in.

+
+
+
+ +
+
+property config_keys
+

List of valid config keys

+
+
Returns:
+

list

+
+
+
+ +
+
+copy() a shallow copy of D
+
+ +
+
+fromkeys(value=None, /)
+

Create a new dictionary with keys from iterable and values set to value.

+
+ +
+
+get(key, default=None, /)
+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+items() a set-like object providing a view on D's items
+
+ +
+
+keys() a set-like object providing a view on D's keys
+
+ +
+
+property log_level
+

Get user-specified “log_level” (DEBUG, INFO, WARNING, etc…).

+
+
Returns:
+

log_level (int) – Python logging module level (integer format) corresponding to the +config-specified log level string.

+
+
+
+ +
+
+property name
+

Get the job name, defaults to ‘rev’.

+
+
Returns:
+

name (str) – reV job name.

+
+
+
+ +
+
+pop(k[, d]) v, remove specified key and return the corresponding value.
+

If key is not found, d is returned if given, otherwise KeyError is raised

+
+ +
+
+popitem()
+

Remove and return a (key, value) pair as a 2-tuple.

+

Pairs are returned in LIFO (last-in, first-out) order. +Raises KeyError if the dict is empty.

+
+ +
+
+resolve_path(path)
+

Resolve a file path represented by the input string.

+

This function resolves the input string if it resembles a path. +Specifically, the string will be resolved if it starts with +“./” or “..”, or if it contains either “./” or +“..” somewhere in the string body. Otherwise, the string +is returned unchanged, so this function is safe to call on any +string, even ones that do not resemble a path.

+

This method delegates the “resolving” logic to +pathlib.Path.resolve(). This means the path is made +absolute, symlinks are resolved, and “..” components are +eliminated. If the path input starts with “./” or +“..”, it is assumed to be w.r.t the config directory, not +the run directory.

+
+
Parameters:
+

path (str) – Input file path.

+
+
Returns:
+

str – The resolved path.

+
+
+
+ +
+
+set_self_dict(dictlike)
+

Save a dict-like variable as object instance dictionary items.

+
+
Parameters:
+

dictlike (dict) – Python namespace object to set to this dictionary-emulating class.

+
+
+
+ +
+
+setdefault(key, default=None, /)
+

Insert key with a value of default if key is not in the dictionary.

+

Return the value for key if key is in the dictionary, else default.

+
+ +
+
+str_replace_and_resolve(d, str_rep)
+

Perform a deep string replacement and path resolve in d.

+
+
Parameters:
+
    +
  • d (dict) – Config dictionary potentially containing strings to replace +and/or paths to resolve.

  • +
  • str_rep (dict) – Replacement mapping where keys are strings to search for and +values are the new values.

  • +
+
+
Returns:
+

d (dict) – Config dictionary with updated strings.

+
+
+
+ +
+
+update([E, ]**F) None.  Update D from dict/iterable E and F.
+

If E is present and has a .keys() method, then does: for k in E: D[k] = E[k] +If E is present and lacks a .keys() method, then does: for k, v in E: D[k] = v +In either case, this is followed by: for k in F: D[k] = F[k]

+
+ +
+
+values() an object providing a view on D's values
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.sam_config.SAMInputsChecker.html b/_autosummary/reV.config.sam_config.SAMInputsChecker.html new file mode 100644 index 000000000..41bacde9c --- /dev/null +++ b/_autosummary/reV.config.sam_config.SAMInputsChecker.html @@ -0,0 +1,679 @@ + + + + + + + reV.config.sam_config.SAMInputsChecker — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.sam_config.SAMInputsChecker

+
+
+class SAMInputsChecker(config)[source]
+

Bases: object

+

Class to check SAM input jsons and warn against bad inputs.

+
+
Parameters:
+

config (dict) – Extracted SAM technology input config in dict form.

+
+
+

Methods

+ + + + + + + + + +

check(config)

Run checks on a SAM input json config.

check_pv()

Run input checks for a pv input config.

+

Attributes

+ + + + + + +

KEYS_PV

+
+
+check_pv()[source]
+

Run input checks for a pv input config.

+
+ +
+
+classmethod check(config)[source]
+

Run checks on a SAM input json config.

+
+
Parameters:
+

config (dict) – Extracted SAM technology input config in dict form.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.config.sam_config.html b/_autosummary/reV.config.sam_config.html new file mode 100644 index 000000000..8789471e9 --- /dev/null +++ b/_autosummary/reV.config.sam_config.html @@ -0,0 +1,642 @@ + + + + + + + reV.config.sam_config — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.config.sam_config

+

reV configuration framework for SAM config inputs.

+

Classes

+ + + + + + + + + +

SAMConfig(SAM_configs)

Class to handle the SAM section of config input.

SAMInputsChecker(config)

Class to check SAM input jsons and warn against bad inputs.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.econ.cli_econ.html b/_autosummary/reV.econ.cli_econ.html new file mode 100644 index 000000000..8a12c0b63 --- /dev/null +++ b/_autosummary/reV.econ.cli_econ.html @@ -0,0 +1,631 @@ + + + + + + + reV.econ.cli_econ — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.econ.cli_econ

+

Econ CLI utility functions.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.econ.econ.Econ.html b/_autosummary/reV.econ.econ.Econ.html new file mode 100644 index 000000000..c533508a3 --- /dev/null +++ b/_autosummary/reV.econ.econ.Econ.html @@ -0,0 +1,1225 @@ + + + + + + + reV.econ.econ.Econ — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.econ.econ.Econ

+
+
+class Econ(project_points, sam_files, cf_file, site_data=None, output_request=('lcoe_fcr',), sites_per_worker=100, memory_utilization_limit=0.4, append=False)[source]
+

Bases: BaseGen

+

reV econ analysis class.

+

reV econ analysis runs SAM econ calculations, typically to +compute LCOE (using PySAM.Lcoefcr.Lcoefcr), though +PySAM.Singleowner.Singleowner or +PySAM.Windbos.Windbos calculations can also be +performed simply by requesting outputs from those computation +modules. See the keys of +Econ.OPTIONS for all +available econ outputs. Econ computations rely on an input a +generation (i.e. capacity factor) profile. You can request +reV to run the analysis for one or more “sites”, which +correspond to the meta indices in the generation data.

+
+
Parameters:
+
    +
  • project_points (int | list | tuple | str | dict | pd.DataFrame | slice) – Input specifying which sites to process. A single integer +representing the GID of a site may be specified to evaluate +reV at a single location. A list or tuple of integers +(or slice) representing the GIDs of multiple sites can be +specified to evaluate reV at multiple specific locations. +A string pointing to a project points CSV file may also be +specified. Typically, the CSV contains two columns:

    +
    +
      +
    • gid: Integer specifying the GID of each site.

    • +
    • config: Key in the sam_files input dictionary +(see below) corresponding to the SAM configuration to +use for each particular site. This value can also be +None (or left out completely) if you specify only +a single SAM configuration file as the sam_files +input.

    • +
    +
    +

    The CSV file may also contain site-specific inputs by +including a column named after a config keyword (e.g. a +column called capital_cost may be included to specify a +site-specific capital cost value for each location). Columns +that do not correspond to a config key may also be included, +but they will be ignored. A DataFrame following the same +guidelines as the CSV input (or a dictionary that can be +used to initialize such a DataFrame) may be used for this +input as well.

    +
  • +
  • sam_files (dict | str) – A dictionary mapping SAM input configuration ID(s) to SAM +configuration(s). Keys are the SAM config ID(s) which +correspond to the config column in the project points +CSV. Values for each key are either a path to a +corresponding SAM config file or a full dictionary +of SAM config inputs. For example:

    +
    sam_files = {
    +    "default": "/path/to/default/sam.json",
    +    "onshore": "/path/to/onshore/sam_config.yaml",
    +    "offshore": {
    +        "sam_key_1": "sam_value_1",
    +        "sam_key_2": "sam_value_2",
    +        ...
    +    },
    +    ...
    +}
    +
    +
    +

    This input can also be a string pointing to a single SAM +config file. In this case, the config column of the +CSV points input should be set to None or left out +completely. See the documentation for the reV SAM class +(e.g. reV.SAM.generation.WindPower, +reV.SAM.generation.PvWattsv8, +reV.SAM.generation.Geothermal, etc.) for +documentation on the allowed and/or required SAM config file +inputs.

    +
  • +
  • cf_file (str) – Path to reV output generation file containing a capacity +factor output.

    +
    +

    Note

    +

    If executing reV from the command line, this +path can contain brackets {} that will be filled in +by the analysis_years input. Alternatively, this input +can be set to "PIPELINE" to parse the output of the +previous step (reV generation) and use it as input to +this call. However, note that duplicate executions of +reV generation within the pipeline may invalidate this +parsing, meaning the cf_file input will have to be +specified manually.

    +
    +
  • +
  • site_data (str | pd.DataFrame, optional) – Site-specific input data for SAM calculation. If this input +is a string, it should be a path that points to a CSV file. +Otherwise, this input should be a DataFrame with +pre-extracted site data. Rows in this table should match +the input sites via a gid column. The rest of the +columns should match configuration input keys that will take +site-specific values. Note that some or all site-specific +inputs can be specified via the project_points input +table instead. If None, no site-specific data is +considered. By default, None.

  • +
  • output_request (list | tuple, optional) – List of output variables requested from SAM. Can be any +of the parameters in the “Outputs” group of the PySAM module +(e.g. PySAM.Windpower.Windpower.Outputs, +PySAM.Pvwattsv8.Pvwattsv8.Outputs, +PySAM.Geothermal.Geothermal.Outputs, etc.) being +executed. This list can also include a select number of SAM +config/resource parameters to include in the output: +any key in any of the +output attribute JSON files +may be requested. Time-series profiles requested via this +input are output in UTC. By default, ('lcoe_fcr',).

  • +
  • sites_per_worker (int, optional) – Number of sites to run in series on a worker. None +defaults to the resource file chunk size. +By default, None.

  • +
  • memory_utilization_limit (float, optional) – Memory utilization limit (fractional). Must be a value +between 0 and 1. This input sets how many site results will +be stored in-memory at any given time before flushing to +disk. By default, 0.4.

  • +
  • append (bool) – Option to append econ datasets to source cf_file. +By default, False.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_site_data_to_pp(site_data)

Add the site df (site-specific inputs) to project points dataframe.

flush()

Flush the output data in self.out attribute to disk in .h5 format.

get_pc(points, points_range, sam_configs, ...)

Get a PointsControl instance.

get_sites_per_worker(res_file[, default])

Get the nominal sites per worker (x-chunk size) for a given file.

handle_leap_ti(ti[, drop_leap])

Handle a time index for a leap year by dropping a day.

run([out_fpath, max_workers, timeout, pool_size])

Execute a parallel reV econ run with smart data flushing.

site_index(site_gid[, out_index])

Get the index corresponding to the site gid.

unpack_futures(futures)

Combine list of futures results into their native dict format/type.

unpack_output(site_gid, site_output)

Unpack a SAM SiteOutput object to the output attribute.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ECON_ATTRS

LCOE_ARGS

OPTIONS

Available reV econ output_request options

OUT_ATTRS

cf_file

Get the capacity factor output filename and path.

meta

Get meta data from the source capacity factors file.

out

Get the reV gen or econ output results.

out_chunk

Get the current output chunk index range (INCLUSIVE).

output_request

Get the output variables requested from the user.

points_control

Get project points controller.

project_points

Get project points

run_attrs

Run time attributes (__init__ args and kwargs)

sam_configs

Get the sam config dictionary.

sam_metas

SAM configurations including runtime module

sam_module

Get the SAM module class to be used for SAM simulations.

site_data

Get the site-specific inputs in dataframe format.

site_limit

Get the number of sites results that can be stored in memory at once

site_mem

Get the memory (MB) required to store all results for a single site.

tech

Get the reV technology string.

time_index

Get the generation resource time index data.

year

Get the resource year.

+
+
+OPTIONS = {'bos_cost': <class 'reV.SAM.windbos.WindBos'>, 'capital_cost': <class 'reV.SAM.econ.LCOE'>, 'fixed_charge_rate': <class 'reV.SAM.econ.LCOE'>, 'fixed_operating_cost': <class 'reV.SAM.econ.LCOE'>, 'flip_actual_irr': <class 'reV.SAM.econ.SingleOwner'>, 'gross_revenue': <class 'reV.SAM.econ.SingleOwner'>, 'lcoe_fcr': <class 'reV.SAM.econ.LCOE'>, 'lcoe_nom': <class 'reV.SAM.econ.SingleOwner'>, 'lcoe_real': <class 'reV.SAM.econ.SingleOwner'>, 'ppa_price': <class 'reV.SAM.econ.SingleOwner'>, 'project_return_aftertax_npv': <class 'reV.SAM.econ.SingleOwner'>, 'sales_tax_cost': <class 'reV.SAM.windbos.WindBos'>, 'total_installed_cost': <class 'reV.SAM.windbos.WindBos'>, 'turbine_cost': <class 'reV.SAM.windbos.WindBos'>, 'variable_operating_cost': <class 'reV.SAM.econ.LCOE'>}
+

Available reV econ output_request options

+
+ +
+
+property cf_file
+

Get the capacity factor output filename and path.

+
+
Returns:
+

cf_file (str) – reV generation capacity factor output file with path.

+
+
+
+ +
+
+property meta
+

Get meta data from the source capacity factors file.

+
+
Returns:
+

_meta (pd.DataFrame) – Meta data from capacity factor outputs file.

+
+
+
+ +
+
+property time_index
+

Get the generation resource time index data.

+
+ +
+
+classmethod get_pc(points, points_range, sam_configs, cf_file, sites_per_worker=None, append=False)[source]
+

Get a PointsControl instance.

+
+
Parameters:
+
    +
  • points (slice | list | str | reV.config.project_points.PointsControl) – Slice specifying project points, or string pointing to a project +points csv, or a fully instantiated PointsControl object.

  • +
  • points_range (list | None) – Optional two-entry list specifying the index range of the sites to +analyze. To be taken from the reV.config.PointsControl.split_range +property.

  • +
  • sam_configs (dict | str | SAMConfig) – SAM input configuration ID(s) and file path(s). Keys are the SAM +config ID(s) which map to the config column in the project points +CSV. Values are either a JSON SAM config file or dictionary of SAM +config inputs. Can also be a single config file path or a +pre-loaded SAMConfig object.

  • +
  • cf_file (str) – reV generation capacity factor output file with path.

  • +
  • sites_per_worker (int) – Number of sites to run in series on a worker. None defaults to the +resource file chunk size.

  • +
  • append (bool) – Flag to append econ datasets to source cf_file. This has priority +over the out_fpath input.

  • +
+
+
Returns:
+

pc (reV.config.project_points.PointsControl) – PointsControl object instance.

+
+
+
+ +
+
+add_site_data_to_pp(site_data)
+

Add the site df (site-specific inputs) to project points dataframe.

+

This ensures that only the relevant site’s data will be passed through +to parallel workers when points_control is iterated and split.

+
+
Parameters:
+

site_data (pd.DataFrame) – Site-specific data for econ calculation. Rows correspond to sites, +columns are variables.

+
+
+
+ +
+
+flush()
+

Flush the output data in self.out attribute to disk in .h5 format.

+

The data to be flushed is accessed from the instance attribute +“self.out”. The disk target is based on the instance attributes +“self._out_fpath”. Data is not flushed if _fpath is None or if .out is +empty.

+
+ +
+
+static get_sites_per_worker(res_file, default=100)
+

Get the nominal sites per worker (x-chunk size) for a given file.

+

This is based on the concept that it is most efficient for one core to +perform one read on one chunk of resource data, such that chunks will +not have to be read into memory twice and no sites will be read +redundantly.

+
+
Parameters:
+
    +
  • res_file (str) – Filepath to single resource file, multi-h5 directory, +or /h5_dir/prefix*suffix

  • +
  • default (int) – Sites to be analyzed on a single core if the chunk size cannot be +determined from res_file.

  • +
+
+
Returns:
+

sites_per_worker (int) – Nominal sites to be analyzed per worker. This is set to the x-axis +chunk size for windspeed and dni datasets for the WTK and NSRDB +data, respectively.

+
+
+
+ +
+
+static handle_leap_ti(ti, drop_leap=False)
+

Handle a time index for a leap year by dropping a day.

+
+
Parameters:
+
    +
  • ti (pandas.DatetimeIndex) – Time-series datetime index with or without a leap day.

  • +
  • drop_leap (bool) – Option to drop leap day (if True) or drop the last day of the year +(if False).

  • +
+
+
Returns:
+

ti (pandas.DatetimeIndex) – Time-series datetime index with length a multiple of 365.

+
+
+
+ +
+
+property out
+

Get the reV gen or econ output results.

+
+
Returns:
+

out (dict) – Dictionary of gen or econ results from SAM.

+
+
+
+ +
+
+property out_chunk
+

Get the current output chunk index range (INCLUSIVE).

+
+
Returns:
+

_out_chunk (tuple) – Two entry tuple (start, end) indices (inclusive) for where the +current data in-memory belongs in the final output.

+
+
+
+ +
+
+property output_request
+

Get the output variables requested from the user.

+
+
Returns:
+

output_request (list) – Output variables requested from SAM.

+
+
+
+ +
+
+property points_control
+

Get project points controller.

+
+
Returns:
+

points_control (reV.config.project_points.PointsControl) – Project points control instance for site and SAM config spec.

+
+
+
+ +
+
+property project_points
+

Get project points

+
+
Returns:
+

project_points (reV.config.project_points.ProjectPoints) – Project points from the points control instance.

+
+
+
+ +
+
+run(out_fpath=None, max_workers=1, timeout=1800, pool_size=None)[source]
+

Execute a parallel reV econ run with smart data flushing.

+
+
Parameters:
+
    +
  • out_fpath (str, optional) – Path to output file. If this class was initialized with +append=True, this input has no effect. If None, no +output file will be written. If the filepath is specified +but the module name (econ) and/or resource data year is not +included, the module name and/or resource data year will get +added to the output file name. By default, None.

  • +
  • max_workers (int, optional) – Number of local workers to run on. By default, 1.

  • +
  • timeout (int, optional) – Number of seconds to wait for parallel run iteration to +complete before returning zeros. By default, 1800 +seconds.

  • +
  • pool_size (int, optional) – Number of futures to submit to a single process pool for +parallel futures. If None, the pool size is set to +os.cpu_count() * 2. By default, None.

  • +
+
+
Returns:
+

str | None – Path to output HDF5 file, or None if results were not +written to disk.

+
+
+
+ +
+
+property run_attrs
+

Run time attributes (__init__ args and kwargs)

+
+
Returns:
+

run_attrs (dict) – Dictionary of runtime args and kwargs

+
+
+
+ +
+
+property sam_configs
+

Get the sam config dictionary.

+
+
Returns:
+

sam_configs (dict) – SAM config from the project points instance.

+
+
+
+ +
+
+property sam_metas
+

SAM configurations including runtime module

+
+
Returns:
+

sam_metas (dict) – Nested dictionary of SAM configuration files with module used +at runtime

+
+
+
+ +
+
+property sam_module
+

Get the SAM module class to be used for SAM simulations.

+
+
Returns:
+

sam_module (object) – SAM object like PySAM.Pvwattsv7 or PySAM.Lcoefcr

+
+
+
+ +
+
+property site_data
+

Get the site-specific inputs in dataframe format.

+
+
Returns:
+

_site_data (pd.DataFrame) – Site-specific input data for gen or econ calculation. Rows match +sites, columns are variables.

+
+
+
+ +
+
+site_index(site_gid, out_index=False)
+

Get the index corresponding to the site gid.

+
+
Parameters:
+
    +
  • site_gid (int) – Resource-native site index (gid).

  • +
  • out_index (bool) – Option to get output index (if true) which is the column index in +the current in-memory output array, or (if false) the global site +index from the project points site list.

  • +
+
+
Returns:
+

index (int) – Global site index if out_index=False, otherwise column index in +the current in-memory output array.

+
+
+
+ +
+
+property site_limit
+

Get the number of sites results that can be stored in memory at once

+
+
Returns:
+

_site_limit (int) – Number of site result sets that can be stored in memory at once +without violating memory limits.

+
+
+
+ +
+
+property site_mem
+

Get the memory (MB) required to store all results for a single site.

+
+
Returns:
+

_site_mem (float) – Memory (MB) required to store all results in requested in +output_request for a single site.

+
+
+
+ +
+
+property tech
+

Get the reV technology string.

+
+
Returns:
+

tech (str) – SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, +solarwaterheat, troughphysicalheat, lineardirectsteam, econ) +The string should be lower-cased with spaces and _ removed.

+
+
+
+ +
+
+static unpack_futures(futures)
+

Combine list of futures results into their native dict format/type.

+
+
Parameters:
+

futures (list) – List of dictionary futures results.

+
+
Returns:
+

out (dict) – Compiled results of the native future results type (dict).

+
+
+
+ +
+
+unpack_output(site_gid, site_output)
+

Unpack a SAM SiteOutput object to the output attribute.

+
+
Parameters:
+
    +
  • site_gid (int) – Resource-native site gid (index).

  • +
  • site_output (dict) – SAM site output object.

  • +
+
+
+
+ +
+
+property year
+

Get the resource year.

+
+
Returns:
+

_year (int) – Year of the time-series datetime index.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.econ.econ.html b/_autosummary/reV.econ.econ.html new file mode 100644 index 000000000..81e054b4a --- /dev/null +++ b/_autosummary/reV.econ.econ.html @@ -0,0 +1,639 @@ + + + + + + + reV.econ.econ — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.econ.econ

+

reV econ module (lcoe-fcr, single owner, etc…)

+

Classes

+ + + + + + +

Econ(project_points, sam_files, cf_file[, ...])

reV econ analysis class.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.econ.economies_of_scale.EconomiesOfScale.html b/_autosummary/reV.econ.economies_of_scale.EconomiesOfScale.html new file mode 100644 index 000000000..1871ab381 --- /dev/null +++ b/_autosummary/reV.econ.economies_of_scale.EconomiesOfScale.html @@ -0,0 +1,858 @@ + + + + + + + reV.econ.economies_of_scale.EconomiesOfScale — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.econ.economies_of_scale.EconomiesOfScale

+
+
+class EconomiesOfScale(eqn, data)[source]
+

Bases: object

+

Class to calculate economies of scale where power plant capital cost is +reduced for larger power plants.

+
+

Units

+

capacity_factor : unitless +capacity : kW +annual_energy_production : kWh +fixed_charge_rate : unitless +fixed_operating_cost : $ (per year) +variable_operating_cost : $/kWh +lcoe : $/MWh

+
+
Parameters:
+
    +
  • eqn (str) – LCOE scaling equation to implement “economies of scale”. +Equation must be in python string format and return a scalar +value to multiply the capital cost by. Independent variables in +the equation should match the keys in the data input arg. This +equation may use numpy functions with the package prefix “np”.

  • +
  • data (dict | pd.DataFrame) – Namespace of econ data to use to calculate economies of scale. Keys +in dict or column labels in dataframe should match the Independent +variables in the eqn input. Should also include variables required +to calculate LCOE.

  • +
+
+
+

Methods

+ + + + + + + + + +

is_method(s)

Check if a string is a numpy/pandas or python builtin method

is_num(s)

Check if a string is a number

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

aep

Annual energy production back-calculated from the raw LCOE:

capital_cost_scalar

Evaluated output of the EconomiesOfScale equation.

fcr

Fixed charge rate from input data arg

foc

Fixed operating cost from input data arg

raw_capital_cost

Unscaled (raw) capital cost found in the data input arg.

raw_lcoe

Raw LCOE taken from the input data

scaled_capital_cost

Capital cost found in the data input arg scaled by the evaluated EconomiesOfScale input equation.

scaled_lcoe

LCOE calculated with the scaled capital cost based on the EconomiesOfScale input equation.

system_capacity

Get the system capacity in kW (SAM input, not the reV supply curve capacity).

vars

Get a list of variable names that the EconomiesOfScale equation uses as input.

voc

Variable operating cost from input data arg

+
+
+static is_num(s)[source]
+

Check if a string is a number

+
+ +
+
+static is_method(s)[source]
+

Check if a string is a numpy/pandas or python builtin method

+
+ +
+
+property vars
+

Get a list of variable names that the EconomiesOfScale equation +uses as input.

+
+
Returns:
+

vars (list) – List of strings representing variable names that were parsed from +the equation string. This will return an empty list if the equation +has no variables.

+
+
+
+ +
+
+property capital_cost_scalar
+

Evaluated output of the EconomiesOfScale equation. Should be +numeric scalars to apply directly to the capital cost.

+
+
Returns:
+

out (float | np.ndarray) – Evaluated output of the EconomiesOfScale equation. Should be +numeric scalars to apply directly to the capital cost.

+
+
+
+ +
+
+property raw_capital_cost
+

Unscaled (raw) capital cost found in the data input arg.

+
+
Returns:
+

out (float | np.ndarray) – Unscaled (raw) capital_cost found in the data input arg.

+
+
+
+ +
+
+property scaled_capital_cost
+

Capital cost found in the data input arg scaled by the evaluated +EconomiesOfScale input equation.

+
+
Returns:
+

out (float | np.ndarray) – Capital cost found in the data input arg scaled by the evaluated +EconomiesOfScale equation.

+
+
+
+ +
+
+property system_capacity
+

Get the system capacity in kW (SAM input, not the reV supply +curve capacity).

+
+
Returns:
+

out (float | np.ndarray)

+
+
+
+ +
+
+property fcr
+

Fixed charge rate from input data arg

+
+
Returns:
+

out (float | np.ndarray) – Fixed charge rate from input data arg

+
+
+
+ +
+
+property foc
+

Fixed operating cost from input data arg

+
+
Returns:
+

out (float | np.ndarray) – Fixed operating cost from input data arg

+
+
+
+ +
+
+property voc
+

Variable operating cost from input data arg

+
+
Returns:
+

out (float | np.ndarray) – Variable operating cost from input data arg

+
+
+
+ +
+
+property aep
+

Annual energy production back-calculated from the raw LCOE:

+

AEP = (fcr * raw_cap_cost + foc) / raw_lcoe

+
+
Returns:
+

out (float | np.ndarray)

+
+
+
+ +
+
+property raw_lcoe
+

Raw LCOE taken from the input data

+
+
Returns:
+

lcoe (float | np.ndarray)

+
+
+
+ +
+
+property scaled_lcoe
+

LCOE calculated with the scaled capital cost based on the +EconomiesOfScale input equation.

+

LCOE = (FCR * scaled_capital_cost + FOC) / AEP + VOC

+
+
Returns:
+

lcoe (float | np.ndarray) – LCOE calculated with the scaled capital cost based on the +EconomiesOfScale input equation.

+
+
+
+ +
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.econ.economies_of_scale.html b/_autosummary/reV.econ.economies_of_scale.html new file mode 100644 index 000000000..8d892258c --- /dev/null +++ b/_autosummary/reV.econ.economies_of_scale.html @@ -0,0 +1,640 @@ + + + + + + + reV.econ.economies_of_scale — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.econ.economies_of_scale

+

reV module for calculating economies of scale where larger power plants will +have reduced capital cost.

+

Classes

+ + + + + + +

EconomiesOfScale(eqn, data)

Class to calculate economies of scale where power plant capital cost is reduced for larger power plants.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.econ.html b/_autosummary/reV.econ.html new file mode 100644 index 000000000..4ebb2b3dc --- /dev/null +++ b/_autosummary/reV.econ.html @@ -0,0 +1,646 @@ + + + + + + + reV.econ — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.econ

+

reV Econ analysis module.

+ + + + + + + + + + + + + + + +

reV.econ.cli_econ

Econ CLI utility functions.

reV.econ.econ

reV econ module (lcoe-fcr, single owner, etc...)

reV.econ.economies_of_scale

reV module for calculating economies of scale where larger power plants will have reduced capital cost.

reV.econ.utilities

reV Econ utilities

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.econ.utilities.html b/_autosummary/reV.econ.utilities.html new file mode 100644 index 000000000..5a6cc5c04 --- /dev/null +++ b/_autosummary/reV.econ.utilities.html @@ -0,0 +1,639 @@ + + + + + + + reV.econ.utilities — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.econ.utilities

+

reV Econ utilities

+

Functions

+ + + + + + +

lcoe_fcr(fixed_charge_rate, capital_cost, ...)

Calculate the Levelized Cost of Electricity (LCOE) using the fixed-charge-rate method:

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.econ.utilities.lcoe_fcr.html b/_autosummary/reV.econ.utilities.lcoe_fcr.html new file mode 100644 index 000000000..5d468e66f --- /dev/null +++ b/_autosummary/reV.econ.utilities.lcoe_fcr.html @@ -0,0 +1,657 @@ + + + + + + + reV.econ.utilities.lcoe_fcr — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.econ.utilities.lcoe_fcr

+
+
+lcoe_fcr(fixed_charge_rate, capital_cost, fixed_operating_cost, annual_energy_production, variable_operating_cost)[source]
+

Calculate the Levelized Cost of Electricity (LCOE) using the +fixed-charge-rate method:

+
+
LCOE = ((fixed_charge_rate * capital_cost + fixed_operating_cost)

/ annual_energy_production + variable_operating_cost)

+
+
+
+
Parameters:
+
    +
  • fixed_charge_rate (float | np.ndarray) – Fixed charge rate (unitless)

  • +
  • capital_cost (float | np.ndarray) – Capital cost (aka Capital Expenditures) ($)

  • +
  • fixed_operating_cost (float | np.ndarray) – Fixed annual operating cost ($/year)

  • +
  • annual_energy_production (float | np.ndarray) – Annual energy production (kWh for year) +(can be calculated as capacity * cf * 8760)

  • +
  • variable_operating_cost (float | np.ndarray) – Variable operating cost ($/kWh)

  • +
+
+
Returns:
+

lcoe (float | np.ndarray) – LCOE in $/MWh

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.generation.base.BaseGen.html b/_autosummary/reV.generation.base.BaseGen.html new file mode 100644 index 000000000..774a497c4 --- /dev/null +++ b/_autosummary/reV.generation.base.BaseGen.html @@ -0,0 +1,1096 @@ + + + + + + + reV.generation.base.BaseGen — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.generation.base.BaseGen

+
+
+class BaseGen(points_control, output_request, site_data=None, drop_leap=False, memory_utilization_limit=0.4, scale_outputs=True)[source]
+

Bases: ABC

+

Base class for reV gen and econ classes to run SAM simulations.

+
+
Parameters:
+
    +
  • points_control (reV.config.project_points.PointsControl) – Project points control instance for site and SAM config spec.

  • +
  • output_request (list | tuple) – Output variables requested from SAM.

  • +
  • site_data (str | pd.DataFrame | None) – Site-specific input data for SAM calculation. String should be a +filepath that points to a csv, DataFrame is pre-extracted data. +Rows match sites, columns are input keys. Need a “gid” column. +Input as None if no site-specific data.

  • +
  • drop_leap (bool) – Drop leap day instead of final day of year during leap years.

  • +
  • memory_utilization_limit (float) – Memory utilization limit (fractional). This sets how many site +results will be stored in-memory at any given time before flushing +to disk.

  • +
  • scale_outputs (bool) – Flag to scale outputs in-place immediately upon Gen returning data.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_site_data_to_pp(site_data)

Add the site df (site-specific inputs) to project points dataframe.

flush()

Flush the output data in self.out attribute to disk in .h5 format.

get_pc(points, points_range, sam_configs, tech)

Get a PointsControl instance.

get_sites_per_worker(res_file[, default])

Get the nominal sites per worker (x-chunk size) for a given file.

handle_leap_ti(ti[, drop_leap])

Handle a time index for a leap year by dropping a day.

site_index(site_gid[, out_index])

Get the index corresponding to the site gid.

unpack_futures(futures)

Combine list of futures results into their native dict format/type.

unpack_output(site_gid, site_output)

Unpack a SAM SiteOutput object to the output attribute.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ECON_ATTRS

LCOE_ARGS

OPTIONS

OUT_ATTRS

meta

Get resource meta for all sites in project points.

out

Get the reV gen or econ output results.

out_chunk

Get the current output chunk index range (INCLUSIVE).

output_request

Get the output variables requested from the user.

points_control

Get project points controller.

project_points

Get project points

run_attrs

Run time attributes (__init__ args and kwargs)

sam_configs

Get the sam config dictionary.

sam_metas

SAM configurations including runtime module

sam_module

Get the SAM module class to be used for SAM simulations.

site_data

Get the site-specific inputs in dataframe format.

site_limit

Get the number of sites results that can be stored in memory at once

site_mem

Get the memory (MB) required to store all results for a single site.

tech

Get the reV technology string.

time_index

Get the resource time index data.

year

Get the resource year.

+
+
+property output_request
+

Get the output variables requested from the user.

+
+
Returns:
+

output_request (list) – Output variables requested from SAM.

+
+
+
+ +
+
+property out_chunk
+

Get the current output chunk index range (INCLUSIVE).

+
+
Returns:
+

_out_chunk (tuple) – Two entry tuple (start, end) indices (inclusive) for where the +current data in-memory belongs in the final output.

+
+
+
+ +
+
+property site_data
+

Get the site-specific inputs in dataframe format.

+
+
Returns:
+

_site_data (pd.DataFrame) – Site-specific input data for gen or econ calculation. Rows match +sites, columns are variables.

+
+
+
+ +
+
+property site_limit
+

Get the number of sites results that can be stored in memory at once

+
+
Returns:
+

_site_limit (int) – Number of site result sets that can be stored in memory at once +without violating memory limits.

+
+
+
+ +
+
+property site_mem
+

Get the memory (MB) required to store all results for a single site.

+
+
Returns:
+

_site_mem (float) – Memory (MB) required to store all results in requested in +output_request for a single site.

+
+
+
+ +
+
+property points_control
+

Get project points controller.

+
+
Returns:
+

points_control (reV.config.project_points.PointsControl) – Project points control instance for site and SAM config spec.

+
+
+
+ +
+
+property project_points
+

Get project points

+
+
Returns:
+

project_points (reV.config.project_points.ProjectPoints) – Project points from the points control instance.

+
+
+
+ +
+
+property sam_configs
+

Get the sam config dictionary.

+
+
Returns:
+

sam_configs (dict) – SAM config from the project points instance.

+
+
+
+ +
+
+property sam_metas
+

SAM configurations including runtime module

+
+
Returns:
+

sam_metas (dict) – Nested dictionary of SAM configuration files with module used +at runtime

+
+
+
+ +
+
+property sam_module
+

Get the SAM module class to be used for SAM simulations.

+
+
Returns:
+

sam_module (object) – SAM object like PySAM.Pvwattsv7 or PySAM.Lcoefcr

+
+
+
+ +
+
+property meta
+

Get resource meta for all sites in project points.

+
+
Returns:
+

meta (pd.DataFrame) – Meta data df for sites in project points. Column names are meta +data variables, rows are different sites. The row index +does not indicate the site number if the project points are +non-sequential or do not start from 0, so a ‘gid’ column is added.

+
+
+
+ +
+
+property time_index
+

Get the resource time index data.

+
+
Returns:
+

_time_index (pandas.DatetimeIndex) – Time-series datetime index

+
+
+
+ +
+
+property run_attrs
+

Run time attributes (__init__ args and kwargs)

+
+
Returns:
+

run_attrs (dict) – Dictionary of runtime args and kwargs

+
+
+
+ +
+
+property year
+

Get the resource year.

+
+
Returns:
+

_year (int) – Year of the time-series datetime index.

+
+
+
+ +
+
+property tech
+

Get the reV technology string.

+
+
Returns:
+

tech (str) – SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, +solarwaterheat, troughphysicalheat, lineardirectsteam, econ) +The string should be lower-cased with spaces and _ removed.

+
+
+
+ +
+
+property out
+

Get the reV gen or econ output results.

+
+
Returns:
+

out (dict) – Dictionary of gen or econ results from SAM.

+
+
+
+ +
+
+static handle_leap_ti(ti, drop_leap=False)[source]
+

Handle a time index for a leap year by dropping a day.

+
+
Parameters:
+
    +
  • ti (pandas.DatetimeIndex) – Time-series datetime index with or without a leap day.

  • +
  • drop_leap (bool) – Option to drop leap day (if True) or drop the last day of the year +(if False).

  • +
+
+
Returns:
+

ti (pandas.DatetimeIndex) – Time-series datetime index with length a multiple of 365.

+
+
+
+ +
+
+classmethod get_pc(points, points_range, sam_configs, tech, sites_per_worker=None, res_file=None, curtailment=None)[source]
+

Get a PointsControl instance.

+
+
Parameters:
+
    +
  • points (int | slice | list | str | pandas.DataFrame | PointsControl) – Single site integer, +or slice or list specifying project points, +or string pointing to a project points csv, +or a pre-loaded project points DataFrame, +or a fully instantiated PointsControl object.

  • +
  • points_range (list | None) – Optional two-entry list specifying the index range of the sites to +analyze. To be taken from the reV.config.PointsControl.split_range +property.

  • +
  • sam_configs (dict | str | SAMConfig) – SAM input configuration ID(s) and file path(s). Keys are the SAM +config ID(s) which map to the config column in the project points +CSV. Values are either a JSON SAM config file or dictionary of SAM +config inputs. Can also be a single config file path or a +pre loaded SAMConfig object.

  • +
  • tech (str) – SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, +solarwaterheat, troughphysicalheat, lineardirectsteam) +The string should be lower-cased with spaces and _ removed.

  • +
  • sites_per_worker (int) – Number of sites to run in series on a worker. None defaults to the +resource file chunk size.

  • +
  • res_file (str) – Filepath to single resource file, multi-h5 directory, +or /h5_dir/prefix*suffix

  • +
  • curtailment (NoneType | dict | str | config.curtailment.Curtailment) – Inputs for curtailment parameters. If not None, curtailment inputs +are expected. Can be:

    +
    +
      +
    • Explicit namespace of curtailment variables (dict)

    • +
    • Pointer to curtailment config json file with path (str)

    • +
    • Instance of curtailment config object +(config.curtailment.Curtailment)

    • +
    +
    +
  • +
+
+
Returns:
+

pc (reV.config.project_points.PointsControl) – PointsControl object instance.

+
+
+
+ +
+
+static get_sites_per_worker(res_file, default=100)[source]
+

Get the nominal sites per worker (x-chunk size) for a given file.

+

This is based on the concept that it is most efficient for one core to +perform one read on one chunk of resource data, such that chunks will +not have to be read into memory twice and no sites will be read +redundantly.

+
+
Parameters:
+
    +
  • res_file (str) – Filepath to single resource file, multi-h5 directory, +or /h5_dir/prefix*suffix

  • +
  • default (int) – Sites to be analyzed on a single core if the chunk size cannot be +determined from res_file.

  • +
+
+
Returns:
+

sites_per_worker (int) – Nominal sites to be analyzed per worker. This is set to the x-axis +chunk size for windspeed and dni datasets for the WTK and NSRDB +data, respectively.

+
+
+
+ +
+
+static unpack_futures(futures)[source]
+

Combine list of futures results into their native dict format/type.

+
+
Parameters:
+

futures (list) – List of dictionary futures results.

+
+
Returns:
+

out (dict) – Compiled results of the native future results type (dict).

+
+
+
+ +
+
+add_site_data_to_pp(site_data)[source]
+

Add the site df (site-specific inputs) to project points dataframe.

+

This ensures that only the relevant site’s data will be passed through +to parallel workers when points_control is iterated and split.

+
+
Parameters:
+

site_data (pd.DataFrame) – Site-specific data for econ calculation. Rows correspond to sites, +columns are variables.

+
+
+
+ +
+
+unpack_output(site_gid, site_output)[source]
+

Unpack a SAM SiteOutput object to the output attribute.

+
+
Parameters:
+
    +
  • site_gid (int) – Resource-native site gid (index).

  • +
  • site_output (dict) – SAM site output object.

  • +
+
+
+
+ +
+
+site_index(site_gid, out_index=False)[source]
+

Get the index corresponding to the site gid.

+
+
Parameters:
+
    +
  • site_gid (int) – Resource-native site index (gid).

  • +
  • out_index (bool) – Option to get output index (if true) which is the column index in +the current in-memory output array, or (if false) the global site +index from the project points site list.

  • +
+
+
Returns:
+

index (int) – Global site index if out_index=False, otherwise column index in +the current in-memory output array.

+
+
+
+ +
+
+flush()[source]
+

Flush the output data in self.out attribute to disk in .h5 format.

+

The data to be flushed is accessed from the instance attribute +“self.out”. The disk target is based on the instance attributes +“self._out_fpath”. Data is not flushed if _fpath is None or if .out is +empty.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.generation.base.html b/_autosummary/reV.generation.base.html new file mode 100644 index 000000000..7dbdc99ca --- /dev/null +++ b/_autosummary/reV.generation.base.html @@ -0,0 +1,639 @@ + + + + + + + reV.generation.base — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.generation.base

+

reV base gen and econ module.

+

Classes

+ + + + + + +

BaseGen(points_control, output_request[, ...])

Base class for reV gen and econ classes to run SAM simulations.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.generation.cli_gen.html b/_autosummary/reV.generation.cli_gen.html new file mode 100644 index 000000000..d5bea3853 --- /dev/null +++ b/_autosummary/reV.generation.cli_gen.html @@ -0,0 +1,631 @@ + + + + + + + reV.generation.cli_gen — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.generation.cli_gen

+

Generation CLI utility functions.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.generation.generation.Gen.html b/_autosummary/reV.generation.generation.Gen.html new file mode 100644 index 000000000..d031e9828 --- /dev/null +++ b/_autosummary/reV.generation.generation.Gen.html @@ -0,0 +1,1421 @@ + + + + + + + reV.generation.generation.Gen — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.generation.generation.Gen

+
+
+class Gen(technology, project_points, sam_files, resource_file, low_res_resource_file=None, output_request=('cf_mean',), site_data=None, curtailment=None, gid_map=None, drop_leap=False, sites_per_worker=None, memory_utilization_limit=0.4, scale_outputs=True, write_mapped_gids=False, bias_correct=None)[source]
+

Bases: BaseGen

+

reV generation analysis class.

+

reV generation analysis runs SAM simulations by piping in +renewable energy resource data (usually from the NSRDB or WTK), +loading the SAM config, and then executing the PySAM compute +module for a given technology. See the documentation for the +reV SAM class (e.g. reV.SAM.generation.WindPower, +reV.SAM.generation.PvWattsv8, +reV.SAM.generation.Geothermal, etc.) for info on the +allowed and/or required SAM config file inputs. If economic +parameters are supplied in the SAM config, then you can bundle a +“follow-on” econ calculation by just adding the desired econ +output keys to the output_request. You can request reV to +run the analysis for one or more “sites”, which correspond to +the meta indices in the resource data (also commonly called the +gid's).

+

Examples

+

The following is an example of the most simple way to run reV +generation. Note that the TESTDATADIR refers to the local cloned +repository and will need to be replaced with a valid path if you +installed reV via a simple pip install.

+
>>> import os
+>>> from reV import Gen, TESTDATADIR
+>>>
+>>> sam_tech = 'pvwattsv7'
+>>> sites = 0
+>>> fp_sam = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13.json')
+>>> fp_res = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2013.h5')
+>>>
+>>> gen = Gen(sam_tech, sites, fp_sam, fp_res)
+>>> gen.run()
+>>>
+>>> gen.out
+{'cf_mean': array([0.16966143], dtype=float32)}
+>>>
+>>> sites = [3, 4, 7, 9]
+>>> req = ('cf_mean', 'cf_profile', 'lcoe_fcr')
+>>> gen = Gen(sam_tech, sites, fp_sam, fp_res, output_request=req)
+>>> gen.run()
+>>>
+>>> gen.out
+{'lcoe_fcr': array([131.39166, 131.31221, 127.54539, 125.49656]),
+'cf_mean': array([0.17713654, 0.17724372, 0.1824783 , 0.1854574 ]),
+'cf_profile': array([[0., 0., 0., 0.],
+        [0., 0., 0., 0.],
+        [0., 0., 0., 0.],
+        ...,
+        [0., 0., 0., 0.],
+        [0., 0., 0., 0.],
+        [0., 0., 0., 0.]])}
+
+
+
+
Parameters:
+
    +
  • technology (str) – String indicating which SAM technology to analyze. Must be +one of the keys of +OPTIONS. The string +should be lower-cased with spaces and underscores removed.

  • +
  • project_points (int | list | tuple | str | dict | pd.DataFrame | slice) – Input specifying which sites to process. A single integer +representing the generation GID of a site may be specified +to evaluate reV at a single location. A list or tuple of +integers (or slice) representing the generation GIDs of +multiple sites can be specified to evaluate reV at multiple +specific locations. A string pointing to a project points +CSV file may also be specified. Typically, the CSV contains +two columns:

    +
    +
      +
    • gid: Integer specifying the generation GID of each +site.

    • +
    • config: Key in the sam_files input dictionary +(see below) corresponding to the SAM configuration to +use for each particular site. This value can also be +None (or left out completely) if you specify only +a single SAM configuration file as the sam_files +input.

    • +
    +
    +

    The CSV file may also contain site-specific inputs by +including a column named after a config keyword (e.g. a +column called capital_cost may be included to specify a +site-specific capital cost value for each location). Columns +that do not correspond to a config key may also be included, +but they will be ignored. A DataFrame following the same +guidelines as the CSV input (or a dictionary that can be +used to initialize such a DataFrame) may be used for this +input as well.

    +
    +

    Note

    +

    By default, the generation GID of each site is +assumed to match the resource GID to be evaluated for that +site. However, unique generation GID’s can be mapped to +non-unique resource GID’s via the gid_map input (see the +documentation for gid_map for more details).

    +
    +
  • +
  • sam_files (dict | str) – A dictionary mapping SAM input configuration ID(s) to SAM +configuration(s). Keys are the SAM config ID(s) which +correspond to the config column in the project points +CSV. Values for each key are either a path to a +corresponding SAM config file or a full dictionary +of SAM config inputs. For example:

    +
    sam_files = {
    +    "default": "/path/to/default/sam.json",
    +    "onshore": "/path/to/onshore/sam_config.yaml",
    +    "offshore": {
    +        "sam_key_1": "sam_value_1",
    +        "sam_key_2": "sam_value_2",
    +        ...
    +    },
    +    ...
    +}
    +
    +
    +

    This input can also be a string pointing to a single SAM +config file. In this case, the config column of the +CSV points input should be set to None or left out +completely. See the documentation for the reV SAM class +(e.g. reV.SAM.generation.WindPower, +reV.SAM.generation.PvWattsv8, +reV.SAM.generation.Geothermal, etc.) for +info on the allowed and/or required SAM config file inputs.

    +
  • +
  • resource_file (str) – Filepath to resource data. This input can be path to a +single resource HDF5 file, a path to a directory containing +data spread across multiple HDF5 files, or a path including +a wildcard input like /h5_dir/prefix*suffix. In all +cases, the resource data must be readable by +rex.resource.Resource +or rex.multi_file_resource.MultiFileResource. +(i.e. the resource data conform to the +rex data format). This +means the data file(s) must contain a 1D time_index +dataset indicating the UTC time of observation, a 1D +meta dataset represented by a DataFrame with +site-specific columns, and 2D resource datasets that match +the dimensions of (time_index, meta). The time index +must start at 00:00 of January 1st of the year under +consideration, and its shape must be a multiple of 8760.

    +
    +

    Note

    +

    If executing reV from the command line, this +path can contain brackets {} that will be filled in by +the analysis_years input.

    +
    +
    +

    Important

    +

    If you are using custom resource data (i.e. +not NSRDB/WTK/Sup3rCC, etc.), ensure the following:

    +
    +
      +
    • The data conforms to the +rex data format.

    • +
    • The meta DataFrame is organized such that every +row is a pixel and at least the columns +latitude, longitude, timezone, and +elevation are given for each location.

    • +
    • The time index and associated temporal data is in +UTC.

    • +
    • The latitude is between -90 and 90 and longitude is +between -180 and 180.

    • +
    • For solar data, ensure the DNI/DHI are not zero. You +can calculate one of these inputs from +the other using the relationship

      +
      +\[GHI = DNI * cos(SZA) + DHI\]
      +
    • +
    +
    +
    +
  • +
  • low_res_resource_file (str, optional) – Optional low resolution resource file that will be +dynamically mapped+interpolated to the nominal-resolution +resource_file. This needs to be of the same format as +resource_file - both files need to be handled by the +same rex Resource handler (e.g. WindResource). All +of the requirements from the resource_file apply to this +input as well. If None, no dynamic mapping to higher +resolutions is performed. By default, None.

  • +
  • output_request (list | tuple, optional) – List of output variables requested from SAM. Can be any +of the parameters in the “Outputs” group of the PySAM module +(e.g. PySAM.Windpower.Windpower.Outputs, +PySAM.Pvwattsv8.Pvwattsv8.Outputs, +PySAM.Geothermal.Geothermal.Outputs, etc.) being +executed. This list can also include a select number of SAM +config/resource parameters to include in the output: +any key in any of the +output attribute JSON files +may be requested. If cf_mean is not included in this +list, it will automatically be added. Time-series profiles +requested via this input are output in UTC.

    +
    +

    Note

    +

    If you are performing reV solar runs using +PVWatts and would like reV to include AC capacity +values in your aggregation/supply curves, then you must +include the "dc_ac_ratio" time series as an output in +output_request when running reV generation. The AC +capacity outputs will automatically be added during the +aggregation/supply curve step if the "dc_ac_ratio" +dataset is detected in the generation file.

    +
    +

    By default, ('cf_mean',).

    +
  • +
  • site_data (str | pd.DataFrame, optional) – Site-specific input data for SAM calculation. If this input +is a string, it should be a path that points to a CSV file. +Otherwise, this input should be a DataFrame with +pre-extracted site data. Rows in this table should match +the input sites via a gid column. The rest of the +columns should match configuration input keys that will take +site-specific values. Note that some or all site-specific +inputs can be specified via the project_points input +table instead. If None, no site-specific data is +considered. By default, None.

  • +
  • curtailment (dict | str, optional) –

    +

    Inputs for curtailment parameters, which can be:

    +
    +
      +
    • Explicit namespace of curtailment variables (dict)

    • +
    • Pointer to curtailment config file with path (str)

    • +
    +
    +

    The allowed key-value input pairs in the curtailment +configuration are documented as properties of the +reV.config.curtailment.Curtailment class. If +None, no curtailment is modeled. By default, None.

    +
  • +
  • gid_map (dict | str, optional) – Mapping of unique integer generation gids (keys) to single +integer resource gids (values). This enables unique +generation gids in the project points to map to non-unique +resource gids, which can be useful when evaluating multiple +resource datasets in reV (e.g., forecasted ECMWF +resource data to complement historical WTK meteorology). +This input can be a pre-extracted dictionary or a path to a +JSON or CSV file. If this input points to a CSV file, the +file must have the columns gid (which matches the +project points) and gid_map (gids to extract from the +resource input). If None, the GID values in the project +points are assumed to match the resource GID values. +By default, None.

  • +
  • drop_leap (bool, optional) – Drop leap day instead of final day of year when handling +leap years. By default, False.

  • +
  • sites_per_worker (int, optional) – Number of sites to run in series on a worker. None +defaults to the resource file chunk size. +By default, None.

  • +
  • memory_utilization_limit (float, optional) – Memory utilization limit (fractional). Must be a value +between 0 and 1. This input sets how many site results will +be stored in-memory at any given time before flushing to +disk. By default, 0.4.

  • +
  • scale_outputs (bool, optional) – Flag to scale outputs in-place immediately upon Gen +returning data. By default, True.

  • +
  • write_mapped_gids (bool, optional) – Option to write mapped gids to output meta instead of +resource gids. By default, False.

  • +
  • bias_correct (str | pd.DataFrame, optional) – Optional DataFrame or CSV filepath to a wind or solar +resource bias correction table. This has columns:

    +
    +
      +
    • gid: GID of site (can be index name)

    • +
    • adder: Value to add to resource at each site

    • +
    • scalar: Value to scale resource at each site by

    • +
    +
    +

    The gid field should match the true resource gid +regardless of the optional gid_map input. If both +adder and scalar are present, the wind or solar +resource is corrected by \((res*scalar)+adder\). If +either is missing, scalar defaults to 1 and +adder to 0. Only windspeed or GHI + DNI are +corrected, depending on the technology (wind for the former, +solar for the latter). GHI and DNI are corrected with +the same correction factors. If None, no corrections are +applied. By default, None.

    +
  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_site_data_to_pp(site_data)

Add the site df (site-specific inputs) to project points dataframe.

flush()

Flush the output data in self.out attribute to disk in .h5 format.

get_pc(points, points_range, sam_configs, tech)

Get a PointsControl instance.

get_sites_per_worker(res_file[, default])

Get the nominal sites per worker (x-chunk size) for a given file.

handle_leap_ti(ti[, drop_leap])

Handle a time index for a leap year by dropping a day.

run([out_fpath, max_workers, timeout, pool_size])

Execute a parallel reV generation run with smart data flushing.

site_index(site_gid[, out_index])

Get the index corresponding to the site gid.

unpack_futures(futures)

Combine list of futures results into their native dict format/type.

unpack_output(site_gid, site_output)

Unpack a SAM SiteOutput object to the output attribute.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ECON_ATTRS

LCOE_ARGS

OPTIONS

reV technology options.

OUT_ATTRS

lr_res_file

Get the (optional) low-resolution resource filename and path.

meta

Get resource meta for all sites in project points.

out

Get the reV gen or econ output results.

out_chunk

Get the current output chunk index range (INCLUSIVE).

output_request

Get the output variables requested from the user.

points_control

Get project points controller.

project_points

Get project points

res_file

Get the resource filename and path.

run_attrs

Run time attributes (__init__ args and kwargs)

sam_configs

Get the sam config dictionary.

sam_metas

SAM configurations including runtime module

sam_module

Get the SAM module class to be used for SAM simulations.

site_data

Get the site-specific inputs in dataframe format.

site_limit

Get the number of sites results that can be stored in memory at once

site_mem

Get the memory (MB) required to store all results for a single site.

tech

Get the reV technology string.

time_index

Get the generation resource time index data.

year

Get the resource year.

+
+
+OPTIONS = {'geothermal': <class 'reV.SAM.generation.Geothermal'>, 'lineardirectsteam': <class 'reV.SAM.generation.LinearDirectSteam'>, 'mhkwave': <class 'reV.SAM.generation.MhkWave'>, 'pvsamv1': <class 'reV.SAM.generation.PvSamv1'>, 'pvwattsv5': <class 'reV.SAM.generation.PvWattsv5'>, 'pvwattsv7': <class 'reV.SAM.generation.PvWattsv7'>, 'pvwattsv8': <class 'reV.SAM.generation.PvWattsv8'>, 'solarwaterheat': <class 'reV.SAM.generation.SolarWaterHeat'>, 'tcsmoltensalt': <class 'reV.SAM.generation.TcsMoltenSalt'>, 'troughphysicalheat': <class 'reV.SAM.generation.TroughPhysicalHeat'>, 'windpower': <class 'reV.SAM.generation.WindPower'>}
+

reV technology options.

+
+ +
+
+property res_file
+

Get the resource filename and path.

+
+
Returns:
+

res_file (str) – Filepath to single resource file, multi-h5 directory, +or /h5_dir/prefix*suffix

+
+
+
+ +
+
+property lr_res_file
+

Get the (optional) low-resolution resource filename and path.

+
+
Returns:
+

str | None

+
+
+
+ +
+
+property meta
+

Get resource meta for all sites in project points.

+
+
Returns:
+

meta (pd.DataFrame) – Meta data df for sites in project points. Column names are meta +data variables, rows are different sites. The row index +does not indicate the site number if the project points are +non-sequential or do not start from 0, so a ‘gid’ column is added.

+
+
+
+ +
+
+property time_index
+

Get the generation resource time index data.

+
+
Returns:
+

_time_index (pandas.DatetimeIndex) – Time-series datetime index

+
+
+
+ +
+
+add_site_data_to_pp(site_data)
+

Add the site df (site-specific inputs) to project points dataframe.

+

This ensures that only the relevant site’s data will be passed through +to parallel workers when points_control is iterated and split.

+
+
Parameters:
+

site_data (pd.DataFrame) – Site-specific data for econ calculation. Rows correspond to sites, +columns are variables.

+
+
+
+ +
+
+flush()
+

Flush the output data in self.out attribute to disk in .h5 format.

+

The data to be flushed is accessed from the instance attribute +“self.out”. The disk target is based on the instance attributes +“self._out_fpath”. Data is not flushed if _fpath is None or if .out is +empty.

+
+ +
+
+classmethod get_pc(points, points_range, sam_configs, tech, sites_per_worker=None, res_file=None, curtailment=None)
+

Get a PointsControl instance.

+
+
Parameters:
+
    +
  • points (int | slice | list | str | pandas.DataFrame | PointsControl) – Single site integer, +or slice or list specifying project points, +or string pointing to a project points csv, +or a pre-loaded project points DataFrame, +or a fully instantiated PointsControl object.

  • +
  • points_range (list | None) – Optional two-entry list specifying the index range of the sites to +analyze. To be taken from the reV.config.PointsControl.split_range +property.

  • +
  • sam_configs (dict | str | SAMConfig) – SAM input configuration ID(s) and file path(s). Keys are the SAM +config ID(s) which map to the config column in the project points +CSV. Values are either a JSON SAM config file or dictionary of SAM +config inputs. Can also be a single config file path or a +pre loaded SAMConfig object.

  • +
  • tech (str) – SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, +solarwaterheat, troughphysicalheat, lineardirectsteam) +The string should be lower-cased with spaces and _ removed.

  • +
  • sites_per_worker (int) – Number of sites to run in series on a worker. None defaults to the +resource file chunk size.

  • +
  • res_file (str) – Filepath to single resource file, multi-h5 directory, +or /h5_dir/prefix*suffix

  • +
  • curtailment (NoneType | dict | str | config.curtailment.Curtailment) – Inputs for curtailment parameters. If not None, curtailment inputs +are expected. Can be:

    +
    +
      +
    • Explicit namespace of curtailment variables (dict)

    • +
    • Pointer to curtailment config json file with path (str)

    • +
    • Instance of curtailment config object +(config.curtailment.Curtailment)

    • +
    +
    +
  • +
+
+
Returns:
+

pc (reV.config.project_points.PointsControl) – PointsControl object instance.

+
+
+
+ +
+
+static get_sites_per_worker(res_file, default=100)
+

Get the nominal sites per worker (x-chunk size) for a given file.

+

This is based on the concept that it is most efficient for one core to +perform one read on one chunk of resource data, such that chunks will +not have to be read into memory twice and no sites will be read +redundantly.

+
+
Parameters:
+
    +
  • res_file (str) – Filepath to single resource file, multi-h5 directory, +or /h5_dir/prefix*suffix

  • +
  • default (int) – Sites to be analyzed on a single core if the chunk size cannot be +determined from res_file.

  • +
+
+
Returns:
+

sites_per_worker (int) – Nominal sites to be analyzed per worker. This is set to the x-axis +chunk size for windspeed and dni datasets for the WTK and NSRDB +data, respectively.

+
+
+
+ +
+
+static handle_leap_ti(ti, drop_leap=False)
+

Handle a time index for a leap year by dropping a day.

+
+
Parameters:
+
    +
  • ti (pandas.DatetimeIndex) – Time-series datetime index with or without a leap day.

  • +
  • drop_leap (bool) – Option to drop leap day (if True) or drop the last day of the year +(if False).

  • +
+
+
Returns:
+

ti (pandas.DatetimeIndex) – Time-series datetime index with length a multiple of 365.

+
+
+
+ +
+
+property out
+

Get the reV gen or econ output results.

+
+
Returns:
+

out (dict) – Dictionary of gen or econ results from SAM.

+
+
+
+ +
+
+property out_chunk
+

Get the current output chunk index range (INCLUSIVE).

+
+
Returns:
+

_out_chunk (tuple) – Two entry tuple (start, end) indices (inclusive) for where the +current data in-memory belongs in the final output.

+
+
+
+ +
+
+property output_request
+

Get the output variables requested from the user.

+
+
Returns:
+

output_request (list) – Output variables requested from SAM.

+
+
+
+ +
+
+property points_control
+

Get project points controller.

+
+
Returns:
+

points_control (reV.config.project_points.PointsControl) – Project points control instance for site and SAM config spec.

+
+
+
+ +
+
+property project_points
+

Get project points

+
+
Returns:
+

project_points (reV.config.project_points.ProjectPoints) – Project points from the points control instance.

+
+
+
+ +
+
+property run_attrs
+

Run time attributes (__init__ args and kwargs)

+
+
Returns:
+

run_attrs (dict) – Dictionary of runtime args and kwargs

+
+
+
+ +
+
+property sam_configs
+

Get the sam config dictionary.

+
+
Returns:
+

sam_configs (dict) – SAM config from the project points instance.

+
+
+
+ +
+
+property sam_metas
+

SAM configurations including runtime module

+
+
Returns:
+

sam_metas (dict) – Nested dictionary of SAM configuration files with module used +at runtime

+
+
+
+ +
+
+property sam_module
+

Get the SAM module class to be used for SAM simulations.

+
+
Returns:
+

sam_module (object) – SAM object like PySAM.Pvwattsv7 or PySAM.Lcoefcr

+
+
+
+ +
+
+property site_data
+

Get the site-specific inputs in dataframe format.

+
+
Returns:
+

_site_data (pd.DataFrame) – Site-specific input data for gen or econ calculation. Rows match +sites, columns are variables.

+
+
+
+ +
+
+site_index(site_gid, out_index=False)
+

Get the index corresponding to the site gid.

+
+
Parameters:
+
    +
  • site_gid (int) – Resource-native site index (gid).

  • +
  • out_index (bool) – Option to get output index (if true) which is the column index in +the current in-memory output array, or (if false) the global site +index from the project points site list.

  • +
+
+
Returns:
+

index (int) – Global site index if out_index=False, otherwise column index in +the current in-memory output array.

+
+
+
+ +
+
+property site_limit
+

Get the number of site results that can be stored in memory at once

+
+
Returns:
+

_site_limit (int) – Number of site result sets that can be stored in memory at once +without violating memory limits.

+
+
+
+ +
+
+property site_mem
+

Get the memory (MB) required to store all results for a single site.

+
+
Returns:
+

_site_mem (float) – Memory (MB) required to store all results requested in +output_request for a single site.

+
+
+
+ +
+
+property tech
+

Get the reV technology string.

+
+
Returns:
+

tech (str) – SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, +solarwaterheat, troughphysicalheat, lineardirectsteam, econ) +The string should be lower-cased with spaces and _ removed.

+
+
+
+ +
+
+static unpack_futures(futures)
+

Combine list of futures results into their native dict format/type.

+
+
Parameters:
+

futures (list) – List of dictionary futures results.

+
+
Returns:
+

out (dict) – Compiled results of the native future results type (dict).

+
+
+
+ +
+
+unpack_output(site_gid, site_output)
+

Unpack a SAM SiteOutput object to the output attribute.

+
+
Parameters:
+
    +
  • site_gid (int) – Resource-native site gid (index).

  • +
  • site_output (dict) – SAM site output object.

  • +
+
+
+
+ +
+
+property year
+

Get the resource year.

+
+
Returns:
+

_year (int) – Year of the time-series datetime index.

+
+
+
+ +
+
+run(out_fpath=None, max_workers=1, timeout=1800, pool_size=None)[source]
+

Execute a parallel reV generation run with smart data flushing.

+
+
Parameters:
+
    +
  • out_fpath (str, optional) – Path to output file. If None, no output file will +be written. If the filepath is specified but the module name +(generation) and/or resource data year is not included, the +module name and/or resource data year will get added to the +output file name. By default, None.

  • +
  • max_workers (int, optional) – Number of local workers to run on. By default, 1.

  • +
  • timeout (int, optional) – Number of seconds to wait for parallel run iteration to +complete before returning zeros. By default, 1800 +seconds.

  • +
  • pool_size (int, optional) – Number of futures to submit to a single process pool for +parallel futures. If None, the pool size is set to +os.cpu_count() * 2. By default, None.

  • +
+
+
Returns:
+

str | None – Path to output HDF5 file, or None if results were not +written to disk.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.generation.generation.html b/_autosummary/reV.generation.generation.html new file mode 100644 index 000000000..69b415d00 --- /dev/null +++ b/_autosummary/reV.generation.generation.html @@ -0,0 +1,639 @@ + + + + + + + reV.generation.generation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.generation.generation

+

reV generation module.

+

Classes

+ + + + + + +

Gen(technology, project_points, sam_files, ...)

reV generation analysis class.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.generation.html b/_autosummary/reV.generation.html new file mode 100644 index 000000000..742c61e11 --- /dev/null +++ b/_autosummary/reV.generation.html @@ -0,0 +1,643 @@ + + + + + + + reV.generation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.generation

+

reV Generation

+ + + + + + + + + + + + +

reV.generation.base

reV base gen and econ module.

reV.generation.cli_gen

Generation CLI utility functions.

reV.generation.generation

reV generation module.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.cli_collect.html b/_autosummary/reV.handlers.cli_collect.html new file mode 100644 index 000000000..ece2fbf4f --- /dev/null +++ b/_autosummary/reV.handlers.cli_collect.html @@ -0,0 +1,631 @@ + + + + + + + reV.handlers.cli_collect — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers.cli_collect

+

File collection CLI utility functions.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.cli_multi_year.html b/_autosummary/reV.handlers.cli_multi_year.html new file mode 100644 index 000000000..142bf824a --- /dev/null +++ b/_autosummary/reV.handlers.cli_multi_year.html @@ -0,0 +1,631 @@ + + + + + + + reV.handlers.cli_multi_year — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers.cli_multi_year

+

Multi-year means CLI utility functions.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.exclusions.ExclusionLayers.html b/_autosummary/reV.handlers.exclusions.ExclusionLayers.html new file mode 100644 index 000000000..cb70530d0 --- /dev/null +++ b/_autosummary/reV.handlers.exclusions.ExclusionLayers.html @@ -0,0 +1,895 @@ + + + + + + + reV.handlers.exclusions.ExclusionLayers — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers.exclusions.ExclusionLayers

+
+
+class ExclusionLayers(h5_file, hsds=False)[source]
+

Bases: object

+

Handler of .h5 file and techmap for Exclusion Layers

+
+
Parameters:
+
    +
  • h5_file (str | list | tuple) – .h5 file containing exclusion layers and techmap, +or a list of h5 files

  • +
  • hsds (bool) – Boolean flag to use h5pyd to handle .h5 ‘files’ hosted on AWS +behind HSDS

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + +

close()

Close h5 instance

get_layer_crs(layer)

Get crs for a specific exclusion layer

get_layer_description(layer)

Get description for given layer

get_layer_profile(layer)

Get profile for a specific exclusion layer

get_layer_values(layer)

Get values for given layer in Geotiff format (bands, y, x)

get_nodata_value(layer)

Get the nodata value for a given layer

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

chunks

Exclusion layers chunks default chunk size

crs

GeoTiff projection crs

h5

Open h5py File instance.

iarr

Get an array of 1D index values for the flattened h5 excl extent.

latitude

Latitude coordinates array

layers

Available exclusions layers

longitude

Longitude coordinates array

pixel_area

Get pixel area in km2 from the transform profile of the excl file.

profile

GeoTiff profile for exclusions

shape

Exclusion shape (latitude, longitude)

+
+
+close()[source]
+

Close h5 instance

+
+ +
+
+property h5
+

Open h5py File instance.

+
+
Returns:
+

h5 (rex.MultiFileResource | rex.Resource)

+
+
+
+ +
+
+property iarr
+

Get an array of 1D index values for the flattened h5 excl extent.

+
+
Returns:
+

iarr (np.ndarray) – Uint array with same shape as exclusion extent, representing the 1D +index values if the geotiff extent was flattened +(with default flatten order ‘C’)

+
+
+
+ +
+
+property profile
+

GeoTiff profile for exclusions

+
+
Returns:
+

profile (dict)

+
+
+
+ +
+
+property crs
+

GeoTiff projection crs

+
+
Returns:
+

str

+
+
+
+ +
+
+property pixel_area
+

Get pixel area in km2 from the transform profile of the excl file.

+
+
Returns:
+

area (float) – Exclusion pixel area in km2. Will return None if the +appropriate transform attribute is not found.

+
+
+
+ +
+
+property layers
+

Available exclusions layers

+
+
Returns:
+

layers (list)

+
+
+
+ +
+
+property shape
+

Exclusion shape (latitude, longitude)

+
+
Returns:
+

shape (tuple)

+
+
+
+ +
+
+property chunks
+

Exclusion layers chunks default chunk size

+
+
Returns:
+

chunks (tuple | None) – Chunk size of exclusion layers

+
+
+
+ +
+
+property latitude
+

Latitude coordinates array

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property longitude
+

Longitude coordinates array

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+get_layer_profile(layer)[source]
+

Get profile for a specific exclusion layer

+
+
Parameters:
+

layer (str) – Layer to get profile for

+
+
Returns:
+

profile (dict | None) – GeoTiff profile for single exclusion layer

+
+
+
+ +
+
+get_layer_crs(layer)[source]
+

Get crs for a specific exclusion layer

+
+
Parameters:
+

layer (str) – Layer to get profile for

+
+
Returns:
+

crs (str | None) – GeoTiff projection crs

+
+
+
+ +
+
+get_layer_values(layer)[source]
+

Get values for given layer in Geotiff format (bands, y, x)

+
+
Parameters:
+

layer (str) – Layer to get values for

+
+
Returns:
+

values (ndarray) – GeoTiff values for single exclusion layer

+
+
+
+ +
+
+get_layer_description(layer)[source]
+

Get description for given layer

+
+
Parameters:
+

layer (str) – Layer to get description for

+
+
Returns:
+

description (str) – Description of layer

+
+
+
+ +
+
+get_nodata_value(layer)[source]
+

Get the nodata value for a given layer

+
+
Parameters:
+

layer (str) – Layer to get nodata value for

+
+
Returns:
+

nodata (int | float | None) – nodata value for layer or None if not found

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.exclusions.html b/_autosummary/reV.handlers.exclusions.html new file mode 100644 index 000000000..70bfb1356 --- /dev/null +++ b/_autosummary/reV.handlers.exclusions.html @@ -0,0 +1,639 @@ + + + + + + + reV.handlers.exclusions — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers.exclusions

+

Exclusion layers handler

+

Classes

+ + + + + + +

ExclusionLayers(h5_file[, hsds])

Handler of .h5 file and techmap for Exclusion Layers

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.html b/_autosummary/reV.handlers.html new file mode 100644 index 000000000..ec75d8636 --- /dev/null +++ b/_autosummary/reV.handlers.html @@ -0,0 +1,652 @@ + + + + + + + reV.handlers — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers

+

Sub-package of data handlers

+ + + + + + + + + + + + + + + + + + + + + +

reV.handlers.cli_collect

File collection CLI utility functions.

reV.handlers.cli_multi_year

Multi-year means CLI utility functions.

reV.handlers.exclusions

Exclusion layers handler

reV.handlers.multi_year

Classes to collect reV outputs from multiple annual files.

reV.handlers.outputs

Classes to handle reV h5 output files.

reV.handlers.transmission

Module to handle Supply Curve Transmission features

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.multi_year.MultiYear.html b/_autosummary/reV.handlers.multi_year.MultiYear.html new file mode 100644 index 000000000..e339f2866 --- /dev/null +++ b/_autosummary/reV.handlers.multi_year.MultiYear.html @@ -0,0 +1,1638 @@ + + + + + + + reV.handlers.multi_year.MultiYear — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers.multi_year.MultiYear

+
+
+class MultiYear(h5_file, group=None, unscale=True, mode='r', str_decode=True)[source]
+

Bases: Outputs

+

Class to handle multiple years of data and: +- collect datasets from multiple years +- compute multi-year means +- compute multi-year standard deviations +- compute multi-year coefficients of variation

+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 resource file

  • +
  • group (str) – Group to collect datasets into

  • +
  • unscale (bool) – Boolean flag to automatically unscale variables on extraction

  • +
  • mode (str) – Mode to instantiate h5py.File instance

  • +
  • str_decode (bool) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

CV(dset)

Extract or compute multi-year coefficient of variation for given source dset

add_dataset(h5_file, dset_name, dset_data, dtype)

Add dataset to h5_file

close()

Close h5 instance

collect(source_files, dset[, profiles, ...])

Collect dataset dset from given list of h5 files

collect_means(my_file, source_files, dset[, ...])

Collect and compute multi-year means for given dataset

collect_profiles(my_file, source_files, dset)

Collect multi-year profiles associated with given dataset

df_str_decode(df)

Decode a dataframe with byte string columns into ordinary str cols.

get_SAM_df(site)

Placeholder for get_SAM_df method that it resource specific

get_attrs([dset])

Get h5 attributes either from file or dataset

get_config(config_name)

Get SAM config

get_dset_properties(dset)

Get dataset properties (shape, dtype, chunks)

get_meta_arr(rec_name[, rows])

Get a meta array by name (faster than DataFrame extraction).

get_scale_factor(dset)

Get dataset scale factor

get_units(dset)

Get dataset units

init_h5(h5_file, dsets, shapes, attrs, ...)

Init a full output file with the final intended shape without data.

is_profile(source_files, dset)

Check dataset in source files to see if it is a profile.

means(dset)

Extract or compute multi-year means for given source dset

open_dataset(ds_name)

Open resource dataset

parse_source_files_pattern(source_files)

Parse a source_files pattern that can be either an explicit list of source files or a unix-style /filepath/pattern*.h5 and either way return a list of explicit filepaths.

pass_through(my_file, source_files, dset[, ...])

Pass through a dataset that is identical in all source files to a dataset of the same name in the output multi-year file.

preload_SAM(h5_file, sites, tech[, unscale, ...])

Pre-load project_points for SAM

set_configs(SAM_configs)

Set SAM configuration JSONs as attributes of 'meta'

set_version_attr()

Set the version attribute to the h5 file.

stdev(dset)

Extract or compute multi-year standard deviation for given source dset

update_dset(dset, dset_array[, dset_slice])

Check to see if dset needs to be updated on disk If so write dset_array to disk

write_dataset(dset_name, data, dtype[, ...])

Write dataset to disk.

write_means(h5_file, meta, dset_name, means, ...)

Write means array to disk

write_profiles(h5_file, meta, time_index, ...)

Write profiles to disk

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ADD_ATTR

SAM_configs

SAM configuration JSONs used to create CF profiles

SCALE_ATTR

UNIT_ATTR

adders

Dictionary of all dataset add offset factors

attrs

Dictionary of all dataset attributes

chunks

Dictionary of all dataset chunk sizes

coordinates

(lat, lon) pairs

data_version

Get the version attribute of the data.

datasets

Datasets available

dsets

Datasets available

dtypes

Dictionary of all dataset dtypes

full_version_record

Get record of versions for dependencies

global_attrs

Global (file) attributes

groups

Groups available

h5

Open h5py File instance.

lat_lon

Extract (latitude, longitude) pairs

meta

Resource meta data DataFrame

package

Package used to create file

res_dsets

Available resource datasets

resource_datasets

Available resource datasets

run_attrs

Runtime attributes stored at the global (file) level

scale_factors

Dictionary of all dataset scale factors

shape

Variable array shape from time_index and meta

shapes

Dictionary of all dataset shapes

source

Package and version used to create file

time_index

Resource DatetimeIndex

units

Dictionary of all dataset units

version

Version of package used to create file

writable

Check to see if h5py.File instance is writable

+
+
+static parse_source_files_pattern(source_files)[source]
+

Parse a source_files pattern that can be either an explicit list of +source files or a unix-style /filepath/pattern*.h5 and either way +return a list of explicit filepaths.

+
+
Parameters:
+

source_files (list | str) – List of .h5 files to collect datasets from. This can also be a +unix-style /filepath/pattern*.h5 to find .h5 files to collect, +however all resulting files must be .h5 otherwise an exception will +be raised. NOTE: .h5 file names must indicate the year the data +pertains to

+
+
Returns:
+

source_files (list) – List of .h5 filepaths.

+
+
+
+ +
+
+collect(source_files, dset, profiles=False, pass_through=False)[source]
+

Collect dataset dset from given list of h5 files

+
+
Parameters:
+
    +
  • source_files (list | str) – List of .h5 files to collect datasets from. This can also be a +unix-style /filepath/pattern*.h5 to find .h5 files to collect, +however all resulting files must be .h5 otherwise an exception will +be raised. NOTE: .h5 file names must indicate the year the data +pertains to

  • +
  • dset (str) – Dataset to collect

  • +
  • profiles (bool) – Boolean flag to indicate if profiles are being collected +If True also collect time_index

  • +
  • pass_through (bool) – Flag to just pass through dataset without name modifications +(no differences between years, no means or stdevs)

  • +
+
+
+
+ +
+
+means(dset)[source]
+

Extract or compute multi-year means for given source dset

+
+
Parameters:
+

dset (str) – Dataset of interest

+
+
Returns:
+

my_means (ndarray) – Array of multi-year means for dataset of interest

+
+
+
+ +
+
+stdev(dset)[source]
+

Extract or compute multi-year standard deviation for given source dset

+
+
Parameters:
+

dset (str) – Dataset of interest

+
+
Returns:
+

my_stdev (ndarray) – Array of multi-year standard deviation for dataset of interest

+
+
+
+ +
+
+CV(dset)[source]
+

Extract or compute multi-year coefficient of variation for given +source dset

+
+
Parameters:
+

dset (str) – Dataset of interest

+
+
Returns:
+

my_cv (ndarray) – Array of multi-year coefficient of variation for +dataset of interest

+
+
+
+ +
+
+classmethod is_profile(source_files, dset)[source]
+

Check dataset in source files to see if it is a profile.

+
+
Parameters:
+
    +
  • source_files (list | str) – List of .h5 files to collect datasets from. This can also be a +unix-style /filepath/pattern*.h5 to find .h5 files to collect, +however all resulting files must be .h5 otherwise an exception will +be raised. NOTE: .h5 file names must indicate the year the data +pertains to

  • +
  • dset (str) – Dataset to collect

  • +
+
+
Returns:
+

is_profile (bool) – True if profile, False if not.

+
+
+
+ +
+
+classmethod pass_through(my_file, source_files, dset, group=None)[source]
+

Pass through a dataset that is identical in all source files to a +dataset of the same name in the output multi-year file.

+
+
Parameters:
+
    +
  • my_file (str) – Path to multi-year .h5 file

  • +
  • source_files (list | str) – List of .h5 files to collect datasets from. This can also be a +unix-style /filepath/pattern*.h5 to find .h5 files to collect, +however all resulting files must be .h5 otherwise an exception will +be raised. NOTE: .h5 file names must indicate the year the data +pertains to

  • +
  • dset (str) – Dataset to pass through (will also be the name of the output +dataset in my_file)

  • +
  • group (str) – Group to collect datasets into

  • +
+
+
+
+ +
+
+classmethod collect_means(my_file, source_files, dset, group=None)[source]
+

Collect and compute multi-year means for given dataset

+
+
Parameters:
+
    +
  • my_file (str) – Path to multi-year .h5 file

  • +
  • source_files (list | str) – List of .h5 files to collect datasets from. This can also be a +unix-style /filepath/pattern*.h5 to find .h5 files to collect, +however all resulting files must be .h5 otherwise an exception will +be raised. NOTE: .h5 file names must indicate the year the data +pertains to

  • +
  • dset (str) – Dataset to collect

  • +
  • group (str) – Group to collect datasets into

  • +
+
+
+
+ +
+
+classmethod collect_profiles(my_file, source_files, dset, group=None)[source]
+

Collect multi-year profiles associated with given dataset

+
+
Parameters:
+
    +
  • my_file (str) – Path to multi-year .h5 file

  • +
  • source_files (list | str) – List of .h5 files to collect datasets from. This can also be a +unix-style /filepath/pattern*.h5 to find .h5 files to collect, +however all resulting files must be .h5 otherwise an exception will +be raised. NOTE: .h5 file names must indicate the year the data +pertains to

  • +
  • dset (str) – Profiles dataset to collect

  • +
  • group (str) – Group to collect datasets into

  • +
+
+
+
+ +
+
+property SAM_configs
+

SAM configuration JSONs used to create CF profiles

+
+
Returns:
+

configs (dict) – Dictionary of SAM configuration JSONs

+
+
+
+ +
+
+classmethod add_dataset(h5_file, dset_name, dset_data, dtype, attrs=None, chunks=None, unscale=True, mode='a', str_decode=True, group=None)
+

Add dataset to h5_file

+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 resource file

  • +
  • dset_name (str) – Name of dataset to be added to h5 file

  • +
  • dset_data (ndarray) – Data to be added to h5 file

  • +
  • dtype (str) – Intended dataset datatype after scaling.

  • +
  • attrs (dict, optional) – Attributes to be set. May include ‘scale_factor’, by default None

  • +
  • unscale (bool, optional) – Boolean flag to automatically unscale variables on extraction, +by default True

  • +
  • mode (str, optional) – Mode to instantiate h5py.File instance, by default ‘a’

  • +
  • str_decode (bool, optional) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read, +by default True

  • +
  • group (str, optional) – Group within .h5 resource file to open, by default None

  • +
+
+
+
+ +
+
+property adders
+

Dictionary of all dataset add offset factors

+
+
Returns:
+

adders (dict)

+
+
+
+ +
+
+property attrs
+

Dictionary of all dataset attributes

+
+
Returns:
+

attrs (dict)

+
+
+
+ +
+
+property chunks
+

Dictionary of all dataset chunk sizes

+
+
Returns:
+

chunks (dict)

+
+
+
+ +
+
+close()
+

Close h5 instance

+
+ +
+
+property coordinates
+

(lat, lon) pairs

+
+
Returns:
+

lat_lon (ndarray)

+
+
Type:
+

Coordinates

+
+
+
+ +
+
+property data_version
+

Get the version attribute of the data. None if not available.

+
+
Returns:
+

version (str | None)

+
+
+
+ +
+
+property datasets
+

Datasets available

+
+
Returns:
+

list

+
+
+
+ +
+
+static df_str_decode(df)
+

Decode a dataframe with byte string columns into ordinary str cols.

+
+
Parameters:
+

df (pd.DataFrame) – Dataframe with some columns being byte strings.

+
+
Returns:
+

df (pd.DataFrame) – DataFrame with str columns instead of byte str columns.

+
+
+
+ +
+
+property dsets
+

Datasets available

+
+
Returns:
+

list

+
+
+
+ +
+
+property dtypes
+

Dictionary of all dataset dtypes

+
+
Returns:
+

dtypes (dict)

+
+
+
+ +
+
+property full_version_record
+

Get record of versions for dependencies

+
+
Returns:
+

dict – Dictionary of package versions for dependencies

+
+
+
+ +
+
+get_SAM_df(site)
+

Placeholder for get_SAM_df method that is resource specific

+
+
Parameters:
+

site (int) – Site to extract SAM DataFrame for

+
+
+
+ +
+
+get_attrs(dset=None)
+

Get h5 attributes either from file or dataset

+
+
Parameters:
+

dset (str) – Dataset to get attributes for, if None get file (global) attributes

+
+
Returns:
+

attrs (dict) – Dataset or file attributes

+
+
+
+ +
+
+get_config(config_name)
+

Get SAM config

+
+
Parameters:
+

config_name (str) – Name of config

+
+
Returns:
+

config (dict) – SAM config JSON as a dictionary

+
+
+
+ +
+
+get_dset_properties(dset)
+

Get dataset properties (shape, dtype, chunks)

+
+
Parameters:
+

dset (str) – Dataset to get scale factor for

+
+
Returns:
+

    +
  • shape (tuple) – Dataset array shape

  • +
  • dtype (str) – Dataset array dtype

  • +
  • chunks (tuple) – Dataset chunk size

  • +
+

+
+
+
+ +
+
+get_meta_arr(rec_name, rows=slice(None, None, None))
+

Get a meta array by name (faster than DataFrame extraction).

+
+
Parameters:
+
    +
  • rec_name (str) – Named record from the meta data to retrieve.

  • +
  • rows (slice) – Rows of the record to extract.

  • +
+
+
Returns:
+

meta_arr (np.ndarray) – Extracted array from the meta data record name.

+
+
+
+ +
+
+get_scale_factor(dset)
+

Get dataset scale factor

+
+
Parameters:
+

dset (str) – Dataset to get scale factor for

+
+
Returns:
+

float – Dataset scale factor, used to unscale int values to floats

+
+
+
+ +
+
+get_units(dset)
+

Get dataset units

+
+
Parameters:
+

dset (str) – Dataset to get units for

+
+
Returns:
+

str – Dataset units, None if not defined

+
+
+
+ +
+
+property global_attrs
+

Global (file) attributes

+
+
Returns:
+

global_attrs (dict)

+
+
+
+ +
+
+property groups
+

Groups available

+
+
Returns:
+

groups (list) – List of groups

+
+
+
+ +
+
+property h5
+

Open h5py File instance. If _group is not None return open Group

+
+
Returns:
+

h5 (h5py.File | h5py.Group)

+
+
+
+ +
+
+classmethod init_h5(h5_file, dsets, shapes, attrs, chunks, dtypes, meta, time_index=None, configs=None, unscale=True, mode='w', str_decode=True, group=None, run_attrs=None)
+

Init a full output file with the final intended shape without data.

+
+
Parameters:
+
    +
  • h5_file (str) – Full h5 output filepath.

  • +
  • dsets (list) – List of strings of dataset names to initialize (does not include +meta or time_index).

  • +
  • shapes (dict) – Dictionary of dataset shapes (keys correspond to dsets).

  • +
  • attrs (dict) – Dictionary of dataset attributes (keys correspond to dsets).

  • +
  • chunks (dict) – Dictionary of chunk tuples (keys correspond to dsets).

  • +
  • dtypes (dict) – dictionary of numpy datatypes (keys correspond to dsets).

  • +
  • meta (pd.DataFrame) – Full meta data.

  • +
  • time_index (pd.datetimeindex | None) – Full pandas datetime index. None implies that only 1D results +(no site profiles) are being written.

  • +
  • configs (dict | None) – Optional input configs to set as attr on meta.

  • +
  • unscale (bool) – Boolean flag to automatically unscale variables on extraction

  • +
  • mode (str) – Mode to instantiate h5py.File instance

  • +
  • str_decode (bool) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read.

  • +
  • group (str) – Group within .h5 resource file to open

  • +
  • run_attrs (dict | NoneType) – Runtime attributes (args, kwargs) to add as global (file) +attributes

  • +
+
+
+
+ +
+
+property lat_lon
+

Extract (latitude, longitude) pairs

+
+
Returns:
+

lat_lon (ndarray)

+
+
+
+ +
+
+property meta
+

Resource meta data DataFrame

+
+
Returns:
+

meta (pandas.DataFrame)

+
+
+
+ +
+
+open_dataset(ds_name)
+

Open resource dataset

+
+
Parameters:
+

ds_name (str) – Dataset name to open

+
+
Returns:
+

ds (ResourceDataset) – Resource for open resource dataset

+
+
+
+ +
+
+property package
+

Package used to create file

+
+
Returns:
+

str

+
+
+
+ +
+
+classmethod preload_SAM(h5_file, sites, tech, unscale=True, str_decode=True, group=None, hsds=False, hsds_kwargs=None, time_index_step=None, means=False)
+

Pre-load project_points for SAM

+
+
Parameters:
+
    +
  • h5_file (str) – h5_file to extract resource from

  • +
  • sites (list) – List of sites to be provided to SAM

  • +
  • tech (str) – Technology to be run by SAM

  • +
  • unscale (bool) – Boolean flag to automatically unscale variables on extraction

  • +
  • str_decode (bool) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read.

  • +
  • group (str) – Group within .h5 resource file to open

  • +
  • hsds (bool, optional) – Boolean flag to use h5pyd to handle .h5 ‘files’ hosted on AWS +behind HSDS, by default False

  • +
  • hsds_kwargs (dict, optional) – Dictionary of optional kwargs for h5pyd, e.g., bucket, username, +password, by default None

  • +
  • time_index_step (int, optional) – Step size for time_index, used to reduce temporal resolution, +by default None

  • +
  • means (bool, optional) – Boolean flag to compute mean resource when res_array is set, +by default False

  • +
+
+
Returns:
+

SAM_res (SAMResource) – Instance of SAMResource pre-loaded with Solar resource for sites +in project_points

+
+
+
+ +
+
+property res_dsets
+

Available resource datasets

+
+
Returns:
+

list

+
+
+
+ +
+
+property resource_datasets
+

Available resource datasets

+
+
Returns:
+

list

+
+
+
+ +
+
+property run_attrs
+

Runtime attributes stored at the global (file) level

+
+
Returns:
+

global_attrs (dict)

+
+
+
+ +
+
+property scale_factors
+

Dictionary of all dataset scale factors

+
+
Returns:
+

scale_factors (dict)

+
+
+
+ +
+
+set_configs(SAM_configs)
+

Set SAM configuration JSONs as attributes of ‘meta’

+
+
Parameters:
+

SAM_configs (dict) – Dictionary of SAM configuration JSONs

+
+
+
+ +
+
+set_version_attr()
+

Set the version attribute to the h5 file.

+
+ +
+
+property shape
+

Variable array shape from time_index and meta

+
+
Returns:
+

tuple – shape of variables arrays == (time, locations)

+
+
+
+ +
+
+property shapes
+

Dictionary of all dataset shapes

+
+
Returns:
+

shapes (dict)

+
+
+
+ +
+
+property source
+

Package and version used to create file

+
+
Returns:
+

str

+
+
+
+ +
+
+property time_index
+

Resource DatetimeIndex

+
+
Returns:
+

time_index (pandas.DatetimeIndex)

+
+
+
+ +
+
+property units
+

Dictionary of all dataset units

+
+
Returns:
+

units (dict)

+
+
+
+ +
+
+update_dset(dset, dset_array, dset_slice=None)
+

Check to see if dset needs to be updated on disk. +If so, write dset_array to disk

+
+
Parameters:
+
    +
  • dset (str) – dataset to update

  • +
  • dset_array (ndarray) – dataset array

  • +
  • dset_slice (tuple) – slice of dataset to update, if None update all

  • +
+
+
+
+ +
+
+property version
+

Version of package used to create file

+
+
Returns:
+

str

+
+
+
+ +
+
+property writable
+

Check to see if h5py.File instance is writable

+
+
Returns:
+

is_writable (bool) – Flag if mode is writable

+
+
+
+ +
+
+write_dataset(dset_name, data, dtype, chunks=None, attrs=None)
+

Write dataset to disk. Dataset is created in .h5 file and data is +scaled if needed.

+
+
Parameters:
+
    +
  • dset_name (str) – Name of dataset to be added to h5 file.

  • +
  • data (ndarray) – Data to be added to h5 file.

  • +
  • dtype (str) – Intended dataset datatype after scaling.

  • +
  • chunks (tuple) – Chunk size for capacity factor means dataset.

  • +
  • attrs (dict) – Attributes to be set. May include ‘scale_factor’.

  • +
+
+
+
+ +
+
+classmethod write_means(h5_file, meta, dset_name, means, dtype, attrs=None, SAM_configs=None, chunks=None, unscale=True, mode='w-', str_decode=True, group=None)
+

Write means array to disk

+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 resource file

  • +
  • meta (pandas.Dataframe) – Locational meta data

  • +
  • dset_name (str) – Name of the target dataset (should identify the means).

  • +
  • means (ndarray) – output means array.

  • +
  • dtype (str) – Intended dataset datatype after scaling.

  • +
  • attrs (dict, optional) – Attributes to be set. May include ‘scale_factor’, by default None

  • +
  • SAM_configs (dict, optional) – Dictionary of SAM configuration JSONs used to compute cf means, +by default None

  • +
  • chunks (tuple, optional) – Chunk size for capacity factor means dataset, by default None

  • +
  • unscale (bool, optional) – Boolean flag to automatically unscale variables on extraction, +by default True

  • +
  • mode (str, optional) – Mode to instantiate h5py.File instance, by default ‘w-’

  • +
  • str_decode (bool, optional) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read, +by default True

  • +
  • group (str, optional) – Group within .h5 resource file to open, by default None

  • +
+
+
+
+ +
+
+classmethod write_profiles(h5_file, meta, time_index, dset_name, profiles, dtype, attrs=None, SAM_configs=None, chunks=(None, 100), unscale=True, mode='w-', str_decode=True, group=None)
+

Write profiles to disk

+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 resource file

  • +
  • meta (pandas.Dataframe) – Locational meta data

  • +
  • time_index (pandas.DatetimeIndex) – Temporal timesteps

  • +
  • dset_name (str) – Name of the target dataset (should identify the profiles).

  • +
  • profiles (ndarray) – output result timeseries profiles

  • +
  • dtype (str) – Intended dataset datatype after scaling.

  • +
  • attrs (dict, optional) – Attributes to be set. May include ‘scale_factor’, by default None

  • +
  • SAM_configs (dict, optional) – Dictionary of SAM configuration JSONs used to compute cf means, +by default None

  • +
  • chunks (tuple, optional) – Chunk size for capacity factor means dataset, +by default (None, 100)

  • +
  • unscale (bool, optional) – Boolean flag to automatically unscale variables on extraction, +by default True

  • +
  • mode (str, optional) – Mode to instantiate h5py.File instance, by default ‘w-’

  • +
  • str_decode (bool, optional) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read, +by default True

  • +
  • group (str, optional) – Group within .h5 resource file to open, by default None

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.multi_year.MultiYearGroup.html b/_autosummary/reV.handlers.multi_year.MultiYearGroup.html new file mode 100644 index 000000000..167bbc2fe --- /dev/null +++ b/_autosummary/reV.handlers.multi_year.MultiYearGroup.html @@ -0,0 +1,747 @@ + + + + + + + reV.handlers.multi_year.MultiYearGroup — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers.multi_year.MultiYearGroup

+
+
+class MultiYearGroup(name, out_dir, source_files=None, source_dir=None, source_prefix=None, source_pattern=None, dsets=('cf_mean',), pass_through_dsets=None)[source]
+

Bases: object

+

Handle group parameters

+
+
Parameters:
+
    +
  • name (str) – Group name. Can be "none" for no collection groups.

  • +
  • out_dir (str) – Output directory - used for Pipeline handling.

  • +
  • source_files (str | list, optional) – Explicit list of source files. Use either this input OR +source_dir + source_prefix. If this input is +"PIPELINE", the source_files input is determined from +the status file of the previous pipeline step. +If None, use source_dir and source_prefix. +By default, None.

  • +
  • source_dir (str, optional) – Directory to extract source files from (must be paired with +source_prefix). By default, None.

  • +
  • source_prefix (str, optional) – File prefix to search for in source directory (must be +paired with source_dir). By default, None.

  • +
  • source_pattern (str, optional) – Optional unix-style /filepath/pattern*.h5 to specify the +source files. This takes priority over source_dir and +source_prefix but is not used if source_files are +specified explicitly. By default, None.

  • +
  • dsets (list | tuple, optional) – List of datasets to collect. By default, ('cf_mean',).

  • +
  • pass_through_dsets (list | tuple, optional) – Optional list of datasets that are identical in the +multi-year files (e.g. input datasets that don’t vary from +year to year) that should be copied to the output multi-year +file once without a year suffix or means/stdev calculation. +By default, None.

  • +
+
+
+

Methods

+ + + +
+

Attributes

+ + + + + + + + + + + + + + + +

dsets

+
returns:
+

_dsets (list | tuple) -- Datasets to collect

+
+
+

name

+
returns:
+

name (str) -- Group name

+
+
+

pass_through_dsets

Optional list of datasets that are identical in the multi-year files (e.g.

source_files

+
returns:
+

source_files (list) -- list of source files to collect from

+
+
+

+
+
+property name
+
+
Returns:
+

name (str) – Group name

+
+
+
+ +
+
+property source_files
+
+
Returns:
+

source_files (list) – list of source files to collect from

+
+
+
+ +
+
+property dsets
+
+
Returns:
+

_dsets (list | tuple) – Datasets to collect

+
+
+
+ +
+
+property pass_through_dsets
+

Optional list of datasets that are identical in the multi-year +files (e.g. input datasets that don’t vary from year to year) that +should be copied to the output multi-year file once without a +year suffix or means/stdev calculation

+
+
Returns:
+

list | tuple | None

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.multi_year.html b/_autosummary/reV.handlers.multi_year.html new file mode 100644 index 000000000..a682c96d6 --- /dev/null +++ b/_autosummary/reV.handlers.multi_year.html @@ -0,0 +1,650 @@ + + + + + + + reV.handlers.multi_year — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers.multi_year

+

Classes to collect reV outputs from multiple annual files.

+

Functions

+ + + + + + +

my_collect_groups(out_fpath, groups[, clobber])

Collect all groups into a single multi-year HDF5 file.

+

Classes

+ + + + + + + + + +

MultiYear(h5_file[, group, unscale, mode, ...])

Class to handle multiple years of data and: - collect datasets from multiple years - compute multi-year means - compute multi-year standard deviations - compute multi-year coefficient of variations

MultiYearGroup(name, out_dir[, ...])

Handle group parameters

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.multi_year.my_collect_groups.html b/_autosummary/reV.handlers.multi_year.my_collect_groups.html new file mode 100644 index 000000000..f3b56ad7a --- /dev/null +++ b/_autosummary/reV.handlers.multi_year.my_collect_groups.html @@ -0,0 +1,703 @@ + + + + + + + reV.handlers.multi_year.my_collect_groups — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers.multi_year.my_collect_groups

+
+
+my_collect_groups(out_fpath, groups, clobber=True)[source]
+

Collect all groups into a single multi-year HDF5 file.

+

reV multi-year combines reV generation data from multiple +years (typically stored in separate files) into a single multi-year +file. Each dataset in the multi-year file is labeled with the +corresponding years, and multi-year averages of the yearly datasets +are also computed.

+
+
Parameters:
+
    +
  • out_fpath (str) – Path to multi-year HDF5 file to use for multi-year +collection.

  • +
  • groups (dict) – Dictionary of collection groups and their parameters. This +should be a dictionary mapping group names (keys) to a set +of key word arguments (values) that can be used to initialize +MultiYearGroup (excluding the +required name and out_dir inputs, which are populated +automatically). For example:

    +
    groups = {
    +    "none": {
    +        "dsets": [
    +            "cf_profile",
    +            "cf_mean",
    +            "ghi_mean",
    +            "lcoe_fcr",
    +        ],
    +        "source_dir": "./",
    +        "source_prefix": "",
    +        "pass_through_dsets": [
    +            "capital_cost",
    +            "fixed_operating_cost",
    +            "system_capacity",
    +            "fixed_charge_rate",
    +            "variable_operating_cost",
    +        ]
    +    },
    +    "solar_group": {
    +        "source_files": "PIPELINE",
    +        "dsets": [
    +            "cf_profile_ac",
    +            "cf_mean_ac",
    +            "ac",
    +            "dc",
    +            "clipped_power"
    +        ],
    +        "pass_through_dsets": [
    +            "system_capacity_ac",
    +            "dc_ac_ratio"
    +        ]
    +    },
    +    ...
    +}
    +
    +
    +

    The group names will be used as the HDF5 file group name under +which the collected data will be stored. You can have exactly +one group with the name "none" for a “no group” collection +(this is typically what you want and all you need to specify).

    +
  • +
  • clobber (bool, optional) – Flag to purge the multi-year output file prior to running the +multi-year collection step if the file already exists on disk. +This ensures the data is always freshly collected from the +single-year files. If False, then datasets in the existing +file will not be overwritten with (potentially new/updated) +data from the single-year files. By default, True.

  • +
+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.outputs.Outputs.html b/_autosummary/reV.handlers.outputs.Outputs.html new file mode 100644 index 000000000..3d34a2938 --- /dev/null +++ b/_autosummary/reV.handlers.outputs.Outputs.html @@ -0,0 +1,1551 @@ + + + + + + + reV.handlers.outputs.Outputs — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers.outputs.Outputs

+
+
+class Outputs(h5_file, mode='r', unscale=True, str_decode=True, group=None)[source]
+

Bases: Outputs

+

Base class to handle reV output data in .h5 format

+

Examples

+

The reV Outputs handler can be used to initialize h5 files in the standard +reV/rex resource data format.

+
>>> from reV import Outputs
+>>> import pandas as pd
+>>> import numpy as np
+>>>
+>>> meta = pd.DataFrame({'latitude': np.ones(100),
+>>>                      'longitude': np.ones(100)})
+>>>
+>>> time_index = pd.date_range('20210101', '20220101', freq='1h',
+>>>                            closed='right')
+>>>
+>>> with Outputs('test.h5', 'w') as f:
+>>>     f.meta = meta
+>>>     f.time_index = time_index
+
+
+

You can also use the Outputs handler to read output h5 files from disk. +The Outputs handler will automatically parse the meta data and time index +into the expected pandas objects (DataFrame and DatetimeIndex, +respectively).

+
>>> with Outputs('test.h5') as f:
+>>>     print(f.meta.head())
+>>>
+     latitude  longitude
+gid
+0         1.0        1.0
+1         1.0        1.0
+2         1.0        1.0
+3         1.0        1.0
+4         1.0        1.0
+
+
+
>>> with Outputs('test.h5') as f:
+>>>     print(f.time_index)
+DatetimeIndex(['2021-01-01 01:00:00+00:00', '2021-01-01 02:00:00+00:00',
+               '2021-01-01 03:00:00+00:00', '2021-01-01 04:00:00+00:00',
+               '2021-01-01 05:00:00+00:00', '2021-01-01 06:00:00+00:00',
+               '2021-01-01 07:00:00+00:00', '2021-01-01 08:00:00+00:00',
+               '2021-01-01 09:00:00+00:00', '2021-01-01 10:00:00+00:00',
+               ...
+               '2021-12-31 15:00:00+00:00', '2021-12-31 16:00:00+00:00',
+               '2021-12-31 17:00:00+00:00', '2021-12-31 18:00:00+00:00',
+               '2021-12-31 19:00:00+00:00', '2021-12-31 20:00:00+00:00',
+               '2021-12-31 21:00:00+00:00', '2021-12-31 22:00:00+00:00',
+               '2021-12-31 23:00:00+00:00', '2022-01-01 00:00:00+00:00'],
+              dtype='datetime64[ns, UTC]', length=8760, freq=None)
+
+
+

There are a few ways to use the Outputs handler to write data to a file. +Here is one example using the pre-initialized file we created earlier. +Note that the Outputs handler will automatically scale float data using +the “scale_factor” attribute. The Outputs handler will unscale the data +while being read unless the unscale kwarg is explicitly set to False. +This behavior is intended to reduce disk storage requirements for big +data and can be disabled by setting dtype=np.float32 or dtype=np.float64 +when writing data.

+
>>> Outputs.add_dataset(h5_file='test.h5', dset_name='dset1',
+>>>                     dset_data=np.ones((8760, 100)) * 42.42,
+>>>                     attrs={'scale_factor': 100}, dtype=np.int32)
+
+
+
>>> with Outputs('test.h5') as f:
+>>>     print(f['dset1'])
+>>>     print(f['dset1'].dtype)
+[[42.42 42.42 42.42 ... 42.42 42.42 42.42]
+ [42.42 42.42 42.42 ... 42.42 42.42 42.42]
+ [42.42 42.42 42.42 ... 42.42 42.42 42.42]
+ ...
+ [42.42 42.42 42.42 ... 42.42 42.42 42.42]
+ [42.42 42.42 42.42 ... 42.42 42.42 42.42]
+ [42.42 42.42 42.42 ... 42.42 42.42 42.42]]
+float32
+
+
+
>>> with Outputs('test.h5', unscale=False) as f:
+>>>     print(f['dset1'])
+>>>     print(f['dset1'].dtype)
+[[4242 4242 4242 ... 4242 4242 4242]
+ [4242 4242 4242 ... 4242 4242 4242]
+ [4242 4242 4242 ... 4242 4242 4242]
+ ...
+ [4242 4242 4242 ... 4242 4242 4242]
+ [4242 4242 4242 ... 4242 4242 4242]
+ [4242 4242 4242 ... 4242 4242 4242]]
+int32
+
+
+

Note that the reV Outputs handler is specifically designed to read and +write spatiotemporal data. It is therefore important to initialize the meta +data and time index objects even if your data is only spatial or only +temporal. Furthermore, the Outputs handler will always assume that 1D +datasets represent scalar data (non-timeseries) that corresponds to the +meta data shape, and that 2D datasets represent spatiotemporal data whose +shape corresponds to (len(time_index), len(meta)). You can see these +constraints here:

+
>>> Outputs.add_dataset(h5_file='test.h5', dset_name='bad_shape',
+                        dset_data=np.ones((1, 100)) * 42.42,
+                        attrs={'scale_factor': 100}, dtype=np.int32)
+HandlerValueError: 2D data with shape (1, 100) is not of the proper
+spatiotemporal shape: (8760, 100)
+
+
+
>>> Outputs.add_dataset(h5_file='test.h5', dset_name='bad_shape',
+                        dset_data=np.ones((8760,)) * 42.42,
+                        attrs={'scale_factor': 100}, dtype=np.int32)
+HandlerValueError: 1D data with shape (8760,) is not of the proper
+spatial shape: (100,)
+
+
+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 resource file

  • +
  • mode (str, optional) – Mode to instantiate h5py.File instance, by default ‘r’

  • +
  • unscale (bool, optional) – Boolean flag to automatically unscale variables on extraction, +by default True

  • +
  • str_decode (bool, optional) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read, +by default True

  • +
  • group (str, optional) – Group within .h5 resource file to open, by default None

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

add_dataset(h5_file, dset_name, dset_data, dtype)

Add dataset to h5_file

close()

Close h5 instance

df_str_decode(df)

Decode a dataframe with byte string columns into ordinary str cols.

get_SAM_df(site)

Placeholder for get_SAM_df method that is resource specific

get_attrs([dset])

Get h5 attributes either from file or dataset

get_config(config_name)

Get SAM config

get_dset_properties(dset)

Get dataset properties (shape, dtype, chunks)

get_meta_arr(rec_name[, rows])

Get a meta array by name (faster than DataFrame extraction).

get_scale_factor(dset)

Get dataset scale factor

get_units(dset)

Get dataset units

init_h5(h5_file, dsets, shapes, attrs, ...)

Init a full output file with the final intended shape without data.

open_dataset(ds_name)

Open resource dataset

preload_SAM(h5_file, sites, tech[, unscale, ...])

Pre-load project_points for SAM

set_configs(SAM_configs)

Set SAM configuration JSONs as attributes of 'meta'

set_version_attr()

Set the version attribute to the h5 file.

update_dset(dset, dset_array[, dset_slice])

Check to see if dset needs to be updated on disk. If so, write dset_array to disk

write_dataset(dset_name, data, dtype[, ...])

Write dataset to disk.

write_means(h5_file, meta, dset_name, means, ...)

Write means array to disk

write_profiles(h5_file, meta, time_index, ...)

Write profiles to disk

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

ADD_ATTR

SAM_configs

SAM configuration JSONs used to create CF profiles

SCALE_ATTR

UNIT_ATTR

adders

Dictionary of all dataset add offset factors

attrs

Dictionary of all dataset attributes

chunks

Dictionary of all dataset chunk sizes

coordinates

(lat, lon) pairs

data_version

Get the version attribute of the data.

datasets

Datasets available

dsets

Datasets available

dtypes

Dictionary of all dataset dtypes

full_version_record

Get record of versions for dependencies

global_attrs

Global (file) attributes

groups

Groups available

h5

Open h5py File instance.

lat_lon

Extract (latitude, longitude) pairs

meta

Resource meta data DataFrame

package

Package used to create file

res_dsets

Available resource datasets

resource_datasets

Available resource datasets

run_attrs

Runtime attributes stored at the global (file) level

scale_factors

Dictionary of all dataset scale factors

shape

Variable array shape from time_index and meta

shapes

Dictionary of all dataset shapes

source

Package and version used to create file

time_index

Resource DatetimeIndex

units

Dictionary of all dataset units

version

Version of package used to create file

writable

Check to see if h5py.File instance is writable

+
+
+property full_version_record
+

Get record of versions for dependencies

+
+
Returns:
+

dict – Dictionary of package versions for dependencies

+
+
+
+ +
+
+set_version_attr()[source]
+

Set the version attribute to the h5 file.

+
+ +
+
+property SAM_configs
+

SAM configuration JSONs used to create CF profiles

+
+
Returns:
+

configs (dict) – Dictionary of SAM configuration JSONs

+
+
+
+ +
+
+classmethod add_dataset(h5_file, dset_name, dset_data, dtype, attrs=None, chunks=None, unscale=True, mode='a', str_decode=True, group=None)[source]
+

Add dataset to h5_file

+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 resource file

  • +
  • dset_name (str) – Name of dataset to be added to h5 file

  • +
  • dset_data (ndarray) – Data to be added to h5 file

  • +
  • dtype (str) – Intended dataset datatype after scaling.

  • +
  • attrs (dict, optional) – Attributes to be set. May include ‘scale_factor’, by default None

  • +
  • unscale (bool, optional) – Boolean flag to automatically unscale variables on extraction, +by default True

  • +
  • mode (str, optional) – Mode to instantiate h5py.File instance, by default ‘a’

  • +
  • str_decode (bool, optional) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read, +by default True

  • +
  • group (str, optional) – Group within .h5 resource file to open, by default None

  • +
+
+
+
+ +
+
+property adders
+

Dictionary of all dataset add offset factors

+
+
Returns:
+

adders (dict)

+
+
+
+ +
+
+property attrs
+

Dictionary of all dataset attributes

+
+
Returns:
+

attrs (dict)

+
+
+
+ +
+
+property chunks
+

Dictionary of all dataset chunk sizes

+
+
Returns:
+

chunks (dict)

+
+
+
+ +
+
+close()
+

Close h5 instance

+
+ +
+
+property coordinates
+

(lat, lon) pairs

+
+
Returns:
+

lat_lon (ndarray)

+
+
Type:
+

Coordinates

+
+
+
+ +
+
+property data_version
+

Get the version attribute of the data. None if not available.

+
+
Returns:
+

version (str | None)

+
+
+
+ +
+
+property datasets
+

Datasets available

+
+
Returns:
+

list

+
+
+
+ +
+
+static df_str_decode(df)
+

Decode a dataframe with byte string columns into ordinary str cols.

+
+
Parameters:
+

df (pd.DataFrame) – Dataframe with some columns being byte strings.

+
+
Returns:
+

df (pd.DataFrame) – DataFrame with str columns instead of byte str columns.

+
+
+
+ +
+
+property dsets
+

Datasets available

+
+
Returns:
+

list

+
+
+
+ +
+
+property dtypes
+

Dictionary of all dataset dtypes

+
+
Returns:
+

dtypes (dict)

+
+
+
+ +
+
+get_SAM_df(site)
+

Placeholder for get_SAM_df method that is resource specific

+
+
Parameters:
+

site (int) – Site to extract SAM DataFrame for

+
+
+
+ +
+
+get_attrs(dset=None)
+

Get h5 attributes either from file or dataset

+
+
Parameters:
+

dset (str) – Dataset to get attributes for, if None get file (global) attributes

+
+
Returns:
+

attrs (dict) – Dataset or file attributes

+
+
+
+ +
+
+get_config(config_name)[source]
+

Get SAM config

+
+
Parameters:
+

config_name (str) – Name of config

+
+
Returns:
+

config (dict) – SAM config JSON as a dictionary

+
+
+
+ +
+
+get_dset_properties(dset)
+

Get dataset properties (shape, dtype, chunks)

+
+
Parameters:
+

dset (str) – Dataset to get scale factor for

+
+
Returns:
+

    +
  • shape (tuple) – Dataset array shape

  • +
  • dtype (str) – Dataset array dtype

  • +
  • chunks (tuple) – Dataset chunk size

  • +
+

+
+
+
+ +
+
+get_meta_arr(rec_name, rows=slice(None, None, None))
+

Get a meta array by name (faster than DataFrame extraction).

+
+
Parameters:
+
    +
  • rec_name (str) – Named record from the meta data to retrieve.

  • +
  • rows (slice) – Rows of the record to extract.

  • +
+
+
Returns:
+

meta_arr (np.ndarray) – Extracted array from the meta data record name.

+
+
+
+ +
+
+get_scale_factor(dset)
+

Get dataset scale factor

+
+
Parameters:
+

dset (str) – Dataset to get scale factor for

+
+
Returns:
+

float – Dataset scale factor, used to unscale int values to floats

+
+
+
+ +
+
+get_units(dset)
+

Get dataset units

+
+
Parameters:
+

dset (str) – Dataset to get units for

+
+
Returns:
+

str – Dataset units, None if not defined

+
+
+
+ +
+
+property global_attrs
+

Global (file) attributes

+
+
Returns:
+

global_attrs (dict)

+
+
+
+ +
+
+property groups
+

Groups available

+
+
Returns:
+

groups (list) – List of groups

+
+
+
+ +
+
+property h5
+

Open h5py File instance. If _group is not None return open Group

+
+
Returns:
+

h5 (h5py.File | h5py.Group)

+
+
+
+ +
+
+classmethod init_h5(h5_file, dsets, shapes, attrs, chunks, dtypes, meta, time_index=None, configs=None, unscale=True, mode='w', str_decode=True, group=None, run_attrs=None)[source]
+

Init a full output file with the final intended shape without data.

+
+
Parameters:
+
    +
  • h5_file (str) – Full h5 output filepath.

  • +
  • dsets (list) – List of strings of dataset names to initialize (does not include +meta or time_index).

  • +
  • shapes (dict) – Dictionary of dataset shapes (keys correspond to dsets).

  • +
  • attrs (dict) – Dictionary of dataset attributes (keys correspond to dsets).

  • +
  • chunks (dict) – Dictionary of chunk tuples (keys correspond to dsets).

  • +
  • dtypes (dict) – dictionary of numpy datatypes (keys correspond to dsets).

  • +
  • meta (pd.DataFrame) – Full meta data.

  • +
  • time_index (pd.datetimeindex | None) – Full pandas datetime index. None implies that only 1D results +(no site profiles) are being written.

  • +
  • configs (dict | None) – Optional input configs to set as attr on meta.

  • +
  • unscale (bool) – Boolean flag to automatically unscale variables on extraction

  • +
  • mode (str) – Mode to instantiate h5py.File instance

  • +
  • str_decode (bool) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read.

  • +
  • group (str) – Group within .h5 resource file to open

  • +
  • run_attrs (dict | NoneType) – Runtime attributes (args, kwargs) to add as global (file) +attributes

  • +
+
+
+
+ +
+
+property lat_lon
+

Extract (latitude, longitude) pairs

+
+
Returns:
+

lat_lon (ndarray)

+
+
+
+ +
+
+property meta
+

Resource meta data DataFrame

+
+
Returns:
+

meta (pandas.DataFrame)

+
+
+
+ +
+
+open_dataset(ds_name)
+

Open resource dataset

+
+
Parameters:
+

ds_name (str) – Dataset name to open

+
+
Returns:
+

ds (ResourceDataset) – Resource for open resource dataset

+
+
+
+ +
+
+property package
+

Package used to create file

+
+
Returns:
+

str

+
+
+
+ +
+
+classmethod preload_SAM(h5_file, sites, tech, unscale=True, str_decode=True, group=None, hsds=False, hsds_kwargs=None, time_index_step=None, means=False)
+

Pre-load project_points for SAM

+
+
Parameters:
+
    +
  • h5_file (str) – h5_file to extract resource from

  • +
  • sites (list) – List of sites to be provided to SAM

  • +
  • tech (str) – Technology to be run by SAM

  • +
  • unscale (bool) – Boolean flag to automatically unscale variables on extraction

  • +
  • str_decode (bool) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read.

  • +
  • group (str) – Group within .h5 resource file to open

  • +
  • hsds (bool, optional) – Boolean flag to use h5pyd to handle .h5 ‘files’ hosted on AWS +behind HSDS, by default False

  • +
  • hsds_kwargs (dict, optional) – Dictionary of optional kwargs for h5pyd, e.g., bucket, username, +password, by default None

  • +
  • time_index_step (int, optional) – Step size for time_index, used to reduce temporal resolution, +by default None

  • +
  • means (bool, optional) – Boolean flag to compute mean resource when res_array is set, +by default False

  • +
+
+
Returns:
+

SAM_res (SAMResource) – Instance of SAMResource pre-loaded with Solar resource for sites +in project_points

+
+
+
+ +
+
+property res_dsets
+

Available resource datasets

+
+
Returns:
+

list

+
+
+
+ +
+
+property resource_datasets
+

Available resource datasets

+
+
Returns:
+

list

+
+
+
+ +
+
+property run_attrs
+

Runtime attributes stored at the global (file) level

+
+
Returns:
+

global_attrs (dict)

+
+
+
+ +
+
+property scale_factors
+

Dictionary of all dataset scale factors

+
+
Returns:
+

scale_factors (dict)

+
+
+
+ +
+
+set_configs(SAM_configs)[source]
+

Set SAM configuration JSONs as attributes of ‘meta’

+
+
Parameters:
+

SAM_configs (dict) – Dictionary of SAM configuration JSONs

+
+
+
+ +
+
+property shape
+

Variable array shape from time_index and meta

+
+
Returns:
+

tuple – shape of variables arrays == (time, locations)

+
+
+
+ +
+
+property shapes
+

Dictionary of all dataset shapes

+
+
Returns:
+

shapes (dict)

+
+
+
+ +
+
+property source
+

Package and version used to create file

+
+
Returns:
+

str

+
+
+
+ +
+
+property time_index
+

Resource DatetimeIndex

+
+
Returns:
+

time_index (pandas.DatetimeIndex)

+
+
+
+ +
+
+property units
+

Dictionary of all dataset units

+
+
Returns:
+

units (dict)

+
+
+
+ +
+
+update_dset(dset, dset_array, dset_slice=None)[source]
+

Check to see if dset needs to be updated on disk +If so write dset_array to disk

+
+
Parameters:
+
    +
  • dset (str) – dataset to update

  • +
  • dset_array (ndarray) – dataset array

  • +
  • dset_slice (tuple) – slice of dataset to update, if None update all

  • +
+
+
+
+ +
+
+property version
+

Version of package used to create file

+
+
Returns:
+

str

+
+
+
+ +
+
+property writable
+

Check to see if h5py.File instance is writable

+
+
Returns:
+

is_writable (bool) – Flag if mode is writable

+
+
+
+ +
+
+write_dataset(dset_name, data, dtype, chunks=None, attrs=None)[source]
+

Write dataset to disk. Dataset is created in .h5 file and data is +scaled if needed.

+
+
Parameters:
+
    +
  • dset_name (str) – Name of dataset to be added to h5 file.

  • +
  • data (ndarray) – Data to be added to h5 file.

  • +
  • dtype (str) – Intended dataset datatype after scaling.

  • +
  • chunks (tuple) – Chunk size for capacity factor means dataset.

  • +
  • attrs (dict) – Attributes to be set. May include ‘scale_factor’.

  • +
+
+
+
+ +
+
+classmethod write_means(h5_file, meta, dset_name, means, dtype, attrs=None, SAM_configs=None, chunks=None, unscale=True, mode='w-', str_decode=True, group=None)[source]
+

Write means array to disk

+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 resource file

  • +
  • meta (pandas.Dataframe) – Locational meta data

  • +
  • dset_name (str) – Name of the target dataset (should identify the means).

  • +
  • means (ndarray) – output means array.

  • +
  • dtype (str) – Intended dataset datatype after scaling.

  • +
  • attrs (dict, optional) – Attributes to be set. May include ‘scale_factor’, by default None

  • +
  • SAM_configs (dict, optional) – Dictionary of SAM configuration JSONs used to compute cf means, +by default None

  • +
  • chunks (tuple, optional) – Chunk size for capacity factor means dataset, by default None

  • +
  • unscale (bool, optional) – Boolean flag to automatically unscale variables on extraction, +by default True

  • +
  • mode (str, optional) – Mode to instantiate h5py.File instance, by default ‘w-’

  • +
  • str_decode (bool, optional) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read, +by default True

  • +
  • group (str, optional) – Group within .h5 resource file to open, by default None

  • +
+
+
+
+ +
+
+classmethod write_profiles(h5_file, meta, time_index, dset_name, profiles, dtype, attrs=None, SAM_configs=None, chunks=(None, 100), unscale=True, mode='w-', str_decode=True, group=None)[source]
+

Write profiles to disk

+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 resource file

  • +
  • meta (pandas.Dataframe) – Locational meta data

  • +
  • time_index (pandas.DatetimeIndex) – Temporal timesteps

  • +
  • dset_name (str) – Name of the target dataset (should identify the profiles).

  • +
  • profiles (ndarray) – output result timeseries profiles

  • +
  • dtype (str) – Intended dataset datatype after scaling.

  • +
  • attrs (dict, optional) – Attributes to be set. May include ‘scale_factor’, by default None

  • +
  • SAM_configs (dict, optional) – Dictionary of SAM configuration JSONs used to compute cf means, +by default None

  • +
  • chunks (tuple, optional) – Chunk size for capacity factor means dataset, +by default (None, 100)

  • +
  • unscale (bool, optional) – Boolean flag to automatically unscale variables on extraction, +by default True

  • +
  • mode (str, optional) – Mode to instantiate h5py.File instance, by default ‘w-’

  • +
  • str_decode (bool, optional) – Boolean flag to decode the bytestring meta data into normal +strings. Setting this to False will speed up the meta data read, +by default True

  • +
  • group (str, optional) – Group within .h5 resource file to open, by default None

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.outputs.html b/_autosummary/reV.handlers.outputs.html new file mode 100644 index 000000000..31b3f2b86 --- /dev/null +++ b/_autosummary/reV.handlers.outputs.html @@ -0,0 +1,639 @@ + + + + + + + reV.handlers.outputs — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers.outputs

+

Classes to handle reV h5 output files.

+

Classes

+ + + + + + +

Outputs(h5_file[, mode, unscale, ...])

Base class to handle reV output data in .h5 format

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.transmission.TransmissionCosts.html b/_autosummary/reV.handlers.transmission.TransmissionCosts.html new file mode 100644 index 000000000..e760e7c13 --- /dev/null +++ b/_autosummary/reV.handlers.transmission.TransmissionCosts.html @@ -0,0 +1,811 @@ + + + + + + + reV.handlers.transmission.TransmissionCosts — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers.transmission.TransmissionCosts

+
+
+class TransmissionCosts(trans_table, line_tie_in_cost=14000, line_cost=2279, station_tie_in_cost=0, center_tie_in_cost=0, sink_tie_in_cost=1000000000.0, avail_cap_frac=1, line_limited=False)[source]
+

Bases: TransmissionFeatures

+

Class to compute supply curve -> transmission feature costs

+
+
Parameters:
+
    +
  • trans_table (str | pandas.DataFrame) – Path to .csv or config file or DataFrame containing supply curve +transmission mapping

  • +
  • line_tie_in_cost (float, optional) – Cost of connecting to a transmission line in $/MW, +by default 14000

  • +
  • line_cost (float, optional) – Cost of building transmission line during connection in $/MW-km, +by default 2279

  • +
  • station_tie_in_cost (float, optional) – Cost of connecting to a substation in $/MW, +by default 0

  • +
  • center_tie_in_cost (float, optional) – Cost of connecting to a load center in $/MW, +by default 0

  • +
  • sink_tie_in_cost (float, optional) – Cost of connecting to a synthetic load center (infinite sink) +in $/MW, by default 1e9

  • +
  • avail_cap_frac (float, optional) – Fraction of capacity that is available for connection, by default 1

  • +
  • line_limited (bool, optional) – Substation connection is limited by maximum capacity of the +attached lines, legacy method, by default False

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + +

available_capacity(gid)

Get available capacity for given line

check_availability(gid)

Check availability of feature with given gid

check_feature_dependencies()

Check features for dependencies that are missing and raise error.

connect(gid, capacity[, apply])

Check if you can connect to given feature If apply, update internal dictionary accordingly

cost(gid, distance[, ...])

Compute levelized cost of transmission (LCOT) for connecting to given feature

feature_capacity(trans_table[, avail_cap_frac])

Compute available capacity for all features

feature_costs(trans_table[, capacity, ...])

Compute costs for all connections in given transmission table

+
+
+available_capacity(gid)[source]
+

Get available capacity for given line

+
+
Parameters:
+

gid (int) – Unique id of feature of interest

+
+
Returns:
+

avail_cap (float) – Available capacity = capacity * available fraction +default = 100%

+
+
+
+ +
+
+classmethod feature_costs(trans_table, capacity=None, line_tie_in_cost=14000, line_cost=2279, station_tie_in_cost=0, center_tie_in_cost=0, sink_tie_in_cost=1000000000.0, avail_cap_frac=1, line_limited=False)[source]
+

Compute costs for all connections in given transmission table

+
+
Parameters:
+
    +
  • trans_table (str | pandas.DataFrame) – Path to .csv or .json containing supply curve transmission mapping

  • +
  • capacity (float) – Capacity needed in MW, if None DO NOT check if connection is +possible

  • +
  • line_tie_in_cost (float, optional) – Cost of connecting to a transmission line in $/MW, +by default 14000

  • +
  • line_cost (float, optional) – Cost of building transmission line during connection in $/MW-km, +by default 2279

  • +
  • station_tie_in_cost (float, optional) – Cost of connecting to a substation in $/MW, +by default 0

  • +
  • center_tie_in_cost (float, optional) – Cost of connecting to a load center in $/MW, +by default 0

  • +
  • sink_tie_in_cost (float, optional) – Cost of connecting to a synthetic load center (infinite sink) +in $/MW, by default 1e9

  • +
  • avail_cap_frac (float, optional) – Fraction of capacity that is available for connection, by default 1

  • +
  • line_limited (bool, optional) – Substation connection is limited by maximum capacity of the +attached lines, legacy method, by default False

  • +
+
+
Returns:
+

cost (ndarray) – Cost of transmission in $/MW, if None indicates connection is +NOT possible

+
+
+
+ +
+
+check_availability(gid)
+

Check availability of feature with given gid

+
+
Parameters:
+

gid (int) – Feature gid to check

+
+
Returns:
+

bool – Whether the gid is available or not

+
+
+
+ +
+
+check_feature_dependencies()
+

Check features for dependencies that are missing and raise error.

+
+ +
+
+connect(gid, capacity, apply=True)
+

Check if you can connect to given feature +If apply, update internal dictionary accordingly

+
+
Parameters:
+
    +
  • gid (int) – Unique id of feature of interest

  • +
  • capacity (float) – Capacity needed in MW

  • +
  • apply (bool) – Apply capacity to feature with given gid and update +internal dictionary

  • +
+
+
Returns:
+

connected (bool) – Flag as to whether connection is possible or not

+
+
+
+ +
+
+cost(gid, distance, transmission_multiplier=1, capacity=None)
+

Compute levelized cost of transmission (LCOT) for connecting to given +feature

+
+
Parameters:
+
    +
  • gid (int) – Feature gid to connect to

  • +
  • distance (float) – Distance to feature in kms

  • +
  • transmission_multiplier (float) – Multiplier for region specific line cost increases

  • +
  • capacity (float) – Capacity needed in MW, if None DO NOT check if connection is +possible

  • +
+
+
Returns:
+

cost (float) – Cost of transmission in $/MW, if None indicates connection is +NOT possible

+
+
+
+ +
+
+classmethod feature_capacity(trans_table, avail_cap_frac=1)
+

Compute available capacity for all features

+
+
Parameters:
+
    +
  • trans_table (str | pandas.DataFrame) – Path to .csv or .json containing supply curve transmission mapping

  • +
  • avail_cap_frac (float, optional) – Fraction of capacity that is available for connection, by default 1

  • +
+
+
Returns:
+

feature_cap (pandas.DataFrame) – Available Capacity for each transmission feature

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.transmission.TransmissionFeatures.html b/_autosummary/reV.handlers.transmission.TransmissionFeatures.html new file mode 100644 index 000000000..2575877bb --- /dev/null +++ b/_autosummary/reV.handlers.transmission.TransmissionFeatures.html @@ -0,0 +1,776 @@ + + + + + + + reV.handlers.transmission.TransmissionFeatures — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers.transmission.TransmissionFeatures

+
+
+class TransmissionFeatures(trans_table, line_tie_in_cost=14000, line_cost=2279, station_tie_in_cost=0, center_tie_in_cost=0, sink_tie_in_cost=1000000000.0, avail_cap_frac=1, line_limited=False)[source]
+

Bases: object

+

Class to handle Supply Curve Transmission features

+
+
Parameters:
+
    +
  • trans_table (str | pandas.DataFrame) – Path to .csv or config file or DataFrame containing supply curve +transmission mapping

  • +
  • line_tie_in_cost (float, optional) – Cost of connecting to a transmission line in $/MW, +by default 14000

  • +
  • line_cost (float, optional) – Cost of building transmission line during connection in $/MW-km, +by default 2279

  • +
  • station_tie_in_cost (float, optional) – Cost of connecting to a substation in $/MW, +by default 0

  • +
  • center_tie_in_cost (float, optional) – Cost of connecting to a load center in $/MW, +by default 0

  • +
  • sink_tie_in_cost (float, optional) – Cost of connecting to a synthetic load center (infinite sink) +in $/MW, by default 1e9

  • +
  • avail_cap_frac (float, optional) – Fraction of capacity that is available for connection, by default 1

  • +
  • line_limited (bool, optional) – Substation connection is limited by maximum capacity of the +attached lines, legacy method, by default False

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + +

available_capacity(gid)

Get available capacity for given line

check_availability(gid)

Check availability of feature with given gid

check_feature_dependencies()

Check features for dependencies that are missing and raise error.

connect(gid, capacity[, apply])

Check if you can connect to given feature If apply, update internal dictionary accordingly

cost(gid, distance[, ...])

Compute levelized cost of transmission (LCOT) for connecting to given feature

feature_capacity(trans_table[, avail_cap_frac])

Compute available capacity for all features

+
+
+check_feature_dependencies()[source]
+

Check features for dependencies that are missing and raise error.

+
+ +
+
+available_capacity(gid)[source]
+

Get available capacity for given line

+
+
Parameters:
+

gid (int) – Unique id of feature of interest

+
+
Returns:
+

avail_cap (float) – Available capacity = capacity * available fraction +default = 100%

+
+
+
+ +
+
+check_availability(gid)[source]
+

Check availability of feature with given gid

+
+
Parameters:
+

gid (int) – Feature gid to check

+
+
Returns:
+

bool – Whether the gid is available or not

+
+
+
+ +
+
+connect(gid, capacity, apply=True)[source]
+

Check if you can connect to given feature +If apply, update internal dictionary accordingly

+
+
Parameters:
+
    +
  • gid (int) – Unique id of feature of interest

  • +
  • capacity (float) – Capacity needed in MW

  • +
  • apply (bool) – Apply capacity to feature with given gid and update +internal dictionary

  • +
+
+
Returns:
+

connected (bool) – Flag as to whether connection is possible or not

+
+
+
+ +
+
+cost(gid, distance, transmission_multiplier=1, capacity=None)[source]
+

Compute levelized cost of transmission (LCOT) for connecting to given +feature

+
+
Parameters:
+
    +
  • gid (int) – Feature gid to connect to

  • +
  • distance (float) – Distance to feature in kms

  • +
  • transmission_multiplier (float) – Multiplier for region specific line cost increases

  • +
  • capacity (float) – Capacity needed in MW, if None DO NOT check if connection is +possible

  • +
+
+
Returns:
+

cost (float) – Cost of transmission in $/MW, if None indicates connection is +NOT possible

+
+
+
+ +
+
+classmethod feature_capacity(trans_table, avail_cap_frac=1)[source]
+

Compute available capacity for all features

+
+
Parameters:
+
    +
  • trans_table (str | pandas.DataFrame) – Path to .csv or .json containing supply curve transmission mapping

  • +
  • avail_cap_frac (float, optional) – Fraction of capacity that is available for connection, by default 1

  • +
+
+
Returns:
+

feature_cap (pandas.DataFrame) – Available Capacity for each transmission feature

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.handlers.transmission.html b/_autosummary/reV.handlers.transmission.html new file mode 100644 index 000000000..1031051af --- /dev/null +++ b/_autosummary/reV.handlers.transmission.html @@ -0,0 +1,642 @@ + + + + + + + reV.handlers.transmission — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.handlers.transmission

+

Module to handle Supply Curve Transmission features

+

Classes

+ + + + + + + + + +

TransmissionCosts(trans_table[, ...])

Class to compute supply curve -> transmission feature costs

TransmissionFeatures(trans_table[, ...])

Class to handle Supply Curve Transmission features

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.html b/_autosummary/reV.html new file mode 100644 index 000000000..2916e362c --- /dev/null +++ b/_autosummary/reV.html @@ -0,0 +1,678 @@ + + + + + + + reV — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV

+

The Renewable Energy Potential Model

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

reV.SAM

reV-SAM interface module

reV.bespoke

reV bespoke wind plant analysis tools

reV.cli

reV command line interface (CLI).

reV.config

reV Configuration

reV.econ

reV Econ analysis module.

reV.generation

reV Generation

reV.handlers

Sub-package of data handlers

reV.hybrids

reV hybridization tool

reV.losses

reV Losses Module

reV.nrwal

reV offshore wind econ and generation analysis module

reV.qa_qc

reV quality assurance and control module

reV.rep_profiles

reV representative profile extraction tool,

reV.supply_curve

reV Supply Curve

reV.utilities

reV utilities.

reV.version

reV Version number

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.hybrids.cli_hybrids.html b/_autosummary/reV.hybrids.cli_hybrids.html new file mode 100644 index 000000000..9a04f4657 --- /dev/null +++ b/_autosummary/reV.hybrids.cli_hybrids.html @@ -0,0 +1,631 @@ + + + + + + + reV.hybrids.cli_hybrids — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.hybrids.cli_hybrids

+

reV Representative Profiles CLI utility functions.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.hybrids.html b/_autosummary/reV.hybrids.html new file mode 100644 index 000000000..e5c2bdd88 --- /dev/null +++ b/_autosummary/reV.hybrids.html @@ -0,0 +1,643 @@ + + + + + + + reV.hybrids — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.hybrids

+

reV hybridization tool

+ + + + + + + + + + + + +

reV.hybrids.cli_hybrids

reV Representative Profiles CLI utility functions.

reV.hybrids.hybrid_methods

Collection of functions used to hybridize columns in rep profiles meta.

reV.hybrids.hybrids

reV Hybridization module.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity.html b/_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity.html new file mode 100644 index 000000000..dc7172038 --- /dev/null +++ b/_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity.html @@ -0,0 +1,648 @@ + + + + + + + reV.hybrids.hybrid_methods.aggregate_capacity — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.hybrids.hybrid_methods.aggregate_capacity

+
+
+aggregate_capacity(h)[source]
+

Compute the total capacity by summing the individual capacities.

+
+
Parameters:
+

h (reV.hybrids.Hybridization) – Instance of reV.hybrids.Hybridization class containing the +attribute hybrid_meta, which is a DataFrame containing +hybridized meta data.

+
+
Returns:
+

data (Series | None) – A series of data containing the aggregated capacity, or None +if the capacity columns are missing.

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity_factor.html b/_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity_factor.html new file mode 100644 index 000000000..8dbae8893 --- /dev/null +++ b/_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity_factor.html @@ -0,0 +1,648 @@ + + + + + + + reV.hybrids.hybrid_methods.aggregate_capacity_factor — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.hybrids.hybrid_methods.aggregate_capacity_factor

+
+
+aggregate_capacity_factor(h)[source]
+

Compute the capacity-weighted mean capacity factor.

+
+
Parameters:
+

h (reV.hybrids.Hybridization) – Instance of reV.hybrids.Hybridization class containing the +attribute hybrid_meta, which is a DataFrame containing +hybridized meta data.

+
+
Returns:
+

data (Series | None) – A series of data containing the aggregated capacity, or None +if the capacity and/or mean_cf columns are missing.

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.hybrids.hybrid_methods.aggregate_solar_capacity.html b/_autosummary/reV.hybrids.hybrid_methods.aggregate_solar_capacity.html new file mode 100644 index 000000000..71e4a7efd --- /dev/null +++ b/_autosummary/reV.hybrids.hybrid_methods.aggregate_solar_capacity.html @@ -0,0 +1,656 @@ + + + + + + + reV.hybrids.hybrid_methods.aggregate_solar_capacity — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.hybrids.hybrid_methods.aggregate_solar_capacity

+
+
+aggregate_solar_capacity(h)[source]
+

Compute the total solar capacity allowed in hybridization.

+
+

Note

+

No limiting is done on the ratio of wind to solar. This method +checks for an existing ‘hybrid_solar_capacity’. If one does not exist, +it is assumed that there is no limit on the solar to wind capacity +ratio and the solar capacity is copied into this new column.

+
+
+
Parameters:
+

h (reV.hybrids.Hybridization) – Instance of reV.hybrids.Hybridization class containing the +attribute hybrid_meta, which is a DataFrame containing +hybridized meta data.

+
+
Returns:
+

data (Series | None) – A series of data containing the capacity allowed in the hybrid +capacity sum, or None if ‘hybrid_solar_capacity’ already exists.

+
+
+

Notes

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.hybrids.hybrid_methods.aggregate_wind_capacity.html b/_autosummary/reV.hybrids.hybrid_methods.aggregate_wind_capacity.html new file mode 100644 index 000000000..4fdaee882 --- /dev/null +++ b/_autosummary/reV.hybrids.hybrid_methods.aggregate_wind_capacity.html @@ -0,0 +1,656 @@ + + + + + + + reV.hybrids.hybrid_methods.aggregate_wind_capacity — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.hybrids.hybrid_methods.aggregate_wind_capacity

+
+
+aggregate_wind_capacity(h)[source]
+

Compute the total wind capacity allowed in hybridization.

+
+

Note

+

No limiting is done on the ratio of wind to solar. This method +checks for an existing ‘hybrid_wind_capacity’. If one does not exist, +it is assumed that there is no limit on the solar to wind capacity +ratio and the wind capacity is copied into this new column.

+
+
+
Parameters:
+

h (reV.hybrids.Hybridization) – Instance of reV.hybrids.Hybridization class containing the +attribute hybrid_meta, which is a DataFrame containing +hybridized meta data.

+
+
Returns:
+

data (Series | None) – A series of data containing the capacity allowed in the hybrid +capacity sum, or None if ‘hybrid_wind_capacity’ already exists.

+
+
+

Notes

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.hybrids.hybrid_methods.html b/_autosummary/reV.hybrids.hybrid_methods.html new file mode 100644 index 000000000..d21465ae8 --- /dev/null +++ b/_autosummary/reV.hybrids.hybrid_methods.html @@ -0,0 +1,649 @@ + + + + + + + reV.hybrids.hybrid_methods — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.hybrids.hybrid_methods

+

Collection of functions used to hybridize columns in rep profiles meta.

+

@author: ppinchuk

+

Functions

+ + + + + + + + + + + + + + + +

aggregate_capacity(h)

Compute the total capacity by summing the individual capacities.

aggregate_capacity_factor(h)

Compute the capacity-weighted mean capacity factor.

aggregate_solar_capacity(h)

Compute the total solar capacity allowed in hybridization.

aggregate_wind_capacity(h)

Compute the total wind capacity allowed in hybridization.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.hybrids.hybrids.ColNameFormatter.html b/_autosummary/reV.hybrids.hybrids.ColNameFormatter.html new file mode 100644 index 000000000..1a9d126fb --- /dev/null +++ b/_autosummary/reV.hybrids.hybrids.ColNameFormatter.html @@ -0,0 +1,671 @@ + + + + + + + reV.hybrids.hybrids.ColNameFormatter — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.hybrids.hybrids.ColNameFormatter

+
+
+class ColNameFormatter[source]
+

Bases: object

+

Column name formatting helper class.

+

Methods

+ + + + + + +

fmt(n)

Format an input column name to remove excess chars and whitespace.

+

Attributes

+ + + + + + +

ALLOWED

+
+
+classmethod fmt(n)[source]
+

Format an input column name to remove excess chars and whitespace.

+

This method should help facilitate the merging of column names +between two DataFrames.

+
+
Parameters:
+

n (str) – Input column name.

+
+
Returns:
+

str – The column name with all characters except ascii stripped +and all lowercase.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.hybrids.hybrids.Hybridization.html b/_autosummary/reV.hybrids.hybrids.Hybridization.html new file mode 100644 index 000000000..293600bdd --- /dev/null +++ b/_autosummary/reV.hybrids.hybrids.Hybridization.html @@ -0,0 +1,870 @@ + + + + + + + reV.hybrids.hybrids.Hybridization — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.hybrids.hybrids.Hybridization

+
+
+class Hybridization(solar_fpath, wind_fpath, allow_solar_only=False, allow_wind_only=False, fillna=None, limits=None, ratio_bounds=None, ratio='solar_capacity/wind_capacity')[source]
+

Bases: object

+

Framework to handle hybridization of SC and corresponding profiles.

+

reV hybrids computes a “hybrid” wind and solar supply curve, +where each supply curve point contains some wind and some solar +capacity. Various ratio limits on wind-to-solar farm properties +(e.g. wind-to-solar capacity) can be applied during the +hybridization process. Hybrid generation profiles are also +computed during this process.

+
+
Parameters:
+
    +
  • solar_fpath (str) – Filepath to rep profile output file to extract solar +profiles and summaries from.

  • +
  • wind_fpath (str) – Filepath to rep profile output file to extract wind profiles +and summaries from.

  • +
  • allow_solar_only (bool, optional) – Option to allow SC points with only solar capacity +(no wind). By default, False.

  • +
  • allow_wind_only (bool, optional) – Option to allow SC points with only wind capacity +(no solar). By default, False.

  • +
  • fillna (dict, optional) – Dictionary containing column_name, fill_value pairs +representing any fill values that should be applied after +merging the wind and solar meta. Note that column names will +likely have to be prefixed with solar or wind. +By default None.

  • +
  • limits (dict, optional) – Option to specify mapping (in the form of a dictionary) of +{column_name: max_value} representing the upper limit +(maximum value) for the values of a column in the merged +meta. For example, limits={'solar_capacity': 100} would +limit all the values of the solar capacity in the merged +meta to a maximum value of 100. This limit is applied +BEFORE ratio calculations. The names of the columns should +match the column names in the merged meta, so they are +likely prefixed with solar or wind. +By default, None (no limits applied).

  • +
  • ratio_bounds (tuple, optional) – Option to set ratio bounds (in two-tuple form) on the +columns of the ratio input. For example, +ratio_bounds=(0.5, 1.5) would adjust the values of both +of the ratio columns such that their ratio is always +between half and double (e.g., no value would be more than +double the other). To specify a single ratio value, use the +same value as the upper and lower bound. For example, +ratio_bounds=(1, 1) would adjust the values of both of +the ratio columns such that their ratio is always equal. +By default, None (no limit on the ratio).

  • +
  • ratio (str, optional) – Option to specify the columns used to calculate the ratio +that is limited by the ratio_bounds input. This input is a +string in the form “{numerator_column}/{denominator_column}”. +For example, ratio='solar_capacity/wind_capacity' +would limit the ratio of the solar to wind capacities as +specified by the ratio_bounds input. If ratio_bounds +is None, this input does nothing. The names of the columns +should be prefixed with one of the prefixes defined as class +variables. By default 'solar_capacity/wind_capacity'.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + +

run([fout, save_hybrid_meta])

Run hybridization of profiles and save to disc.

run_meta()

Compute the hybridized meta.

run_profiles()

Compute all hybridized profiles.

save_profiles(fout[, save_hybrid_meta])

Initialize fout and save profiles.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + +

hybrid_meta

Hybridized summary for the representative profiles.

hybrid_time_index

Get the time index for the hybrid rep profiles.

profiles

Get the arrays of the hybridized representative profiles.

solar_meta

Summary for the solar representative profiles.

solar_time_index

Get the time index for the solar rep profiles.

wind_meta

Summary for the wind representative profiles.

wind_time_index

Get the time index for the wind rep profiles.

+
+
+property solar_meta
+

Summary for the solar representative profiles.

+
+
Returns:
+

solar_meta (pd.DataFrame) – Summary for the solar representative profiles.

+
+
+
+ +
+
+property wind_meta
+

Summary for the wind representative profiles.

+
+
Returns:
+

wind_meta (pd.DataFrame) – Summary for the wind representative profiles.

+
+
+
+ +
+
+property hybrid_meta
+

Hybridized summary for the representative profiles.

+
+
Returns:
+

hybrid_meta (pd.DataFrame) – Summary for the hybridized representative profiles. +At the very least, this has a column that the data was merged on.

+
+
+
+ +
+
+property solar_time_index
+

Get the time index for the solar rep profiles.

+
+
Returns:
+

solar_time_index (pd.Datetimeindex) – Time index sourced from the solar rep profile file.

+
+
+
+ +
+
+property wind_time_index
+

Get the time index for the wind rep profiles.

+
+
Returns:
+

wind_time_index (pd.Datetimeindex) – Time index sourced from the wind rep profile file.

+
+
+
+ +
+
+property hybrid_time_index
+

Get the time index for the hybrid rep profiles.

+
+
Returns:
+

hybrid_time_index (pd.Datetimeindex) – Time index for the hybrid rep profiles.

+
+
+
+ +
+
+property profiles
+

Get the arrays of the hybridized representative profiles.

+
+
Returns:
+

profiles (dict) – Dict of hybridized representative profiles.

+
+
+
+ +
+
+run(fout=None, save_hybrid_meta=True)[source]
+

Run hybridization of profiles and save to disc.

+
+
Parameters:
+
    +
  • fout (str, optional) – Filepath to output HDF5 file. If None, output data are +not written to a file. By default, None.

  • +
  • save_hybrid_meta (bool, optional) – Flag to save hybrid SC table to hybrid rep profile output. +By default, True.

  • +
+
+
Returns:
+

str – Filepath to output h5 file.

+
+
+
+ +
+
+run_meta()[source]
+

Compute the hybridized meta.

+
+
Returns:
+

Hybridization – Instance of Hybridization object (itself) containing the +hybridized meta as an attribute.

+
+
+
+ +
+
+run_profiles()[source]
+

Compute all hybridized profiles.

+
+
Returns:
+

Hybridization – Instance of Hybridization object (itself) containing the +hybridized profiles as attributes.

+
+
+
+ +
+
+save_profiles(fout, save_hybrid_meta=True)[source]
+

Initialize fout and save profiles.

+
+
Parameters:
+
    +
  • fout (str) – Filepath to output h5 file.

  • +
  • save_hybrid_meta (bool) – Flag to save hybrid SC table to hybrid rep profile output.

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.hybrids.hybrids.HybridsData.html b/_autosummary/reV.hybrids.hybrids.HybridsData.html new file mode 100644 index 000000000..72f3b6356 --- /dev/null +++ b/_autosummary/reV.hybrids.hybrids.HybridsData.html @@ -0,0 +1,757 @@ + + + + + + + reV.hybrids.hybrids.HybridsData — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.hybrids.hybrids.HybridsData

+
+
+class HybridsData(solar_fpath, wind_fpath)[source]
+

Bases: object

+

Hybrids input data container.

+
+
Parameters:
+
    +
  • solar_fpath (str) – Filepath to rep profile output file to extract solar profiles and +summaries from.

  • +
  • wind_fpath (str) – Filepath to rep profile output file to extract wind profiles and +summaries from.

  • +
+
+
+

Methods

+ + + + + + + + + +

contains_col(col_name)

Check if input column name exists in either meta data set.

validate()

Validate the input data.

+

Attributes

+ + + + + + + + + + + + + + + + + + +

hybrid_time_index

Get the time index for the hybrid rep profiles.

solar_meta

Summary for the solar representative profiles.

solar_time_index

Get the time index for the solar rep profiles.

wind_meta

Summary for the wind representative profiles.

wind_time_index

Get the time index for the wind rep profiles.

+
+
+property solar_meta
+

Summary for the solar representative profiles.

+
+
Returns:
+

solar_meta (pd.DataFrame) – Summary for the solar representative profiles.

+
+
+
+ +
+
+property wind_meta
+

Summary for the wind representative profiles.

+
+
Returns:
+

wind_meta (pd.DataFrame) – Summary for the wind representative profiles.

+
+
+
+ +
+
+property solar_time_index
+

Get the time index for the solar rep profiles.

+
+
Returns:
+

solar_time_index (pd.DatetimeIndex) – Time index sourced from the solar reV gen file.

+
+
+
+ +
+
+property wind_time_index
+

Get the time index for the wind rep profiles.

+
+
Returns:
+

wind_time_index (pd.DatetimeIndex) – Time index sourced from the wind reV gen file.

+
+
+
+ +
+
+property hybrid_time_index
+

Get the time index for the hybrid rep profiles.

+
+
Returns:
+

hybrid_time_index (pd.DatetimeIndex) – Time index for the hybrid rep profiles.

+
+
+
+ +
+
+contains_col(col_name)[source]
+

Check if input column name exists in either meta data set.

+
+
Parameters:
+

col_name (str) – Name of column to check for.

+
+
Returns:
+

bool – Whether or not the column is found in either meta data set.

+
+
+
+ +
+
+validate()[source]
+

Validate the input data.

+

This method checks for a minimum time index length, a unique +profile, and unique merge column that overlaps between both data +sets.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.hybrids.hybrids.MetaHybridizer.html b/_autosummary/reV.hybrids.hybrids.MetaHybridizer.html new file mode 100644 index 000000000..c73c832dd --- /dev/null +++ b/_autosummary/reV.hybrids.hybrids.MetaHybridizer.html @@ -0,0 +1,768 @@ + + + + + + + reV.hybrids.hybrids.MetaHybridizer — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.hybrids.hybrids.MetaHybridizer

+
+
+class MetaHybridizer(data, allow_solar_only=False, allow_wind_only=False, fillna=None, limits=None, ratio_bounds=None, ratio='solar_capacity/wind_capacity')[source]
+

Bases: object

+

Framework to handle hybridization of meta data.

+
+
Parameters:
+
    +
  • data (HybridsData) – Instance of HybridsData containing input data to +hybridize.

  • +
  • allow_solar_only (bool, optional) – Option to allow SC points with only solar capacity +(no wind). By default, False.

  • +
  • allow_wind_only (bool, optional) – Option to allow SC points with only wind capacity +(no solar). By default, False.

  • +
  • fillna (dict, optional) – Dictionary containing column_name, fill_value pairs +representing any fill values that should be applied after +merging the wind and solar meta. Note that column names will +likely have to be prefixed with solar or wind. +By default, None.

  • +
  • limits (dict, optional) – Option to specify mapping (in the form of a dictionary) of +{column_name: max_value} representing the upper limit +(maximum value) for the values of a column in the merged +meta. For example, limits={'solar_capacity': 100} would +limit all the values of the solar capacity in the merged +meta to a maximum value of 100. This limit is applied +BEFORE ratio calculations. The names of the columns should +match the column names in the merged meta, so they are +likely prefixed with solar or wind. By default, +None (no limits applied).

  • +
  • ratio_bounds (tuple, optional) – Option to set ratio bounds (in two-tuple form) on the +columns of the ratio input. For example, +ratio_bounds=(0.5, 1.5) would adjust the values of both of +the ratio columns such that their ratio is always between +half and double (e.g., no value would be more than double +the other). To specify a single ratio value, use the same +value as the upper and lower bound. For example, +ratio_bounds=(1, 1) would adjust the values of both of the +ratio columns such that their ratio is always equal. +By default, None (no limit on the ratio).

  • +
  • ratio (str, optional) – Option to specify the columns used to calculate the ratio +that is limited by the ratio_bounds input. This input is a +string in the form +“numerator_column_name/denominator_column_name”. +For example, ratio=’solar_capacity/wind_capacity’ would +limit the ratio of the solar to wind capacities as specified +by the ratio_bounds input. If ratio_bounds is None, +this input does nothing. The names of the columns should be +prefixed with one of the prefixes defined as class +variables. By default 'solar_capacity/wind_capacity'.

  • +
+
+
+

Methods

+ + + + + + + + + +

hybridize()

Combine the solar and wind metas and run hybridize methods.

validate_input()

Validate the input parameters.

+

Attributes

+ + + + + + + + + + + + +

hybrid_meta

Hybridized summary for the representative profiles.

solar_profile_indices_map

Map hybrid to solar rep indices.

wind_profile_indices_map

Map hybrid to wind rep indices.

+
+
+property hybrid_meta
+

Hybridized summary for the representative profiles.

+
+
Returns:
+

hybrid_meta (pd.DataFrame) – Summary for the hybridized representative profiles. +At the very least, this has a column that the data was merged on.

+
+
+
+ +
+
+validate_input()[source]
+

Validate the input parameters.

+

This method validates that the input limit, fill, and ratio columns +are formatted correctly.

+
+ +
+
+hybridize()[source]
+

Combine the solar and wind metas and run hybridize methods.

+
+ +
+
+property solar_profile_indices_map
+

Map hybrid to solar rep indices.

+
+
Returns:
+

    +
  • hybrid_indices (np.ndarray) – Index values corresponding to hybrid rep profiles.

  • +
  • solar_indices (np.ndarray) – Index values of the solar rep profiles corresponding +to the hybrid rep profile indices.

  • +
+

+
+
+
+ +
+
+property wind_profile_indices_map
+

Map hybrid to wind rep indices.

+
+
Returns:
+

    +
  • hybrid_indices (np.ndarray) – Index values corresponding to hybrid rep profiles.

  • +
  • wind_indices (np.ndarray) – Index values of the wind rep profiles corresponding +to the hybrid rep profile indices.

  • +
+

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.hybrids.hybrids.RatioColumns.html b/_autosummary/reV.hybrids.hybrids.RatioColumns.html new file mode 100644 index 000000000..381912aaa --- /dev/null +++ b/_autosummary/reV.hybrids.hybrids.RatioColumns.html @@ -0,0 +1,706 @@ + + + + + + + reV.hybrids.hybrids.RatioColumns — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.hybrids.hybrids.RatioColumns

+
+
+class RatioColumns(num, denom, fixed)
+

Bases: tuple

+

Create new instance of RatioColumns(num, denom, fixed)

+

Methods

+ + + + + + + + + +

count(value, /)

Return number of occurrences of value.

index(value[, start, stop])

Return first index of value.

+

Attributes

+ + + + + + + + + + + + +

denom

Alias for field number 1

fixed

Alias for field number 2

num

Alias for field number 0

+
+
+__add__(value, /)
+

Return self+value.

+
+ +
+
+__mul__(value, /)
+

Return self*value.

+
+ +
+
+count(value, /)
+

Return number of occurrences of value.

+
+ +
+
+denom
+

Alias for field number 1

+
+ +
+
+fixed
+

Alias for field number 2

+
+ +
+
+index(value, start=0, stop=9223372036854775807, /)
+

Return first index of value.

+

Raises ValueError if the value is not present.

+
+ +
+
+num
+

Alias for field number 0

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.hybrids.hybrids.html b/_autosummary/reV.hybrids.hybrids.html new file mode 100644 index 000000000..9361d0e48 --- /dev/null +++ b/_autosummary/reV.hybrids.hybrids.html @@ -0,0 +1,652 @@ + + + + + + + reV.hybrids.hybrids — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.hybrids.hybrids

+

reV Hybridization module.

+

@author: ppinchuk

+

Classes

+ + + + + + + + + + + + + + + + + + +

ColNameFormatter()

Column name formatting helper class.

Hybridization(solar_fpath, wind_fpath[, ...])

Framework to handle hybridization of SC and corresponding profiles.

HybridsData(solar_fpath, wind_fpath)

Hybrids input data container.

MetaHybridizer(data[, allow_solar_only, ...])

Framework to handle hybridization of meta data.

RatioColumns(num, denom, fixed)

Create new instance of RatioColumns(num, denom, fixed)

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.html b/_autosummary/reV.losses.html new file mode 100644 index 000000000..a01e6b4b2 --- /dev/null +++ b/_autosummary/reV.losses.html @@ -0,0 +1,643 @@ + + + + + + + reV.losses — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses

+

reV Losses Module

+ + + + + + + + + + + + +

reV.losses.power_curve

reV power curve losses module.

reV.losses.scheduled

reV scheduled losses module.

reV.losses.utils

reV-losses utilities.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.power_curve.AbstractPowerCurveTransformation.html b/_autosummary/reV.losses.power_curve.AbstractPowerCurveTransformation.html new file mode 100644 index 000000000..da3d208ee --- /dev/null +++ b/_autosummary/reV.losses.power_curve.AbstractPowerCurveTransformation.html @@ -0,0 +1,741 @@ + + + + + + + reV.losses.power_curve.AbstractPowerCurveTransformation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.power_curve.AbstractPowerCurveTransformation

+
+
+class AbstractPowerCurveTransformation(power_curve)[source]
+

Bases: ABC

+

Abstract base class for power curve transformations.

+

This class is not meant to be instantiated.

+

This class provides an interface for power curve transformations, +which are meant to more realistically represent certain types of +losses when compared to simple haircut losses (i.e. constant loss +value applied at all points on the power curve).

+

If you would like to implement your own power curve transformation, +you should subclass this class and implement the apply() +method and the bounds property. See the documentation for +each of these below for more details.

+
+
+power_curve
+

The “original” input power curve.

+
+
Type:
+

PowerCurve

+
+
+
+ +

Abstract Power Curve Transformation class.

+
+
Parameters:
+

power_curve (PowerCurve) – The turbine power curve. This input is treated as the +“original” power curve.

+
+
+

Methods

+ + + + + + +

apply(transformation_var)

Apply a transformation to the original power curve.

+

Attributes

+ + + + + + + + + +

bounds

true Bounds on the transformation_var.

optm_bounds

Bounds for scipy optimization, sometimes more generous than the actual fit parameter bounds which are enforced after the optimization.

+
+
+abstract apply(transformation_var)[source]
+

Apply a transformation to the original power curve.

+
+
Parameters:
+

transformation_var (float) – A single variable controlling the “strength” of the +transformation. The PowerCurveLosses object will +run an optimization using this variable to fit the target +annual losses incurred with the transformed power curve +compared to the original power curve using the given wind +resource distribution.

+
+
Returns:
+

PowerCurve – A new power curve containing the generation values from the +transformed power curve.

+
+
Raises:
+

NotImplementedError – If the transformation implementation did not set the + _transformed_generation attribute.

+
+
+

Notes

+

When implementing a new transformation, override this method and +set the _transformed_generation protected attribute to be +the generation corresponding to the transformed power curve. +Then, call super().apply(transformation_var) in order to +apply cutout speed curtailment and validation for the +transformed power curve. For example, here is the implementation +for a transformation that shifts the power curve horizontally:

+
self._transformed_generation = self.power_curve(
+    self.power_curve.wind_speed - transformation_var
+)
+return super().apply(transformation_var)
+
+
+
+ +
+
+abstract property bounds
+

true Bounds on the transformation_var.

+
+
Type:
+

tuple

+
+
+
+ +
+
+property optm_bounds
+

Bounds for scipy optimization, sometimes more generous than the +actual fit parameter bounds which are enforced after the +optimization.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.power_curve.ExponentialStretching.html b/_autosummary/reV.losses.power_curve.ExponentialStretching.html new file mode 100644 index 000000000..e3edb09ca --- /dev/null +++ b/_autosummary/reV.losses.power_curve.ExponentialStretching.html @@ -0,0 +1,725 @@ + + + + + + + reV.losses.power_curve.ExponentialStretching — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.power_curve.ExponentialStretching

+
+
+class ExponentialStretching(power_curve)[source]
+

Bases: AbstractPowerCurveTransformation

+

Utility for applying an exponential stretch to the power curve.

+

The mathematical representation of this transformation is:

+
+\[P_{transformed}(u) = P_{original}(u^{1/t}),\]
+

where \(P_{transformed}\) is the transformed power curve, +\(P_{original}\) is the original power curve, \(u\) is +the wind speed, and \(t\) is the transformation variable +(wind speed exponent).

+

The losses in this type of transformation are distributed primarily +across regions 2 and 3 of the power curve. In particular, losses are +smaller for wind speeds closer to the cut-in speed, and larger for +speeds close to rated power:

+../_images/exponential_stretching.png +
+
+power_curve
+

The “original” input power curve.

+
+
Type:
+

PowerCurve

+
+
+
+ +

Abstract Power Curve Transformation class.

+
+
Parameters:
+

power_curve (PowerCurve) – The turbine power curve. This input is treated as the +“original” power curve.

+
+
+

Methods

+ + + + + + +

apply(transformation_var)

Apply an exponential stretch to the original power curve.

+

Attributes

+ + + + + + + + + +

bounds

Bounds on the wind speed exponent.

optm_bounds

Bounds for scipy optimization, sometimes more generous than the actual fit parameter bounds which are enforced after the optimization.

+
+
+apply(transformation_var)[source]
+

Apply an exponential stretch to the original power curve.

+

This function stretches the original power curve along the +“wind speed” (x) axis. Any power above the cutoff speed (if one +was detected) is truncated after the transformation.

+
+
Parameters:
+

transformation_var (float) – The exponent of the wind speed scaling.

+
+
Returns:
+

PowerCurve – A new power curve containing the generation values from the +shifted power curve.

+
+
+
+ +
+
+property bounds
+

Bounds on the wind speed exponent.

+
+
Type:
+

tuple

+
+
+
+ +
+
+property optm_bounds
+

Bounds for scipy optimization, sometimes more generous than the +actual fit parameter bounds which are enforced after the +optimization.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.power_curve.HorizontalTranslation.html b/_autosummary/reV.losses.power_curve.HorizontalTranslation.html new file mode 100644 index 000000000..7af218de9 --- /dev/null +++ b/_autosummary/reV.losses.power_curve.HorizontalTranslation.html @@ -0,0 +1,737 @@ + + + + + + + reV.losses.power_curve.HorizontalTranslation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.power_curve.HorizontalTranslation

+
+
+class HorizontalTranslation(power_curve)[source]
+

Bases: AbstractPowerCurveTransformation

+

Utility for applying horizontal power curve translations.

+

The mathematical representation of this transformation is:

+
+\[P_{transformed}(u) = P_{original}(u - t),\]
+

where \(P_{transformed}\) is the transformed power curve, +\(P_{original}\) is the original power curve, \(u\) is +the wind speed, and \(t\) is the transformation variable +(horizontal translation amount).

+

This kind of power curve transformation is simplistic, and should +only be used for a small handful of applicable turbine losses +(i.e. blade degradation). See Warnings for more details.

+

The losses in this type of transformation are distributed primarily +across region 2 of the power curve (the steep, almost linear, +portion where the generation rapidly increases):

+../_images/horizontal_translation.png +
+
+power_curve
+

The “original” input power curve.

+
+
Type:
+

PowerCurve

+
+
+
+ +
+

Warning

+

This kind of power curve translation is not generally realistic. +Using this transformation as a primary source of losses (i.e. many +different kinds of losses bundled together) is extremely likely to +yield unrealistic results!

+
+

Abstract Power Curve Transformation class.

+
+
Parameters:
+

power_curve (PowerCurve) – The turbine power curve. This input is treated as the +“original” power curve.

+
+
+

Methods

+ + + + + + +

apply(transformation_var)

Apply a horizontal translation to the original power curve.

+

Attributes

+ + + + + + + + + +

bounds

true Bounds on the power curve shift (different from the optimization boundaries)

optm_bounds

Bounds for scipy optimization, sometimes more generous than the actual fit parameter bounds which are enforced after the optimization.

+
+
+apply(transformation_var)[source]
+

Apply a horizontal translation to the original power curve.

+

This function shifts the original power curve horizontally, +along the “wind speed” (x) axis, by the given amount. Any power +above the cutoff speed (if one was detected) is truncated after +the transformation.

+
+
Parameters:
+

transformation_var (float) – The amount to shift the original power curve by, in wind +speed units (typically, m/s).

+
+
Returns:
+

PowerCurve – A new power curve containing the generation values from the +shifted power curve.

+
+
+
+ +
+
+property bounds
+

true Bounds on the power curve shift (different from the +optimization boundaries)

+
+
Type:
+

tuple

+
+
+
+ +
+
+property optm_bounds
+

Bounds for scipy optimization, sometimes more generous than the +actual fit parameter bounds which are enforced after the +optimization.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.power_curve.LinearStretching.html b/_autosummary/reV.losses.power_curve.LinearStretching.html new file mode 100644 index 000000000..8a8c9176c --- /dev/null +++ b/_autosummary/reV.losses.power_curve.LinearStretching.html @@ -0,0 +1,726 @@ + + + + + + + reV.losses.power_curve.LinearStretching — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.power_curve.LinearStretching

+
+
+class LinearStretching(power_curve)[source]
+

Bases: AbstractPowerCurveTransformation

+

Utility for applying a linear stretch to the power curve.

+

The mathematical representation of this transformation is:

+
+\[P_{transformed}(u) = P_{original}(u/t),\]
+

where \(P_{transformed}\) is the transformed power curve, +\(P_{original}\) is the original power curve, \(u\) is +the wind speed, and \(t\) is the transformation variable +(wind speed multiplier).

+

The losses in this type of transformation are distributed primarily +across regions 2 and 3 of the power curve. In particular, losses are +smaller for wind speeds closer to the cut-in speed, and larger for +speeds close to rated power:

+../_images/linear_stretching.png +
+
+power_curve
+

The “original” input power curve.

+
+
Type:
+

PowerCurve

+
+
+
+ +

Abstract Power Curve Transformation class.

+
+
Parameters:
+

power_curve (PowerCurve) – The turbine power curve. This input is treated as the +“original” power curve.

+
+
+

Methods

+ + + + + + +

apply(transformation_var)

Apply a linear stretch to the original power curve.

+

Attributes

+ + + + + + + + + +

bounds

true Bounds on the wind speed multiplier (different from the optimization boundaries)

optm_bounds

Bounds for scipy optimization, sometimes more generous than the actual fit parameter bounds which are enforced after the optimization.

+
+
+apply(transformation_var)[source]
+

Apply a linear stretch to the original power curve.

+

This function stretches the original power curve along the +“wind speed” (x) axis. Any power above the cutoff speed (if one +was detected) is truncated after the transformation.

+
+
Parameters:
+

transformation_var (float) – The linear multiplier of the wind speed scaling.

+
+
Returns:
+

PowerCurve – A new power curve containing the generation values from the +shifted power curve.

+
+
+
+ +
+
+property bounds
+

true Bounds on the wind speed multiplier (different from the +optimization boundaries)

+
+
Type:
+

tuple

+
+
+
+ +
+
+property optm_bounds
+

Bounds for scipy optimization, sometimes more generous than the +actual fit parameter bounds which are enforced after the +optimization.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.power_curve.PowerCurve.html b/_autosummary/reV.losses.power_curve.PowerCurve.html new file mode 100644 index 000000000..3a53a0319 --- /dev/null +++ b/_autosummary/reV.losses.power_curve.PowerCurve.html @@ -0,0 +1,760 @@ + + + + + + + reV.losses.power_curve.PowerCurve — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.power_curve.PowerCurve

+
+
+class PowerCurve(wind_speed, generation)[source]
+

Bases: object

+

A turbine power curve.

+
+
+wind_speed
+

An array containing the wind speeds corresponding to the values +in the generation array.

+
+
Type:
+

numpy.array

+
+
+
+ +
+
+generation
+

An array containing the generated power in kW at the corresponding +wind speed in the wind_speed array. This input must have +at least one positive value, and if a cutoff speed is detected +(see Warnings section below), then all values above that wind +speed must be set to 0.

+
+
Type:
+

numpy.array

+
+
+
+ +
+

Warning

+

This class will attempt to infer a cutoff speed from the +generation input. Specifically, it will look for a transition +from the highest rated power down to zero in a single wind_speed +step of the power curve. If such a transition is detected, the wind +speed corresponding to the zero value will be set as the cutoff +speed, and all calculated power curves will be clipped at this +speed. If your input power curve contains a cutoff speed, ensure +that it adheres to the expected pattern of dropping from max rated +power to zero power in a single wind speed step.

+
+
+
Parameters:
+
    +
  • wind_speed (array_like) – An iterable containing the wind speeds corresponding to the +generated power values in generation input. The input +values should all be non-zero.

  • +
  • generation (array_like) – An iterable containing the generated power in kW at the +corresponding wind speed in the wind_speed input. This +input must have at least one positive value, and if a cutoff +speed is detected (see Warnings section below), then all +values above that wind speed must be set to 0.

  • +
+
+
+

Methods

+ + + +
+

Attributes

+ + + + + + + + + + + + +

cutin_wind_speed

The detected cut-in wind speed at which power generation begins

cutoff_wind_speed

The detected cutoff wind speed at which the power generation is zero

rated_power

Get the rated power (max power) of the turbine power curve.

+
+
+property cutin_wind_speed
+

The detected cut-in wind speed at which power generation begins

+
+
Returns:
+

float

+
+
+
+ +
+
+property cutoff_wind_speed
+

The detected cutoff wind speed at which the power generation is zero

+
+
Returns:
+

float | np.inf

+
+
+
+ +
+
+property rated_power
+

Get the rated power (max power) of the turbine power curve. The +units are dependent on the input power curve but this is typically in +units of kW.

+
+
Returns:
+

float

+
+
+
+ +
+
+__call__(wind_speed)[source]
+

Calculate the power curve value for the given wind_speed.

+
+
Parameters:
+

wind_speed (int | float | list | array_like) – Wind speed value corresponding to the desired power curve +value.

+
+
Returns:
+

float | numpy.array – The power curve value(s) for the input wind speed(s).

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.power_curve.PowerCurveLosses.html b/_autosummary/reV.losses.power_curve.PowerCurveLosses.html new file mode 100644 index 000000000..30955543b --- /dev/null +++ b/_autosummary/reV.losses.power_curve.PowerCurveLosses.html @@ -0,0 +1,810 @@ + + + + + + + reV.losses.power_curve.PowerCurveLosses — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.power_curve.PowerCurveLosses

+
+
+class PowerCurveLosses(power_curve, wind_resource, weights=None, site=None)[source]
+

Bases: object

+

A converter between annual losses and power curve transformation.

+

Given a target annual loss value, this class facilitates the +calculation of a power curve transformation such that the annual +generation losses incurred by using the transformed power curve when +compared to the original (non-transformed) power curve match the +target loss as close as possible.

+

The underlying assumption for this approach is that some types of +losses can be realized by a transformation of the power curve (see +the values of TRANSFORMATIONS for details on all of the +power curve transformations that have been implemented).

+

The advantage of this approach is that, unlike haircut losses (where +a single loss value is applied across the board to all generation), +the losses are distributed non-uniformly across the power curve. For +example, even in the overly simplified case of a horizontal +translation of the power curve (which is only physically realistic +for certain types of losses like blade degradation), the losses are +distributed primarily across region 2 of the power curve (the steep, +almost linear, portion where the generation rapidly increases). This +means that, unlike with haircut losses, generation is able to reach +max rated power (albeit at a greater wind speed).

+
+
+power_curve
+

The original Power Curve.

+
+
Type:
+

PowerCurve

+
+
+
+ +
+
+wind_resource
+

An array containing the wind speeds (i.e. wind speed +distribution) for the site at which the power curve will be +used. This distribution is used to calculate the annual +generation of the original power curve as well as any additional +calculated power curves. The generation values are then compared +in order to calculate the loss resulting from a transformed +power curve.

+
+
Type:
+

numpy.array

+
+
+
+ +
+
+weights
+

An array of the same length as wind_resource containing +weights to apply to each generation value calculated for the +corresponding wind speed.

+
+
Type:
+

numpy.array

+
+
+
+ +
+
Parameters:
+
    +
  • power_curve (PowerCurve) – The “original” power curve to be adjusted.

  • +
  • wind_resource (array_like) – An iterable containing the wind speeds measured at the site +where this power curve will be applied to calculate +generation. These values are used to calculate the loss +resulting from a transformed power curve compared to the +generation of the original power curve. The input +values should all be non-zero, and the units of wind_resource +should match the units of the power_curve input +(typically, m/s).

  • +
  • weights (array_like, optional) – An iterable of the same length as wind_resource +containing weights to apply to each generation value +calculated for the corresponding wind speed.

  • +
  • site (int | str, optional) – Site number (gid) for debugging and logging. +By default, None.

  • +
+
+
+

Methods

+ + + + + + + + + +

annual_losses_with_transformed_power_curve(...)

Calculate the annual losses from a transformed power curve.

fit(target, transformation)

Fit a power curve transformation.

+

Attributes

+ + + + + + +

power_gen_no_losses

Total power generation from original power curve.

+
+
+annual_losses_with_transformed_power_curve(transformed_power_curve)[source]
+

Calculate the annual losses from a transformed power curve.

+

This function uses the wind resource data that the object was +initialized with to calculate the total annual power generation +with a transformed power curve. This generation is compared with +the generation of the original (non-transformed) power curve to +compute the total annual losses as a result of the +transformation.

+
+
Parameters:
+

transformed_power_curve (PowerCurve) – A transformed power curve. The power generated with this +power curve will be compared with the power generated by the +“original” power curve to calculate annual losses.

+
+
Returns:
+

float – Total losses (%) as a result of the power curve +transformation.

+
+
+
+ +
+
+fit(target, transformation)[source]
+

Fit a power curve transformation.

+

This function fits a transformation to the input power curve +(the one used to initialize the object) to generate an annual +loss percentage closest to the target. The losses are +computed w.r.t the generation of the original (non-transformed) +power curve.

+
+
Parameters:
+
    +
  • target (float) – Target value for annual generation losses (%).

  • +
  • transformation (PowerCurveTransformation) – A PowerCurveTransformation class representing the power +curve transformation to use.

  • +
+
+
Returns:
+

numpy.array – An array containing a transformed power curve that most +closely yields the target annual generation losses.

+
+
Warns:
+

reVLossesWarning – If the fit did not meet the target annual losses to within +1%.

+
+
+
+

Warning

+

This function attempts to find an optimal transformation for the +power curve such that the annual generation losses match the +target value, but there is no guarantee that a close match +can be found, if it even exists. Therefore, it is possible that +the losses resulting from the transformed power curve will not +match the target. This is especially likely if the +target is large or if the input power curve and/or wind +resource is abnormal.

+
+
+ +
+
+property power_gen_no_losses
+

Total power generation from original power curve.

+
+
Type:
+

float

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.power_curve.PowerCurveLossesInput.html b/_autosummary/reV.losses.power_curve.PowerCurveLossesInput.html new file mode 100644 index 000000000..4375d2cd4 --- /dev/null +++ b/_autosummary/reV.losses.power_curve.PowerCurveLossesInput.html @@ -0,0 +1,720 @@ + + + + + + + reV.losses.power_curve.PowerCurveLossesInput — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.power_curve.PowerCurveLossesInput

+
+
+class PowerCurveLossesInput(specs)[source]
+

Bases: object

+

Power curve losses specification.

+

This class stores and validates information about the desired losses +from a given type of power curve transformation. In particular, the +target loss percentage must be provided. This input is then +validated to be used for power curve transformation fitting.

+
+
Parameters:
+

specs (dict) – A dictionary containing specifications for the power curve +losses. This dictionary must contain the following keys:

+
+
    +
  • +
    target_losses_percent

    An integer or float value representing the +total percentage of annual energy production that +should be lost due to the power curve transformation. +This value must be in the range [0, 100].

    +
    +
    +
  • +
+
+

The input dictionary can also provide the following optional +keys:

+
+
    +
  • transformation - by default, horizontal_translation +A string representing the type of transformation to +apply to the power curve. This string must be one of +the keys of TRANSFORMATIONS. See the relevant +transformation class documentation for detailed +information on that type of power curve +transformation.

  • +
+
+
+
+

Methods

+ + + +
+

Attributes

+ + + + + + + + + + + + +

REQUIRED_KEYS

Required keys in the input specification dictionary.

target

Target loss percentage due to transformation.

transformation

Power curve transformation.

+
+
+REQUIRED_KEYS = {'target_losses_percent'}
+

Required keys in the input specification dictionary.

+
+ +
+
+property target
+

Target loss percentage due to transformation.

+
+
Type:
+

int or float

+
+
+
+ +
+
+property transformation
+

Power curve transformation.

+
+
Type:
+

PowerCurveTransformation

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.power_curve.PowerCurveLossesMixin.html b/_autosummary/reV.losses.power_curve.PowerCurveLossesMixin.html new file mode 100644 index 000000000..d1deef0d1 --- /dev/null +++ b/_autosummary/reV.losses.power_curve.PowerCurveLossesMixin.html @@ -0,0 +1,717 @@ + + + + + + + reV.losses.power_curve.PowerCurveLossesMixin — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.power_curve.PowerCurveLossesMixin

+
+
+class PowerCurveLossesMixin[source]
+

Bases: object

+

Mixin class for reV.SAM.generation.AbstractSamWind.

+
+

Warning

+

Using this class for anything except as a mixin for +AbstractSamWind may result in +unexpected results and/or errors.

+
+

Methods

+ + + + + + + + + +

add_power_curve_losses()

Adjust power curve in SAM config file to account for losses.

wind_resource_from_input()

Collect wind resource and weights from inputs.

+

Attributes

+ + + + + + + + + +

POWER_CURVE_CONFIG_KEY

Specify power curve loss target in the config file using this key.

input_power_curve

Original power curve for site.

+
+
+POWER_CURVE_CONFIG_KEY = 'reV_power_curve_losses'
+

Specify power curve loss target in the config file using this key.

+
+ +
+
+add_power_curve_losses()[source]
+

Adjust power curve in SAM config file to account for losses.

+

This function reads the information in the +reV_power_curve_losses key of the sam_sys_inputs +dictionary and computes a new power curve that accounts for the +loss percentage specified from that input. If no power curve +loss info is specified in sam_sys_inputs, the power curve +will not be adjusted.

+
+

See also

+
+
adjust_power_curve()

Power curve shift calculation.

+
+
+
+
+ +
+
+property input_power_curve
+

Original power curve for site.

+
+
Type:
+

PowerCurve

+
+
+
+ +
+
+wind_resource_from_input()[source]
+

Collect wind resource and weights from inputs.

+
+
Returns:
+

PowerCurveWindResource – Wind resource used to compute power curve shift.

+
+
Raises:
+

reVLossesValueError – If power curve losses are not compatible with the + ‘wind_resource_model_choice’.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.power_curve.PowerCurveWindResource.html b/_autosummary/reV.losses.power_curve.PowerCurveWindResource.html new file mode 100644 index 000000000..c33197c00 --- /dev/null +++ b/_autosummary/reV.losses.power_curve.PowerCurveWindResource.html @@ -0,0 +1,709 @@ + + + + + + + reV.losses.power_curve.PowerCurveWindResource — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.power_curve.PowerCurveWindResource

+
+
+class PowerCurveWindResource(temperature, pressure, wind_speed)[source]
+

Bases: object

+

Wind resource data for calculating power curve shift.

+

Power Curve Wind Resource.

+
+
Parameters:
+
    +
  • temperature (array_like) – An iterable representing the temperatures at a single site +(in C). Must be the same length as the pressure and +wind_speed inputs.

  • +
  • pressure (array_like) – An iterable representing the pressures at a single site +(in PA or ATM). Must be the same length as the temperature +and wind_speed inputs.

  • +
  • wind_speed (array_like) – An iterable representing the wind speeds at a single site +(in m/s). Must be the same length as the temperature and +pressure inputs.

  • +
+
+
+

Methods

+ + + + + + +

wind_resource_for_site()

Extract scaled wind speeds at the resource site.

+

Attributes

+ + + + + + +

wind_speeds

Array of adjusted wind speeds.

+
+
+wind_resource_for_site()[source]
+

Extract scaled wind speeds at the resource site.

+

Get the wind speeds for this site, accounting for the scaling +done in SAM [1] based on air pressure [2]. These wind speeds +can then be used to sample the power curve and obtain generation +values.

+
+
Returns:
+

array-like – Array of scaled wind speeds.

+
+
+

References

+ +
+ +
+
+property wind_speeds
+

Array of adjusted wind speeds.

+
+
Type:
+

numpy.array

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.power_curve.TRANSFORMATIONS.html b/_autosummary/reV.losses.power_curve.TRANSFORMATIONS.html new file mode 100644 index 000000000..beec8b603 --- /dev/null +++ b/_autosummary/reV.losses.power_curve.TRANSFORMATIONS.html @@ -0,0 +1,637 @@ + + + + + + + reV.losses.power_curve.TRANSFORMATIONS — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.power_curve.TRANSFORMATIONS

+
+
+TRANSFORMATIONS = {'exponential_stretching': <class 'reV.losses.power_curve.ExponentialStretching'>, 'horizontal_translation': <class 'reV.losses.power_curve.HorizontalTranslation'>, 'linear_stretching': <class 'reV.losses.power_curve.LinearStretching'>}
+

Implemented power curve transformations.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.power_curve.adjust_power_curve.html b/_autosummary/reV.losses.power_curve.adjust_power_curve.html new file mode 100644 index 000000000..ae28de2f5 --- /dev/null +++ b/_autosummary/reV.losses.power_curve.adjust_power_curve.html @@ -0,0 +1,662 @@ + + + + + + + reV.losses.power_curve.adjust_power_curve — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.power_curve.adjust_power_curve

+
+
+adjust_power_curve(power_curve, resource_data, target_losses, site=None)[source]
+

Adjust power curve to account for losses.

+

This function computes a new power curve that accounts for the +loss percentage specified from the target loss.

+
+
Parameters:
+
    +
  • power_curve (PowerCurve) – Power curve to be adjusted to match target losses.

  • +
  • resource_data (PowerCurveWindResource) – Resource data for the site being investigated.

  • +
  • target_losses (PowerCurveLossesInput) – Target loss and power curve shift info.

  • +
  • site (int | str, optional) – Site number (gid) for debugging and logging. +By default, None.

  • +
+
+
Returns:
+

PowerCurve – Power Curve shifted to meet the target losses. Power Curve is +not adjusted if all wind speeds are above the cutout or below +the cutin speed.

+
+
+
+

See also

+
+
PowerCurveLosses

Power curve re-calculation.

+
+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.power_curve.html b/_autosummary/reV.losses.power_curve.html new file mode 100644 index 000000000..4ebb45f09 --- /dev/null +++ b/_autosummary/reV.losses.power_curve.html @@ -0,0 +1,679 @@ + + + + + + + reV.losses.power_curve — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.power_curve

+

reV power curve losses module.

+

Module attributes

+ + + + + + +

TRANSFORMATIONS

Implemented power curve transformations.

+

Functions

+ + + + + + +

adjust_power_curve(power_curve, ...[, site])

Adjust power curve to account for losses.

+

Classes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

AbstractPowerCurveTransformation(power_curve)

Abstract base class for power curve transformations.

ExponentialStretching(power_curve)

Utility for applying an exponential stretch to the power curve.

HorizontalTranslation(power_curve)

Utility for applying horizontal power curve translations.

LinearStretching(power_curve)

Utility for applying a linear stretch to the power curve.

PowerCurve(wind_speed, generation)

A turbine power curve.

PowerCurveLosses(power_curve, wind_resource)

A converter between annual losses and power curve transformation.

PowerCurveLossesInput(specs)

Power curve losses specification.

PowerCurveLossesMixin()

Mixin class for reV.SAM.generation.AbstractSamWind.

PowerCurveWindResource(temperature, ...)

Wind resource data for calculating power curve shift.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.scheduled.Outage.html b/_autosummary/reV.losses.scheduled.Outage.html new file mode 100644 index 000000000..aa1b84864 --- /dev/null +++ b/_autosummary/reV.losses.scheduled.Outage.html @@ -0,0 +1,822 @@ + + + + + + + reV.losses.scheduled.Outage — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.scheduled.Outage

+
+
+class Outage(specs)[source]
+

Bases: object

+

A specific type of outage.

+

This class stores and validates information about a single type of +outage. In particular, the number of outages, duration, percentage +of farm down, and the allowed months for scheduling the outage +must all be provided. These inputs are then validated so that they +can be used in instances of scheduling objects.

+
+
Parameters:
+

specs (dict) – A dictionary containing specifications for this outage. This +dictionary must contain the following keys:

+
+
    +
  • +
    count

    An integer value representing the total number of +times this outage should be scheduled. This number +should be larger than 0.

    +
    +
    +
  • +
  • +
    duration

    An integer value representing the total number of +consecutive hours that this outage should take. This +value must be larger than 0 and less than the number +of hours in the allowed months.

    +
    +
    +
  • +
  • +
    percentage_of_capacity_lost

    An integer or float value representing the total +percentage of the total capacity that will be lost +for the duration of the outage. This value must be +in the range (0, 100].

    +
    +
    +
  • +
  • +
    allowed_months

    A list of month names corresponding to the allowed +months for the scheduled outages. Month names can be +unformatted and can be specified using 3-letter +month abbreviations.

    +
    +
    +
  • +
+
+

The input dictionary can also provide the following optional +keys:

+
+
    +
  • +
    allow_outage_overlap - by default, True

    A bool flag indicating whether or not this outage is +allowed to overlap with other outages, including +itself. It is recommended to set this value to +True whenever possible, as it allows for more +flexible scheduling.

    +
    +
    +
  • +
  • +
    name - by default, string containing init parameters

    A unique name for the outage, used for more +descriptive error messages.

    +
    +
    +
  • +
+
+
+
+

Methods

+ + + +
+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

REQUIRED_KEYS

Required keys in the input specification dictionary.

allow_outage_overlap

Indicator for overlap with other outages.

allowed_months

Months during which outage can be scheduled.

count

Total number of times outage should be scheduled.

duration

Total number of consecutive hours per outage.

name

Name of the outage.

percentage_of_capacity_lost

Percent of capacity taken down per outage.

total_available_hours

Total number of hours available based on allowed months.

+
+
+REQUIRED_KEYS = {'allowed_months', 'count', 'duration', 'percentage_of_capacity_lost'}
+

Required keys in the input specification dictionary.

+
+ +
+
+property count
+

Total number of times outage should be scheduled.

+
+
Type:
+

int

+
+
+
+ +
+
+property duration
+

Total number of consecutive hours per outage.

+
+
Type:
+

int

+
+
+
+ +
+
+property percentage_of_capacity_lost
+

Percent of capacity taken down per outage.

+
+
Type:
+

int | float

+
+
+
+ +
+
+property allowed_months
+

Months during which outage can be scheduled.

+
+
Type:
+

list

+
+
+
+ +
+
+property allow_outage_overlap
+

Indicator for overlap with other outages.

+
+
Type:
+

bool

+
+
+
+ +
+
+property name
+

Name of the outage.

+
+
Type:
+

str

+
+
+
+ +
+
+property total_available_hours
+

Total number of hours available based on allowed months.

+
+
Type:
+

int

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.scheduled.OutageScheduler.html b/_autosummary/reV.losses.scheduled.OutageScheduler.html new file mode 100644 index 000000000..29a0044c0 --- /dev/null +++ b/_autosummary/reV.losses.scheduled.OutageScheduler.html @@ -0,0 +1,756 @@ + + + + + + + reV.losses.scheduled.OutageScheduler — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.scheduled.OutageScheduler

+
+
+class OutageScheduler(outages, seed=0)[source]
+

Bases: object

+

A scheduler for multiple input outages.

+

Given a list of information about different types of desired +outages, this class leverages the stochastic scheduling routines of +SingleOutageScheduler to calculate the total losses due to +the input outages on an hourly basis.

+
+
+outages
+

The user-provided list of Outages containing +info about all types of outages to be scheduled.

+
+
Type:
+

list of Outages

+
+
+
+ +
+
+seed
+

The seed value used to seed the random generator in order +to produce random but reproducible losses. This is useful +for ensuring that stochastically scheduled losses vary +between different sites (i.e. that randomly scheduled +outages in two different location do not match perfectly on +an hourly basis).

+
+
Type:
+

int

+
+
+
+ +
+
+total_losses
+

An array (of length 8760) containing the per-hour total loss +percentage resulting from the stochastically scheduled outages. +This array contains only zero values before the +calculate() method is run.

+
+
Type:
+

np.array

+
+
+
+ +
+
+can_schedule_more
+

A boolean array (of length 8760) indicating whether or not more +losses can be scheduled for a given hour. This array keeps track +of all the scheduling conflicts between input outages.

+
+
Type:
+

np.array

+
+
+
+ +
+

Warning

+

It is possible that not all outages input by the user will be +scheduled. This can happen when there is not enough time allowed +for all of the input outages. To avoid this issue, always be sure to +allow a large enough month range for long outages that take up a big +portion of the farm and try to allow outage overlap whenever +possible.

+
+
+

See also

+
+
SingleOutageScheduler

Single outage scheduler.

+
+
Outage

Specifications for a single outage.

+
+
+
+
+
Parameters:
+
    +
  • outages (list of Outages) – A list of Outages, where each Outage +contains info about a single type of outage. See the +documentation of Outage for a description of the +required keys of each outage dictionary.

  • +
  • seed (int, optional) – An integer value used to seed the random generator in order +to produce random but reproducible losses. This is useful +for ensuring that stochastically scheduled losses vary +between different sites (i.e. that randomly scheduled +outages in two different locations do not match perfectly on +an hourly basis). By default, the seed is set to 0.

  • +
+
+
+

Methods

+ + + + + + +

calculate()

Calculate total losses from stochastically scheduled outages.

+
+
+calculate()[source]
+

Calculate total losses from stochastically scheduled outages.

+

This function calls SingleOutageScheduler.calculate() +on every outage input (sorted by largest duration and then +largest number of outages) and returns the aggregate of the losses +from the result.

+
+
Returns:
+

np.array – An array (of length 8760) containing the per-hour total loss +percentage resulting from the stochastically scheduled +outages.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.scheduled.ScheduledLossesMixin.html b/_autosummary/reV.losses.scheduled.ScheduledLossesMixin.html new file mode 100644 index 000000000..26405b964 --- /dev/null +++ b/_autosummary/reV.losses.scheduled.ScheduledLossesMixin.html @@ -0,0 +1,737 @@ + + + + + + + reV.losses.scheduled.ScheduledLossesMixin — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.scheduled.ScheduledLossesMixin

+
+
+class ScheduledLossesMixin[source]
+

Bases: object

+

Mixin class for reV.SAM.generation.AbstractSamGeneration.

+
+

Warning

+

Using this class for anything except as a mixin for +AbstractSamGeneration may result in +unexpected results and/or errors.

+
+

Methods

+ + + + + + +

add_scheduled_losses([resource])

Add stochastically scheduled losses to SAM config file.

+

Attributes

+ + + + + + + + + + + + +

OUTAGE_CONFIG_KEY

Specify outage information in the config file using this key.

OUTAGE_SEED_CONFIG_KEY

Specify a randomizer seed in the config file using this key.

outage_seed

A value to use as the seed for the outage losses.

+
+
+OUTAGE_CONFIG_KEY = 'reV_outages'
+

Specify outage information in the config file using this key.

+
+ +
+
+OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed'
+

Specify a randomizer seed in the config file using this key.

+
+ +
+
+add_scheduled_losses(resource=None)[source]
+

Add stochastically scheduled losses to SAM config file.

+

This function reads the information in the reV_outages key +of the sam_sys_inputs dictionary and computes stochastically +scheduled losses from that input. If the value for +reV_outages is a string, it must have been generated by +calling json.dumps() on the list of dictionaries +containing outage specifications. Otherwise, the outage +information is expected to be a list of dictionaries containing +outage specifications. See Outage for a description of +the specifications allowed for each outage. The scheduled losses +are passed to SAM via the hourly key to signify which hourly +capacity factors should be adjusted with outage losses. If no +outage info is specified in sam_sys_inputs, no scheduled +losses are added.

+
+
Parameters:
+

resource (pd.DataFrame, optional) – Time series resource data for a single location with a +pandas DatetimeIndex. The year value of the index will +be used to seed the stochastically scheduled losses. If +None, no yearly seed will be used.

+
+
+
+

See also

+
+
Outage

Single outage specification.

+
+
+
+

Notes

+

The scheduled losses are passed to SAM via the hourly key to +signify which hourly capacity factors should be adjusted with +outage losses. If the user specifies other hourly adjustment +factors via the hourly key, the effect is combined. For +example, if the user inputs a 33% hourly adjustment factor and +reV schedules an outage for 70% of the farm down for the same +hour, then the resulting adjustment factor is

+
+
+

This means the generation will be reduced by ~80%, because the +user requested 33% losses for the 30% the farm that remained +operational during the scheduled outage (i.e. 20% remaining of +the original generation).

+
+ +
+
+property outage_seed
+

A value to use as the seed for the outage losses.

+
+
Type:
+

int

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.scheduled.SingleOutageScheduler.html b/_autosummary/reV.losses.scheduled.SingleOutageScheduler.html new file mode 100644 index 000000000..ba1999158 --- /dev/null +++ b/_autosummary/reV.losses.scheduled.SingleOutageScheduler.html @@ -0,0 +1,816 @@ + + + + + + + reV.losses.scheduled.SingleOutageScheduler — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.scheduled.SingleOutageScheduler

+
+
+class SingleOutageScheduler(outage, scheduler)[source]
+

Bases: object

+

A scheduler for a single outage.

+

Given information about a single type of outage, this class +facilitates the (randomized) scheduling of all requested instances +of the outage. See SingleOutageScheduler.calculate() for +specific details about the scheduling process.

+
+
+outage
+

The user-provided Outage containing info about the outage +to be scheduled.

+
+
Type:
+

Outage

+
+
+
+ +
+
+scheduler
+

A scheduler object that keeps track of the total hourly losses +from the input outage as well as any other outages it has +already scheduled.

+
+
Type:
+

OutageScheduler

+
+
+
+ +
+
+can_schedule_more
+

A boolean array (of length 8760) indicating whether or not more +losses can be scheduled for a given hour. This is specific +to the input outage only.

+
+
Type:
+

np.array

+
+
+
+ +
+

Warning

+

It is possible that not all outages input by the user can be +scheduled. This can happen when there is not enough time allowed +for all of the input outages. To avoid this issue, always be sure to +allow a large enough month range for long outages that take up a big +portion of the farm and try to allow outage overlap whenever +possible.

+
+
+

See also

+
+
OutageScheduler

Scheduler for multiple outages.

+
+
Outage

Specifications for a single outage.

+
+
+
+
+
Parameters:
+
    +
  • outage (Outage) – An outage object containing info about the outage to be +scheduled.

  • +
  • scheduler (OutageScheduler) – A scheduler object that keeps track of the total hourly +losses from the input outage as well as any other outages +it has already scheduled.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + +

calculate()

Calculate losses from stochastically scheduled outages.

find_random_outage_slice([seed])

Find a random slot of time for this type of outage.

schedule_losses(outage_slice)

Schedule the input outage during the given slice of time.

update_when_can_schedule()

Update can_schedule_more using OutageScheduler.

update_when_can_schedule_from_months()

Update can_schedule_more using Outage.allowed_months.

+

Attributes

+ + + + + + +

MAX_ITER

Max number of extra attempts to schedule outages.

+
+
+MAX_ITER = 10000
+

Max number of extra attempts to schedule outages.

+
+ +
+
+calculate()[source]
+

Calculate losses from stochastically scheduled outages.

+

This function attempts to schedule outages according to the +specification provided in the Outage input. Specifically, +it checks the available hours based on the main +Scheduler (which may have other outages +already scheduled) and attempts to randomly add new outages with +the specified duration and percent of losses. The function +terminates when the desired number of outages (specified by +Outage.count) have been successfully scheduled, or when +the number of attempts exceeds +MAX_ITER + Outage.count.

+
+
Warns:
+

reVLossesWarning – If the number of requested outages could not be scheduled.

+
+
+
+ +
+
+update_when_can_schedule_from_months()[source]
+

Update can_schedule_more using Outage.allowed_months.

+

This function sets the can_schedule_more bool array to +True for all of the months in Outage.allowed_months.

+
+ +
+
+update_when_can_schedule()[source]
+

Update can_schedule_more using OutageScheduler.

+

This function sets the can_schedule_more bool array to +True wherever OutageScheduler.can_schedule_more is +also True and wherever the losses from this outage would not +cause the OutageScheduler.total_losses to exceed 100%.

+
+ +
+
+find_random_outage_slice(seed=None)[source]
+

Find a random slot of time for this type of outage.

+

This function randomly selects a starting time for this outage +given the allowed times in can_schedule_more. It does +not verify that the outage can be scheduled for the entire +requested duration.

+
+
Parameters:
+

seed (int, optional) – Integer used to seed the np.random.choice() call. +If None, seed is not used.

+
+
Returns:
+

slice – A slice corresponding to the random slot of time for this +type of outage.

+
+
+
+ +
+
+schedule_losses(outage_slice)[source]
+

Schedule the input outage during the given slice of time.

+

Given a slice in the hourly loss array, add the losses from this +outage (which is equivalent to scheduling them).

+
+
Parameters:
+

outage_slice (slice) – A slice corresponding to the slot of time to schedule this +outage.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.scheduled.html b/_autosummary/reV.losses.scheduled.html new file mode 100644 index 000000000..e7850c9cc --- /dev/null +++ b/_autosummary/reV.losses.scheduled.html @@ -0,0 +1,648 @@ + + + + + + + reV.losses.scheduled — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.scheduled

+

reV scheduled losses module.

+

Classes

+ + + + + + + + + + + + + + + +

Outage(specs)

A specific type of outage.

OutageScheduler(outages[, seed])

A scheduler for multiple input outages.

ScheduledLossesMixin()

Mixin class for reV.SAM.generation.AbstractSamGeneration.

SingleOutageScheduler(outage, scheduler)

A scheduler for a single outage.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.utils.convert_to_full_month_names.html b/_autosummary/reV.losses.utils.convert_to_full_month_names.html new file mode 100644 index 000000000..53d23827c --- /dev/null +++ b/_autosummary/reV.losses.utils.convert_to_full_month_names.html @@ -0,0 +1,660 @@ + + + + + + + reV.losses.utils.convert_to_full_month_names — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.utils.convert_to_full_month_names

+
+
+convert_to_full_month_names(month_names)[source]
+

Format an iterable of month names to match those in calendar.

+

This function will format each input name to match the formatting +in calendar.month_name (upper case, no extra whitespace), and +it will convert all abbreviations to full month names. No other +assumptions are made about the inputs, so an input string ” abc ” +will get formatted and passed through as “Abc”.

+
+
Parameters:
+

month_names (iter) – An iterable of strings representing the input month names. +Month names can be unformatted and contain 3-letter month +abbreviations.

+
+
Returns:
+

list – A list of month names matching the formatting of +calendar.month_name (upper case, no extra whitespace). +Abbreviations are also converted to a full month name.

+
+
+

Examples

+
>>> input_names = ['March', ' aprIl  ', 'Jun', 'jul', '  abc ']
+>>> convert_to_full_month_names(input_names)
+['March', 'April', 'June', 'July', 'Abc']
+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.utils.filter_unknown_month_names.html b/_autosummary/reV.losses.utils.filter_unknown_month_names.html new file mode 100644 index 000000000..4b9da4d8a --- /dev/null +++ b/_autosummary/reV.losses.utils.filter_unknown_month_names.html @@ -0,0 +1,652 @@ + + + + + + + reV.losses.utils.filter_unknown_month_names — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.utils.filter_unknown_month_names

+
+
+filter_unknown_month_names(month_names)[source]
+

Split the input into known and unknown month names.

+
+
Parameters:
+

month_names (iter) – An iterable of strings representing the input month names. Month +names must match the formatting in calendar.month_name +(upper case, no extra whitespace), otherwise they will be placed +into the unknown_months return list.

+
+
Returns:
+

    +
  • known_months (list) – List of known month names.

  • +
  • unknown_months (list) – List of unknown month names.

  • +
+

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.utils.format_month_name.html b/_autosummary/reV.losses.utils.format_month_name.html new file mode 100644 index 000000000..6adef8839 --- /dev/null +++ b/_autosummary/reV.losses.utils.format_month_name.html @@ -0,0 +1,658 @@ + + + + + + + reV.losses.utils.format_month_name — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.utils.format_month_name

+
+
+format_month_name(month_name)[source]
+

Format a month name to match the names in the calendar module.

+

In particular, any extra spaces at the beginning or end of the +string are stripped, and the name is converted to a title (first +letter is uppercase).

+
+
Parameters:
+

month_name (str) – Name of month.

+
+
Returns:
+

str – Name of month, formatted to match the month names in the +calendar module.

+
+
+

Examples

+
>>> format_month_name("June")
+"June"
+>>> format_month_name("aprIl")
+"April"
+>>> format_month_name(" aug  ")
+"Aug"
+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.utils.full_month_name_from_abbr.html b/_autosummary/reV.losses.utils.full_month_name_from_abbr.html new file mode 100644 index 000000000..79506793e --- /dev/null +++ b/_autosummary/reV.losses.utils.full_month_name_from_abbr.html @@ -0,0 +1,674 @@ + + + + + + + reV.losses.utils.full_month_name_from_abbr — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.utils.full_month_name_from_abbr

+
+
+full_month_name_from_abbr(month_name)[source]
+

Convert a month abbreviation to a full month name.

+
+
Parameters:
+

month_name (str) –

+

Abbreviated month name. Must be one of:

+
+
    +
  • “Jan”

  • +
  • “Feb”

  • +
  • “Mar”

  • +
  • “Apr”

  • +
  • “May”

  • +
  • “Jun”

  • +
  • “Jul”

  • +
  • “Aug”

  • +
  • “Sep”

  • +
  • “Oct”

  • +
  • “Nov”

  • +
  • “Dec”

  • +
+
+

If the input does not match one of these, this function returns +None.

+
+
Returns:
+

str | None – Unabbreviated month name, or None if input abbreviation +is not understood.

+
+
+

Examples

+
>>> full_month_name_from_abbr("Jun")
+"June"
+>>> full_month_name_from_abbr("June") is None
+True
+>>> full_month_name_from_abbr('Abcdef') is None
+True
+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.utils.hourly_indices_for_months.html b/_autosummary/reV.losses.utils.hourly_indices_for_months.html new file mode 100644 index 000000000..9c93dcd89 --- /dev/null +++ b/_autosummary/reV.losses.utils.hourly_indices_for_months.html @@ -0,0 +1,653 @@ + + + + + + + reV.losses.utils.hourly_indices_for_months — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.utils.hourly_indices_for_months

+
+
+hourly_indices_for_months(month_names)[source]
+

Convert month names into a list of hourly indices.

+

Given a list of month names, this function will return a list +of indices such that any index value corresponds to an hour within +the input months.

+
+
Parameters:
+

month_names (iter) – An iterable of month names for the desired starting indices. +The month names must match the formatting in +calendar.month_name (upper case, no extra whitespace), +otherwise their hourly indices will not be included in the +output.

+
+
Returns:
+

list – A list of hourly index values such that any index corresponds to +an hour within the input months.

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.utils.html b/_autosummary/reV.losses.utils.html new file mode 100644 index 000000000..458f30905 --- /dev/null +++ b/_autosummary/reV.losses.utils.html @@ -0,0 +1,657 @@ + + + + + + + reV.losses.utils — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.utils

+

reV-losses utilities.

+

Functions

+ + + + + + + + + + + + + + + + + + + + + + + + +

convert_to_full_month_names(month_names)

Format an iterable of month names to match those in calendar.

filter_unknown_month_names(month_names)

Split the input into known and unknown month names.

format_month_name(month_name)

Format a month name to match the names in the calendar module.

full_month_name_from_abbr(month_name)

Convert a month abbreviation to a full month name.

hourly_indices_for_months(month_names)

Convert month names into a list of hourly indices.

month_index(month_name)

Convert a month name (as string) to an index (0-11) of the month.

month_indices(month_names)

Convert input month names to an indices (0-11) of the months.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.utils.month_index.html b/_autosummary/reV.losses.utils.month_index.html new file mode 100644 index 000000000..9ad9989bf --- /dev/null +++ b/_autosummary/reV.losses.utils.month_index.html @@ -0,0 +1,659 @@ + + + + + + + reV.losses.utils.month_index — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.utils.month_index

+
+
+month_index(month_name)[source]
+

Convert a month name (as string) to an index (0-11) of the month.

+
+
Parameters:
+

month_name (str) – Name of month to corresponding to desired index. This input +must match the formatting in calendar.month_name +(upper case, no extra whitespace).

+
+
Returns:
+

int – The 0-index of the month, or -1 if the month name is not +understood.

+
+
+

Examples

+
>>> month_index("June")
+5
+>>> month_index("July")
+6
+>>> month_index("Jun")
+-1
+>>> month_index("july")
+-1
+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.losses.utils.month_indices.html b/_autosummary/reV.losses.utils.month_indices.html new file mode 100644 index 000000000..25505e1c4 --- /dev/null +++ b/_autosummary/reV.losses.utils.month_indices.html @@ -0,0 +1,649 @@ + + + + + + + reV.losses.utils.month_indices — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.losses.utils.month_indices

+
+
+month_indices(month_names)[source]
+

Convert input month names to an indices (0-11) of the months.

+
+
Parameters:
+

month_names (iter) – An iterable of month names for the desired starting indices. +The month names must match the formatting in +calendar.month_name (upper case, no extra whitespace), +otherwise their index will not be included in the output.

+
+
Returns:
+

set – A set of month indices for the input month names. Unknown +month indices (-1) are removed.

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.nrwal.cli_nrwal.html b/_autosummary/reV.nrwal.cli_nrwal.html new file mode 100644 index 000000000..82f6175c2 --- /dev/null +++ b/_autosummary/reV.nrwal.cli_nrwal.html @@ -0,0 +1,631 @@ + + + + + + + reV.nrwal.cli_nrwal — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.nrwal.cli_nrwal

+

reV-NRWAL module CLI utility functions.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.nrwal.html b/_autosummary/reV.nrwal.html new file mode 100644 index 000000000..8ec5da8ff --- /dev/null +++ b/_autosummary/reV.nrwal.html @@ -0,0 +1,640 @@ + + + + + + + reV.nrwal — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.nrwal

+

reV offshore wind econ and generation analysis module

+ + + + + + + + + +

reV.nrwal.cli_nrwal

reV-NRWAL module CLI utility functions.

reV.nrwal.nrwal

reV-NRWAL analysis module.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.nrwal.nrwal.RevNrwal.html b/_autosummary/reV.nrwal.nrwal.RevNrwal.html new file mode 100644 index 000000000..dc50dd444 --- /dev/null +++ b/_autosummary/reV.nrwal.nrwal.RevNrwal.html @@ -0,0 +1,948 @@ + + + + + + + reV.nrwal.nrwal.RevNrwal — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.nrwal.nrwal.RevNrwal

+
+
+class RevNrwal(gen_fpath, site_data, sam_files, nrwal_configs, output_request, save_raw=True, meta_gid_col='gid', site_meta_cols=None)[source]
+

Bases: object

+

Framework to handle reV-NRWAL analysis.

+

reV NRWAL analysis runs reV data through the NRWAL +compute library. Everything in this module operates on the +spatiotemporal resolution of the reV generation output file +(usually the wind or solar resource resolution but could also be +the supply curve resolution after representative profiles is +run).

+
+
Parameters:
+
    +
  • gen_fpath (str) – Full filepath to HDF5 file with reV generation or +rep_profiles output. Anything in the output_request input +is added to and/or manipulated within this file.

    +
    +

    Note

    +

If executing reV from the command line, this +input can also be "PIPELINE" to parse the output of +one of the previous steps and use it as input to this call. +However, note that duplicate executions of reV +commands prior to this one within the pipeline may +invalidate this parsing, meaning the gen_fpath input +will have to be specified manually.

    +
    +
  • +
  • site_data (str | pd.DataFrame) – Site-specific input data for NRWAL calculation. If this input +is a string, it should be a path that points to a CSV file. +Otherwise, this input should be a DataFrame with +pre-extracted site data. Rows in this table should match +the meta_gid_col in the gen_fpath meta data input +sites via a gid column. A config column must also be +provided that corresponds to the nrwal_configs input. Only +sites with a gid in this file’s gid column will be run +through NRWAL.

  • +
  • sam_files (dict | str) – A dictionary mapping SAM input configuration ID(s) to SAM +configuration(s). Keys are the SAM config ID(s) which +correspond to the keys in the nrwal_configs input. Values +for each key are either a path to a corresponding SAM +config file or a full dictionary of SAM config inputs. For +example:

    +
    sam_files = {
    +    "default": "/path/to/default/sam.json",
    +    "onshore": "/path/to/onshore/sam_config.yaml",
    +    "offshore": {
    +        "sam_key_1": "sam_value_1",
    +        "sam_key_2": "sam_value_2",
    +        ...
    +    },
    +    ...
    +}
    +
    +
    +

    This input can also be a string pointing to a single SAM +config file. In this case, the config column of the +CSV points input should be set to None or left out +completely. See the documentation for the reV SAM class +(e.g. reV.SAM.generation.WindPower, +reV.SAM.generation.PvWattsv8, +reV.SAM.generation.Geothermal, etc.) for +documentation on the allowed and/or required SAM config file +inputs.

    +
  • +
  • nrwal_configs (dict) – A dictionary mapping SAM input configuration ID(s) to NRWAL +configuration(s). Keys are the SAM config ID(s) which +correspond to the keys in the sam_files input. Values +for each key are either a path to a corresponding NRWAL YAML +or JSON config file or a full dictionary of NRWAL config +inputs. For example:

    +
    nrwal_configs = {
    +    "default": "/path/to/default/nrwal.json",
    +    "onshore": "/path/to/onshore/nrwal_config.yaml",
    +    "offshore": {
    +        "nrwal_key_1": "nrwal_value_1",
    +        "nrwal_key_2": "nrwal_value_2",
    +        ...
    +    },
    +    ...
    +}
    +
    +
    +
  • +
  • output_request (list | tuple) – List of output dataset names to be written to the +gen_fpath file. Any key from the NRWAL configs or any of +the inputs (site_data or sam_files) is available to be +exported as an output dataset. If you want to manipulate a +dset like cf_mean from gen_fpath and include it in the +output_request, you should set save_raw=True and then +use cf_mean_raw in the NRWAL equations as the input. +This allows you to define an equation in the NRWAL configs +for a manipulated cf_mean output that can be included in +the output_request list.

  • +
  • save_raw (bool, optional) – Flag to save an initial (“raw”) copy of input datasets from +gen_fpath that are also part of the output_request. For +example, if you request cf_mean in output_request but +also manipulate the cf_mean dataset in the NRWAL +equations, the original cf_mean will be archived under +the cf_mean_raw dataset in gen_fpath. +By default, True.

  • +
  • meta_gid_col (str, optional) – Column label in the source meta data from gen_fpath that +contains the unique gid identifier. This will be joined to +the site_data gid column. By default, "gid".

  • +
  • site_meta_cols (list | tuple, optional) – Column labels from site_data to be added to the meta data +table in gen_fpath. If None, only the columns in +DEFAULT_META_COLS will be added. Any columns +requested via this input will be considered in addition to +the DEFAULT_META_COLS. By default, None.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + +

check_outputs()

Check the nrwal outputs for nan values and raise errors if found.

run([csv_output, out_fpath])

Run NRWAL analysis.

run_nrwal()

Run analysis via the NRWAL analysis library

save_raw_dsets()

If requested by save_raw=True, archive raw datasets that exist in the gen_fpath file and are also requested in the output_request

write_meta_to_csv([out_fpath])

Combine NRWAL outputs with meta and write to output csv.

write_to_gen_fpath()

Save NRWAL outputs to input generation fpath file.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

DEFAULT_META_COLS

Columns from the site_data table to join to the output meta data

analysis_gids

Get an array of gids from the source generation meta data that are to-be analyzed by nrwal.

analysis_mask

Get a boolean array to mask the source generation meta data where True is sites that are to be analyzed by NRWAL.

gen_dsets

Get the available datasets from the gen source file

meta_out

Get the combined onshore and offshore meta data.

meta_source

Get the full meta data (onshore + offshore)

outputs

Get a dict of NRWAL outputs.

time_index

Get the source time index.

+
+
+DEFAULT_META_COLS = ('config',)
+

Columns from the site_data table to join to the output meta data

+
+ +
+
+property time_index
+

Get the source time index.

+
+ +
+
+property gen_dsets
+

Get the available datasets from the gen source file

+
+ +
+
+property meta_source
+

Get the full meta data (onshore + offshore)

+
+ +
+
+property meta_out
+

Get the combined onshore and offshore meta data.

+
+ +
+
+property analysis_mask
+

Get a boolean array to mask the source generation meta data where +True is sites that are to be analyzed by NRWAL.

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+property analysis_gids
+

Get an array of gids from the source generation meta data that are +to-be analyzed by nrwal.

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+property outputs
+

Get a dict of NRWAL outputs. Only active analysis sites will have +data in the output, sites that were not found in the site_data “gid” +column will not have data in these output arrays

+
+ +
+
+run_nrwal()[source]
+

Run analysis via the NRWAL analysis library

+
+ +
+
+check_outputs()[source]
+

Check the nrwal outputs for nan values and raise errors if found.

+
+ +
+
+save_raw_dsets()[source]
+

If requested by save_raw=True, archive raw datasets that exist in +the gen_fpath file and are also requested in the output_request

+
+ +
+
+write_to_gen_fpath()[source]
+

Save NRWAL outputs to input generation fpath file.

+
+
Returns:
+

str – Path to output file.

+
+
+
+ +
+
+write_meta_to_csv(out_fpath=None)[source]
+

Combine NRWAL outputs with meta and write to output csv.

+
+
Parameters:
+

out_fpath (str, optional) – Full path to output NRWAL CSV file. The file path does not +need to include file ending - it will be added automatically +if missing. If None, the generation HDF5 filepath will +be converted to a CSV out path by replacing the “.h5” file +ending with “.csv”. By default, None.

+
+
Returns:
+

str – Path to output file.

+
+
+
+ +
+
+run(csv_output=False, out_fpath=None)[source]
+

Run NRWAL analysis.

+
+
Parameters:
+
    +
  • csv_output (bool, optional) – Option to write H5 file meta + all requested outputs to +CSV file instead of storing in the HDF5 file directly. This +can be useful if the same HDF5 file is used for multiple +sets of NRWAL runs. Note that all requested output datasets +must be 1-dimensional in order to fit within the CSV output.

    +
    +

    Important

    +

    This option is not compatible with +save_raw=True. If you set csv_output=True, then +the save_raw option is forced to be False. +Therefore, make sure that you do not have any references +to “input_dataset_name_raw” in your NRWAL config. If you +need to manipulate an input dataset, save it to a +different output name in the NRWAL config or manually add +an “input_dataset_name_raw” dataset to your generation +HDF5 file before running NRWAL.

    +
    +

    By default, False.

    +
  • +
  • out_fpath (str, optional) – This option has no effect if csv_output=False. +Otherwise, this should be the full path to output NRWAL CSV +file. The file path does not need to include file ending - +it will be added automatically if missing. If None, the +generation HDF5 filepath will be converted to a CSV out path +by replacing the “.h5” file ending with “.csv”. +By default, None.

  • +
+
+
Returns:
+

str – Path to output file.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.nrwal.nrwal.html b/_autosummary/reV.nrwal.nrwal.html new file mode 100644 index 000000000..4007bb455 --- /dev/null +++ b/_autosummary/reV.nrwal.nrwal.html @@ -0,0 +1,645 @@ + + + + + + + reV.nrwal.nrwal — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.nrwal.nrwal

+

reV-NRWAL analysis module.

+

This module runs reV data through the NRWAL compute library. This code was +first developed to use a custom offshore wind LCOE equation library but has +since been refactored to analyze any equation library in NRWAL.

+

Everything in this module operates on the spatiotemporal resolution of the reV +generation output file. This is usually the wind or solar resource resolution +but could be the supply curve resolution after representative profiles is run.

+

Classes

+ + + + + + +

RevNrwal(gen_fpath, site_data, sam_files, ...)

Framework to handle reV-NRWAL analysis.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.qa_qc.cli_qa_qc.cli_qa_qc.html b/_autosummary/reV.qa_qc.cli_qa_qc.cli_qa_qc.html new file mode 100644 index 000000000..eadad7e2c --- /dev/null +++ b/_autosummary/reV.qa_qc.cli_qa_qc.cli_qa_qc.html @@ -0,0 +1,657 @@ + + + + + + + reV.qa_qc.cli_qa_qc.cli_qa_qc — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.qa_qc.cli_qa_qc.cli_qa_qc

+
+
+cli_qa_qc(modules, out_dir, max_workers=None)[source]
+

Run QA/QC on reV outputs

+

reV QA/QC performs quality assurance checks on reV output +data. Users can specify the type of QA/QC that should be applied +to each reV module.

+
+
Parameters:
+
    +
  • modules (dict) – Dictionary of modules to QA/QC. Keys should be the names of the +modules to QA/QC. The values are dictionaries that represent the +config for the respective QA/QC step. Allowed config keys for +QA/QC are the “property” attributes of +QaQcModule.

  • +
  • out_dir (str) – Path to output directory.

  • +
  • max_workers (int, optional) – Max number of workers to run for QA/QA. If None, uses all +CPU cores. By default, None.

  • +
+
+
Raises:
+

ValueError – If fpath is not an H5 or CSV file.

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.qa_qc.cli_qa_qc.html b/_autosummary/reV.qa_qc.cli_qa_qc.html new file mode 100644 index 000000000..421265a4a --- /dev/null +++ b/_autosummary/reV.qa_qc.cli_qa_qc.html @@ -0,0 +1,639 @@ + + + + + + + reV.qa_qc.cli_qa_qc — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.qa_qc.cli_qa_qc

+

QA/QC CLI utility functions.

+

Functions

+ + + + + + +

cli_qa_qc(modules, out_dir[, max_workers])

Run QA/QC on reV outputs

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.qa_qc.html b/_autosummary/reV.qa_qc.html new file mode 100644 index 000000000..68c2aeafd --- /dev/null +++ b/_autosummary/reV.qa_qc.html @@ -0,0 +1,643 @@ + + + + + + + reV.qa_qc — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.qa_qc

+

reV quality assurance and control module

+ + + + + + + + + + + + +

reV.qa_qc.cli_qa_qc

QA/QC CLI utility functions.

reV.qa_qc.qa_qc

reV quality assurance and control classes

reV.qa_qc.summary

Compute and plot summary data

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.qa_qc.qa_qc.QaQc.html b/_autosummary/reV.qa_qc.qa_qc.QaQc.html new file mode 100644 index 000000000..eaff58e85 --- /dev/null +++ b/_autosummary/reV.qa_qc.qa_qc.QaQc.html @@ -0,0 +1,761 @@ + + + + + + + reV.qa_qc.qa_qc.QaQc — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.qa_qc.qa_qc.QaQc

+
+
+class QaQc(out_dir)[source]
+

Bases: object

+

reV QA/QC

+
+
Parameters:
+

out_dir (str) – Directory path to save summary data and plots to

+
+
+

Methods

+ + + + + + + + + + + + + + + +

create_scatter_plots([plot_type, cmap])

Create scatter plot for all compatible summary .csv files

exclusions_mask(excl_h5, out_dir[, ...])

Create inclusion mask from given layers dictionary, dump to disk and plot

h5(h5_file, out_dir[, dsets, group, ...])

Run QA/QC by computing summary stats from dsets in h5_file and plotting scatters plots of compatible summary stats

supply_curve(sc_table, out_dir[, columns, ...])

Plot supply curve

+

Attributes

+ + + + + + +

out_dir

Output directory

+
+
+property out_dir
+

Output directory

+
+
Returns:
+

str

+
+
+
+ +
+
+create_scatter_plots(plot_type='plotly', cmap='viridis', **kwargs)[source]
+

Create scatter plot for all compatible summary .csv files

+
+
Parameters:
+
    +
  • plot_type (str, optional) – plot_type of plot to create ‘plot’ or ‘plotly’, by default ‘plotly’

  • +
  • cmap (str, optional) – Colormap name, by default ‘viridis’

  • +
  • kwargs (dict) – Additional plotting kwargs

  • +
+
+
+
+ +
+
+classmethod h5(h5_file, out_dir, dsets=None, group=None, process_size=None, max_workers=None, plot_type='plotly', cmap='viridis', **kwargs)[source]
+

Run QA/QC by computing summary stats from dsets in h5_file and +plotting scatters plots of compatible summary stats

+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 file to run QA/QC on

  • +
  • out_dir (str) – Directory path to save summary tables and plots to

  • +
  • dsets (str | list, optional) – Datasets to summarize, by default None

  • +
  • group (str, optional) – Group within h5_file to summarize datasets for, by default None

  • +
  • process_size (int, optional) – Number of sites to process at a time, by default None

  • +
  • max_workers (int, optional) – Number of workers to use when summarizing 2D datasets, +by default None

  • +
  • plot_type (str, optional) – plot_type of plot to create ‘plot’ or ‘plotly’, by default ‘plotly’

  • +
  • cmap (str, optional) – Colormap name, by default ‘viridis’

  • +
  • kwargs (dict) – Additional plotting kwargs

  • +
+
+
+
+ +
+
+classmethod supply_curve(sc_table, out_dir, columns=None, lcoe='mean_lcoe', plot_type='plotly', cmap='viridis', sc_plot_kwargs=None, scatter_plot_kwargs=None)[source]
+

Plot supply curve

+
+
Parameters:
+
    +
  • sc_table (str) – Path to .csv file containing supply curve table

  • +
  • out_dir (str) – Directory path to save summary tables and plots to

  • +
  • columns (str | list, optional) – Column(s) to summarize, if None summarize all numeric columns, +by default None

  • +
  • lcoe (str, optional) – LCOE value to plot, by default ‘mean_lcoe’

  • +
  • plot_type (str, optional) – plot_type of plot to create ‘plot’ or ‘plotly’, by default ‘plotly’

  • +
  • cmap (str, optional) – Colormap name, by default ‘viridis’

  • +
  • sc_plot_kwargs (dict, optional) – Kwargs for supply curve plot, by default None

  • +
  • scatter_plot_kwargs (dict) – Kwargs for scatter plot, by default None

  • +
+
+
+
+ +
+
+classmethod exclusions_mask(excl_h5, out_dir, layers_dict=None, min_area=None, kernel='queen', hsds=False, plot_type='plotly', cmap='viridis', plot_step=100, **kwargs)[source]
+

Create inclusion mask from given layers dictionary, dump to disk and +plot

+
+
Parameters:
+
    +
  • excl_h5 (str) – Path to exclusions .h5 file

  • +
  • layers_dict (dict | NoneType) – Dictionary of LayerMask arguments {layer: {kwarg: value}}

  • +
  • min_area (float | NoneType) – Minimum required contiguous area in sq-km

  • +
  • kernel (str) – Contiguous filter method to use on final exclusions

  • +
  • hsds (bool) – Boolean flag to use h5pyd to handle .h5 ‘files’ hosted on AWS +behind HSDS

  • +
  • plot_type (str, optional) – plot_type of plot to create ‘plot’ or ‘plotly’, by default ‘plotly’

  • +
  • cmap (str, optional) – Colormap name, by default ‘viridis’

  • +
  • plot_step (int) – Step between points to plot

  • +
  • kwargs (dict) – Additional plotting kwargs

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.qa_qc.qa_qc.QaQcModule.html b/_autosummary/reV.qa_qc.qa_qc.QaQcModule.html new file mode 100644 index 000000000..0a0f632eb --- /dev/null +++ b/_autosummary/reV.qa_qc.qa_qc.QaQcModule.html @@ -0,0 +1,789 @@ + + + + + + + reV.qa_qc.qa_qc.QaQcModule — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.qa_qc.qa_qc.QaQcModule

+
+
+class QaQcModule(module_name, config, out_root)[source]
+

Bases: object

+

Class to handle Module QA/QC

+
+
Parameters:
+

config (dict) – Dictionary with pre-extracted config input group.

+
+
+

Methods

+ + + +
+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

area_filter_kernel

Get the minimum area filter kernel name ('queen' or 'rook').

cmap

Get the QA/QC plot colormap

columns

Get the supply_curve columns to QA/QC

dsets

Get the reV_h5 dsets to QA/QC

excl_dict

Get the exclusions dictionary

excl_fpath

Get the source exclusions filepath

fpath

Get the reV module output filepath(s)

group

Get the reV_h5 group to QA/QC

lcoe

Get the supply_curve lcoe column to plot

min_area

Get the minimum area filter minimum area in km2.

plot_step

Get the QA/QC step between exclusion mask points to plot

plot_type

either 'plot' or 'plotly'

process_size

Get the reV_h5 process_size for QA/QC

sub_dir

QA/QC sub directory for this module's outputs

+
+
+property fpath
+

Get the reV module output filepath(s)

+
+
Returns:
+

fpaths (str | list) – One or more filepaths output by current module being QA’d

+
+
+
+ +
+
+property sub_dir
+

QA/QC sub directory for this module’s outputs

+
+ +
+
+property plot_type
+

either ‘plot’ or ‘plotly’

+
+
Type:
+

Get the QA/QC plot type

+
+
+
+ +
+
+property dsets
+

Get the reV_h5 dsets to QA/QC

+
+ +
+
+property group
+

Get the reV_h5 group to QA/QC

+
+ +
+
+property process_size
+

Get the reV_h5 process_size for QA/QC

+
+ +
+
+property cmap
+

Get the QA/QC plot colormap

+
+ +
+
+property plot_step
+

Get the QA/QC step between exclusion mask points to plot

+
+ +
+
+property columns
+

Get the supply_curve columns to QA/QC

+
+ +
+
+property lcoe
+

Get the supply_curve lcoe column to plot

+
+ +
+
+property excl_fpath
+

Get the source exclusions filepath

+
+ +
+
+property excl_dict
+

Get the exclusions dictionary

+
+ +
+
+property area_filter_kernel
+

Get the minimum area filter kernel name (‘queen’ or ‘rook’).

+
+ +
+
+property min_area
+

Get the minimum area filter minimum area in km2.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.qa_qc.qa_qc.html b/_autosummary/reV.qa_qc.qa_qc.html new file mode 100644 index 000000000..c4a90de4f --- /dev/null +++ b/_autosummary/reV.qa_qc.qa_qc.html @@ -0,0 +1,642 @@ + + + + + + + reV.qa_qc.qa_qc — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.qa_qc.qa_qc

+

reV quality assurance and control classes

+

Classes

+ + + + + + + + + +

QaQc(out_dir)

reV QA/QC

QaQcModule(module_name, config, out_root)

Class to handle Module QA/QC

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.qa_qc.summary.ExclusionsMask.html b/_autosummary/reV.qa_qc.summary.ExclusionsMask.html new file mode 100644 index 000000000..fee916e28 --- /dev/null +++ b/_autosummary/reV.qa_qc.summary.ExclusionsMask.html @@ -0,0 +1,742 @@ + + + + + + + reV.qa_qc.summary.ExclusionsMask — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.qa_qc.summary.ExclusionsMask

+
+
+class ExclusionsMask(excl_mask)[source]
+

Bases: PlotBase

+

Plot Exclusions mask as a heat map data for QA/QC

+
+
Parameters:
+

excl_mask (str | ndarray) – Exclusions mask or path to .npy file containing final mask

+
+
+

Methods

+ + + + + + + + + + + + +

exclusions_plot([cmap, plot_step, out_path])

Plot exclusions mask as a seaborn heatmap

exclusions_plotly([cmap, plot_step, out_path])

Plot exclusions mask as a plotly heatmap

plot(mask, out_dir[, plot_type, cmap, plot_step])

Plot exclusions mask and save to out_dir

+

Attributes

+ + + + + + + + + +

data

Data to plot

mask

Final Exclusions mask

+
+
+property mask
+

Final Exclusions mask

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+exclusions_plot(cmap='Viridis', plot_step=100, out_path=None, **kwargs)[source]
+

Plot exclusions mask as a seaborn heatmap

+
+
Parameters:
+
    +
  • cmap (str | px.color, optional) – Continuous color scale to use, by default ‘Viridis’

  • +
  • plot_step (int) – Step between points to plot

  • +
  • out_path (str, optional) – File path to save plot to, can be a .html or static image, +by default None

  • +
  • kwargs (dict) – Additional kwargs for plotting.colormaps.heatmap_plot

  • +
+
+
+
+ +
+
+exclusions_plotly(cmap='Viridis', plot_step=100, out_path=None, **kwargs)[source]
+

Plot exclusions mask as a plotly heatmap

+
+
Parameters:
+
    +
  • cmap (str | px.color, optional) – Continuous color scale to use, by default ‘Viridis’

  • +
  • plot_step (int) – Step between points to plot

  • +
  • out_path (str, optional) – File path to save plot to, can be a .html or static image, +by default None

  • +
  • kwargs (dict) – Additional kwargs for plotly.express.imshow

  • +
+
+
+
+ +
+
+classmethod plot(mask, out_dir, plot_type='plotly', cmap='Viridis', plot_step=100, **kwargs)[source]
+

Plot exclusions mask and save to out_dir

+
+
Parameters:
+
    +
  • mask (ndarray) – ndarray of final exclusions mask

  • +
  • out_dir (str) – Output directory to save plots to

  • +
  • plot_type (str, optional) – plot_type of plot to create ‘plot’ or ‘plotly’, by default ‘plotly’

  • +
  • cmap (str, optional) – Colormap name, by default ‘viridis’

  • +
  • plot_step (int) – Step between points to plot

  • +
  • kwargs (dict) – Additional plotting kwargs

  • +
+
+
+
+ +
+
+property data
+

Data to plot

+
+
Returns:
+

pandas.DataFrame | ndarray

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.qa_qc.summary.PlotBase.html b/_autosummary/reV.qa_qc.summary.PlotBase.html new file mode 100644 index 000000000..5f73d52df --- /dev/null +++ b/_autosummary/reV.qa_qc.summary.PlotBase.html @@ -0,0 +1,667 @@ + + + + + + + reV.qa_qc.summary.PlotBase — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.qa_qc.summary.PlotBase

+
+
+class PlotBase(data)[source]
+

Bases: object

+

QA/QC Plotting base class

+
+
Parameters:
+

data (str | pandas.DataFrame | ndarray) – data to plot or file containing data to plot

+
+
+

Methods

+ + + +
+

Attributes

+ + + + + + +

data

Data to plot

+
+
+property data
+

Data to plot

+
+
Returns:
+

pandas.DataFrame | ndarray

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.qa_qc.summary.SummarizeH5.html b/_autosummary/reV.qa_qc.summary.SummarizeH5.html new file mode 100644 index 000000000..21e532ff6 --- /dev/null +++ b/_autosummary/reV.qa_qc.summary.SummarizeH5.html @@ -0,0 +1,733 @@ + + + + + + + reV.qa_qc.summary.SummarizeH5 — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.qa_qc.summary.SummarizeH5

+
+
+class SummarizeH5(h5_file, group=None)[source]
+

Bases: object

+

reV Summary data for QA/QC

+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 file to summarize data from

  • +
  • group (str, optional) – Group within h5_file to summarize datasets for, by default None

  • +
+
+
+

Methods

+ + + + + + + + + + + + +

run(h5_file, out_dir[, group, dsets, ...])

Summarize all datasets in h5_file and dump to out_dir

summarize_dset(ds_name[, process_size, ...])

Compute dataset summary.

summarize_means([out_path])

Add means datasets to meta data

+

Attributes

+ + + + + + +

h5_file

.h5 file path

+
+
+property h5_file
+

.h5 file path

+
+
Returns:
+

str

+
+
+
+ +
+
+summarize_dset(ds_name, process_size=None, max_workers=None, out_path=None)[source]
+

Compute dataset summary. If dataset is 2D compute temporal statistics +for each site

+
+
Parameters:
+
    +
  • ds_name (str) – Dataset name of interest

  • +
  • process_size (int, optional) – Number of sites to process at a time, by default None

  • +
  • max_workers (int, optional) – Number of workers to use in parallel, if 1 run in serial, +if None use all available cores, by default None

  • +
  • out_path (str) – File path to save summary to

  • +
+
+
Returns:
+

summary (pandas.DataFrame) – Summary summary for dataset

+
+
+
+ +
+
+summarize_means(out_path=None)[source]
+

Add means datasets to meta data

+
+
Parameters:
+

out_path (str, optional) – Path to .csv file to save update meta data to, by default None

+
+
Returns:
+

meta (pandas.DataFrame) – Meta data with means datasets added

+
+
+
+ +
+
+classmethod run(h5_file, out_dir, group=None, dsets=None, process_size=None, max_workers=None)[source]
+

Summarize all datasets in h5_file and dump to out_dir

+
+
Parameters:
+
    +
  • h5_file (str) – Path to .h5 file to summarize data from

  • +
  • out_dir (str) – Directory to dump summary .csv files to

  • +
  • group (str, optional) – Group within h5_file to summarize datasets for, by default None

  • +
  • dsets (str | list, optional) – Datasets to summarize, by default None

  • +
  • process_size (int, optional) – Number of sites to process at a time, by default None

  • +
  • max_workers (int, optional) – Number of workers to use when summarizing 2D datasets, +by default None

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.qa_qc.summary.SummarizeSupplyCurve.html b/_autosummary/reV.qa_qc.summary.SummarizeSupplyCurve.html new file mode 100644 index 000000000..921e1f320 --- /dev/null +++ b/_autosummary/reV.qa_qc.summary.SummarizeSupplyCurve.html @@ -0,0 +1,704 @@ + + + + + + + reV.qa_qc.summary.SummarizeSupplyCurve — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.qa_qc.summary.SummarizeSupplyCurve

+
+
+class SummarizeSupplyCurve(sc_table)[source]
+

Bases: object

+

Summarize Supply Curve table

+

Methods

+ + + + + + + + + +

run(sc_table, out_dir[, columns])

Summarize Supply Curve Table and save to disk

supply_curve_summary([columns, out_path])

Summarize Supply Curve Table

+

Attributes

+ + + + + + +

sc_table

Supply Curve table

+
+
+property sc_table
+

Supply Curve table

+
+
Returns:
+

pd.DataFrame

+
+
+
+ +
+
+supply_curve_summary(columns=None, out_path=None)[source]
+

Summarize Supply Curve Table

+
+
Parameters:
+
    +
  • sc_table (str | pandas.DataFrame) – Supply curve table or .csv containing table

  • +
  • columns (str | list, optional) – Column(s) to summarize, if None summarize all numeric columns, +by default None

  • +
  • out_path (str, optional) – Path to .csv to save summary to, by default None

  • +
+
+
Returns:
+

sc_summary (pandas.DataFrame) – Summary statistics (mean, stdev, median, min, max, sum) for +Supply Curve table columns

+
+
+
+ +
+
+classmethod run(sc_table, out_dir, columns=None)[source]
+

Summarize Supply Curve Table and save to disk

+
+
Parameters:
+
    +
  • sc_table (str | pandas.DataFrame) – Path to .csv containing Supply Curve table

  • +
  • out_dir (str) – Directory to dump summary .csv files to

  • +
  • columns (str | list, optional) – Column(s) to summarize, if None summarize all numeric columns, +by default None

  • +
+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.qa_qc.summary.SummaryPlots.html b/_autosummary/reV.qa_qc.summary.SummaryPlots.html new file mode 100644 index 000000000..512d4675d --- /dev/null +++ b/_autosummary/reV.qa_qc.summary.SummaryPlots.html @@ -0,0 +1,815 @@ + + + + + + + reV.qa_qc.summary.SummaryPlots — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.qa_qc.summary.SummaryPlots

+
+
+class SummaryPlots(summary)[source]
+

Bases: PlotBase

+

Plot summary data for QA/QC

+
+
Parameters:
+

summary (str | pandas.DataFrame) – Summary DataFrame or path to summary .csv

+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + +

dist_plot(value[, out_path])

Plot distribution plot of value using seaborn.distplot

dist_plotly(value[, out_path])

Plot histogram of value using plotly

scatter(summary_csv, out_dir, value[, ...])

Create scatter plot for given value in summary table and save to out_dir

scatter_all(summary_csv, out_dir[, ...])

Create scatter plot for all summary stats in summary table and save to out_dir

scatter_plot(value[, cmap, out_path])

Plot scatter plot of value versus longitude and latitude using pandas.plot.scatter

scatter_plotly(value[, cmap, out_path])

Plot scatter plot of value versus longitude and latitude using plotly

+

Attributes

+ + + + + + + + + + + + +

columns

Available columns in summary table

data

Data to plot

summary

Summary table

+
+
+property summary
+

Summary table

+
+
Returns:
+

pandas.DataFrame

+
+
+
+ +
+
+property columns
+

Available columns in summary table

+
+
Returns:
+

list

+
+
+
+ +
+
+scatter_plot(value, cmap='viridis', out_path=None, **kwargs)[source]
+

Plot scatter plot of value versus longitude and latitude using +pandas.plot.scatter

+
+
Parameters:
+
    +
  • value (str) – Column name to plot as color

  • +
  • cmap (str, optional) – Matplotlib colormap name, by default ‘viridis’

  • +
  • out_path (str, optional) – File path to save plot to, by default None

  • +
  • kwargs (dict) – Additional kwargs for plotting.dataframes.df_scatter

  • +
+
+
+
+ +
+
+scatter_plotly(value, cmap='Viridis', out_path=None, **kwargs)[source]
+

Plot scatter plot of value versus longitude and latitude using +plotly

+
+
Parameters:
+
    +
  • value (str) – Column name to plot as color

  • +
  • cmap (str | px.color, optional) – Continuous color scale to use, by default ‘Viridis’

  • +
  • out_path (str, optional) – File path to save plot to, can be a .html or static image, +by default None

  • +
  • kwargs (dict) – Additional kwargs for plotly.express.scatter

  • +
+
+
+
+ +
+
+dist_plot(value, out_path=None, **kwargs)[source]
+

Plot distribution plot of value using seaborn.distplot

+
+
Parameters:
+
    +
  • value (str) – Column name to plot

  • +
  • out_path (str, optional) – File path to save plot to, by default None

  • +
  • kwargs (dict) – Additional kwargs for plotting.dataframes.dist_plot

  • +
+
+
+
+ +
+
+dist_plotly(value, out_path=None, **kwargs)[source]
+

Plot histogram of value using plotly

+
+
Parameters:
+
    +
  • value (str) – Column name to plot

  • +
  • out_path (str, optional) – File path to save plot to, by default None

  • +
  • kwargs (dict) – Additional kwargs for plotly.express.histogram

  • +
+
+
+
+ +
+
+classmethod scatter(summary_csv, out_dir, value, plot_type='plotly', cmap='viridis', **kwargs)[source]
+

Create scatter plot for given value in summary table and save to +out_dir

+
+
Parameters:
+
    +
  • summary_csv (str) – Path to .csv file containing summary table

  • +
  • out_dir (str) – Output directory to save plots to

  • +
  • value (str) – Column name to plot as color

  • +
  • plot_type (str, optional) – plot_type of plot to create ‘plot’ or ‘plotly’, by default ‘plotly’

  • +
  • cmap (str, optional) – Colormap name, by default ‘viridis’

  • +
  • kwargs (dict) – Additional plotting kwargs

  • +
+
+
+
+ +
+
+classmethod scatter_all(summary_csv, out_dir, plot_type='plotly', cmap='viridis', **kwargs)[source]
+

Create scatter plot for all summary stats in summary table and save to +out_dir

+
+
Parameters:
+
    +
  • summary_csv (str) – Path to .csv file containing summary table

  • +
  • out_dir (str) – Output directory to save plots to

  • +
  • plot_type (str, optional) – plot_type of plot to create ‘plot’ or ‘plotly’, by default ‘plotly’

  • +
  • cmap (str, optional) – Colormap name, by default ‘viridis’

  • +
  • kwargs (dict) – Additional plotting kwargs

  • +
+
+
+
+ +
+
+property data
+

Data to plot

+
+
Returns:
+

pandas.DataFrame | ndarray

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.qa_qc.summary.SupplyCurvePlot.html b/_autosummary/reV.qa_qc.summary.SupplyCurvePlot.html new file mode 100644 index 000000000..b3676eb02 --- /dev/null +++ b/_autosummary/reV.qa_qc.summary.SupplyCurvePlot.html @@ -0,0 +1,753 @@ + + + + + + + reV.qa_qc.summary.SupplyCurvePlot — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.qa_qc.summary.SupplyCurvePlot

+
+
+class SupplyCurvePlot(sc_table)[source]
+

Bases: PlotBase

+

Plot supply curve data for QA/QC

+
+
Parameters:
+

sc_table (str | pandas.DataFrame) – Supply curve table or path to supply curve .csv

+
+
+

Methods

+ + + + + + + + + + + + +

plot(sc_table, out_dir[, plot_type, lcoe])

Create supply curve plot from supply curve table using lcoe value and save to out_dir

supply_curve_plot([lcoe, out_path])

Plot supply curve (cumulative capacity vs lcoe) using seaborn.scatter

supply_curve_plotly([lcoe, out_path])

Plot supply curve (cumulative capacity vs lcoe) using plotly

+

Attributes

+ + + + + + + + + + + + +

columns

Available columns in supply curve table

data

Data to plot

sc_table

Supply curve table

+
+
+property sc_table
+

Supply curve table

+
+
Returns:
+

pandas.DataFrame

+
+
+
+ +
+
+property columns
+

Available columns in supply curve table

+
+
Returns:
+

list

+
+
+
+ +
+
+supply_curve_plot(lcoe='mean_lcoe', out_path=None, **kwargs)[source]
+

Plot supply curve (cumulative capacity vs lcoe) using seaborn.scatter

+
+
Parameters:
+
    +
  • lcoe (str, optional) – LCOE value to plot, by default ‘mean_lcoe’

  • +
  • out_path (str, optional) – File path to save plot to, by default None

  • +
  • kwargs (dict) – Additional kwargs for plotting.dataframes.df_scatter

  • +
+
+
+
+ +
+
+supply_curve_plotly(lcoe='mean_lcoe', out_path=None, **kwargs)[source]
+

Plot supply curve (cumulative capacity vs lcoe) using plotly

+
+
Parameters:
+
    +
  • lcoe (str, optional) – LCOE value to plot, by default ‘mean_lcoe’

  • +
  • out_path (str, optional) – File path to save plot to, can be a .html or static image, +by default None

  • +
  • kwargs (dict) – Additional kwargs for plotly.express.scatter

  • +
+
+
+
+ +
+
+classmethod plot(sc_table, out_dir, plot_type='plotly', lcoe='mean_lcoe', **kwargs)[source]
+

Create supply curve plot from supply curve table using lcoe value +and save to out_dir

+
+
Parameters:
+
    +
  • sc_table (str) – Path to .csv file containing Supply Curve table

  • +
  • out_dir (str) – Output directory to save plots to

  • +
  • plot_type (str, optional) – plot_type of plot to create ‘plot’ or ‘plotly’, by default ‘plotly’

  • +
  • lcoe (str, optional) – LCOE value to plot, by default ‘mean_lcoe’

  • +
  • kwargs (dict) – Additional plotting kwargs

  • +
+
+
+
+ +
+
+property data
+

Data to plot

+
+
Returns:
+

pandas.DataFrame | ndarray

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.qa_qc.summary.html b/_autosummary/reV.qa_qc.summary.html new file mode 100644 index 000000000..d0666e6fd --- /dev/null +++ b/_autosummary/reV.qa_qc.summary.html @@ -0,0 +1,654 @@ + + + + + + + reV.qa_qc.summary — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.qa_qc.summary

+

Compute and plot summary data

+

Classes

+ + + + + + + + + + + + + + + + + + + + + +

ExclusionsMask(excl_mask)

Plot Exclusions mask as a heat map data for QA/QC

PlotBase(data)

QA/QC Plotting base class

SummarizeH5(h5_file[, group])

reV Summary data for QA/QC

SummarizeSupplyCurve(sc_table)

Summarize Supply Curve table

SummaryPlots(summary)

Plot summary data for QA/QC

SupplyCurvePlot(sc_table)

Plot supply curve data for QA/QC

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.rep_profiles.cli_rep_profiles.html b/_autosummary/reV.rep_profiles.cli_rep_profiles.html new file mode 100644 index 000000000..51f55c744 --- /dev/null +++ b/_autosummary/reV.rep_profiles.cli_rep_profiles.html @@ -0,0 +1,631 @@ + + + + + + + reV.rep_profiles.cli_rep_profiles — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.rep_profiles.cli_rep_profiles

+

reV Representative Profiles CLI utility functions.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.rep_profiles.html b/_autosummary/reV.rep_profiles.html new file mode 100644 index 000000000..b5786ac6f --- /dev/null +++ b/_autosummary/reV.rep_profiles.html @@ -0,0 +1,640 @@ + + + + + + + reV.rep_profiles — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.rep_profiles

+

reV representative profile extraction tool,

+ + + + + + + + + +

reV.rep_profiles.cli_rep_profiles

reV Representative Profiles CLI utility functions.

reV.rep_profiles.rep_profiles

Representative profile extraction utilities.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.rep_profiles.rep_profiles.RegionRepProfile.html b/_autosummary/reV.rep_profiles.rep_profiles.RegionRepProfile.html new file mode 100644 index 000000000..762574700 --- /dev/null +++ b/_autosummary/reV.rep_profiles.rep_profiles.RegionRepProfile.html @@ -0,0 +1,780 @@ + + + + + + + reV.rep_profiles.rep_profiles.RegionRepProfile — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.rep_profiles.rep_profiles.RegionRepProfile

+
+
+class RegionRepProfile(gen_fpath, rev_summary, cf_dset='cf_profile', rep_method='meanoid', err_method='rmse', weight='gid_counts', n_profiles=1)[source]
+

Bases: object

+

Framework to handle rep profile for one resource region

+
+
Parameters:
+
    +
  • gen_fpath (str) – Filepath to reV gen output file to extract “cf_profile” from.

  • +
  • rev_summary (pd.DataFrame) – Aggregated rev supply curve summary file trimmed to just one +region to get a rep profile for. +Must include “res_gids”, “gen_gids”, and the “weight” column (if +weight is not None)

  • +
  • cf_dset (str) – Dataset name to pull generation profiles from.

  • +
  • rep_method (str) – Method identifier for calculation of the representative profile.

  • +
  • err_method (str | None) – Method identifier for calculation of error from the representative +profile (e.g. “rmse”, “mae”, “mbe”). If this is None, the +representative meanoid / medianoid profile will be returned +directly

  • +
  • weight (str | None) – Column in rev_summary used to apply weighted mean to profiles. +The supply curve table data in the weight column should have +weight values corresponding to the res_gids in the same row.

  • +
  • n_profiles (int) – Number of representative profiles to retrieve.

  • +
+
+
+

Methods

+ + + + + + +

get_region_rep_profile(gen_fpath, rev_summary)

Class method for parallelization of rep profile calc.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

GEN_GID_COL

RES_GID_COL

i_reps

Get the representative profile index(es) of this region.

rep_gen_gids

Get the representative profile gen gids of this region.

rep_profiles

Get the representative profiles of this region.

rep_res_gids

Get the representative profile resource gids of this region.

source_profiles

Retrieve the cf profile array from the source generation h5 file.

weights

Get the weights array

+
+
+property source_profiles
+

Retrieve the cf profile array from the source generation h5 file.

+
+
Returns:
+

profiles (np.ndarray) – Timeseries array of cf profile data.

+
+
+
+ +
+
+property weights
+

Get the weights array

+
+
Returns:
+

weights (np.ndarray | None) – Flat array of weight values from the weight column. The supply +curve table data in the weight column should have a list of weight +values corresponding to the gen_gids list in the same row.

+
+
+
+ +
+
+property rep_profiles
+

Get the representative profiles of this region.

+
+ +
+
+property i_reps
+

Get the representative profile index(es) of this region.

+
+ +
+
+property rep_gen_gids
+

Get the representative profile gen gids of this region.

+
+ +
+
+property rep_res_gids
+

Get the representative profile resource gids of this region.

+
+ +
+
+classmethod get_region_rep_profile(gen_fpath, rev_summary, cf_dset='cf_profile', rep_method='meanoid', err_method='rmse', weight='gid_counts', n_profiles=1)[source]
+

Class method for parallelization of rep profile calc.

+
+
Parameters:
+
    +
  • gen_fpath (str) – Filepath to reV gen output file to extract “cf_profile” from.

  • +
  • rev_summary (pd.DataFrame) – Aggregated rev supply curve summary file trimmed to just one +region to get a rep profile for. +Must include “res_gids”, “gen_gids”, and the “weight” column (if +weight is not None)

  • +
  • cf_dset (str) – Dataset name to pull generation profiles from.

  • +
  • rep_method (str) – Method identifier for calculation of the representative profile.

  • +
  • err_method (str | None) – Method identifier for calculation of error from the representative +profile (e.g. “rmse”, “mae”, “mbe”). If this is None, the +representative meanoid / medianoid profile will be returned +directly

  • +
  • weight (str | None) – Column in rev_summary used to apply weighted mean to profiles. +The supply curve table data in the weight column should have +weight values corresponding to the res_gids in the same row.

  • +
  • n_profiles (int) – Number of representative profiles to retrieve.

  • +
+
+
Returns:
+

    +
  • rep_profile (np.ndarray) – (time, n_profiles) array for the most representative profile(s)

  • +
  • i_rep (list) – Column Index in profiles of the representative profile(s).

  • +
  • gen_gid_reps (list) – Generation gid(s) of the representative profile(s).

  • +
  • res_gid_reps (list) – Resource gid(s) of the representative profile(s).

  • +
+

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.rep_profiles.rep_profiles.RepProfiles.html b/_autosummary/reV.rep_profiles.rep_profiles.RepProfiles.html new file mode 100644 index 000000000..164a8b388 --- /dev/null +++ b/_autosummary/reV.rep_profiles.rep_profiles.RepProfiles.html @@ -0,0 +1,839 @@ + + + + + + + reV.rep_profiles.rep_profiles.RepProfiles — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.rep_profiles.rep_profiles.RepProfiles

+
+
+class RepProfiles(gen_fpath, rev_summary, reg_cols, cf_dset='cf_profile', rep_method='meanoid', err_method='rmse', weight='gid_counts', n_profiles=1, aggregate_profiles=False)[source]
+

Bases: RepProfilesBase

+

reV rep profiles class.

+

reV rep profiles compute representative generation profiles +for each supply curve point output by reV supply curve +aggregation. Representative profiles can either be a spatial +aggregation of generation profiles or actual generation profiles +that most closely resemble an aggregated profile (selected based +on an error metric).

+
+
Parameters:
+
    +
  • gen_fpath (str) – Filepath to reV generation output HDF5 file to extract +cf_dset dataset from.

    +
    +

    Note

    +

    If executing reV from the command line, this +path can contain brackets {} that will be filled in by +the analysis_years input. Alternatively, this input can +be set to "PIPELINE", which will parse this input from +one of these preceding pipeline steps: multi-year, +collect, generation, or +supply-curve-aggregation. However, note that duplicate +executions of any of these commands within the pipeline +may invalidate this parsing, meaning the gen_fpath input +will have to be specified manually.

    +
    +
  • +
  • rev_summary (str | pd.DataFrame) – Aggregated reV supply curve summary file. Must include +the following columns:

    +
    +
      +
    • res_gids : string representation of python list +containing the resource GID values corresponding to +each supply curve point.

    • +
    • gen_gids : string representation of python list +containing the reV generation GID values +corresponding to each supply curve point.

    • +
    • weight column (name based on weight input) : string +representation of python list containing the resource +GID weights for each supply curve point.

    • +
    +
    +
    +

    Note

    +

    If executing reV from the command line, this +input can be set to "PIPELINE", which will parse this +input from one of these preceding pipeline steps: +supply-curve-aggregation or supply-curve. +However, note that duplicate executions of any of these +commands within the pipeline may invalidate this parsing, +meaning the rev_summary input will have to be specified +manually.

    +
    +
  • +
  • reg_cols (str | list) – Label(s) for a categorical region column(s) to extract +profiles for. For example, "state" will extract a rep +profile for each unique entry in the "state" column in +rev_summary. To get a profile for each supply curve point, +try setting reg_cols to a primary key such as +"sc_gid".

  • +
  • cf_dset (str, optional) – Dataset name to pull generation profiles from. This dataset +must be present in the gen_fpath HDF5 file. By default, +"cf_profile"

    +
    +

    Note

    +

    If executing reV from the command line, this +name can contain brackets {} that will be filled in by +the analysis_years input (e.g. "cf_profile-{}").

    +
    +
  • +
  • rep_method ({‘mean’, ‘meanoid’, ‘median’, ‘medianoid’}, optional) – Method identifier for calculation of the representative +profile. By default, 'meanoid'

  • +
  • err_method ({‘mbe’, ‘mae’, ‘rmse’}, optional) – Method identifier for calculation of error from the +representative profile. If this input is None, the +representative meanoid / medianoid profile will be returned +directly. By default, 'rmse'.

  • +
  • weight (str, optional) – Column in rev_summary used to apply weights when computing +mean profiles. The supply curve table data in the weight +column should have weight values corresponding to the +res_gids in the same row (i.e. string representation of +python list containing weight values).

    +
    +

    Important

    +

    You’ll often want to set this value to +something other than None (typically "gid_counts" +if running on standard reV outputs). Otherwise, the +unique generation profiles within each supply curve point +are weighted equally. For example, if you have a 64x64 +supply curve point, and one generation profile takes up +4095 (99.98%) 90m cells while a second generation profile +takes up only one 90m cell (0.02%), they will contribute +equally to the meanoid profile unless these weights are +specified.

    +
    +

    By default, 'gid_counts'.

    +
  • +
  • n_profiles (int, optional) – Number of representative profiles to save to the output +file. By default, 1.

  • +
  • aggregate_profiles (bool, optional) – Flag to calculate the aggregate (weighted meanoid) profile +for each supply curve point. This behavior is in lieu of +finding the single profile per region closest to the +meanoid. If you set this flag to True, the rep_method, +err_method, and n_profiles inputs will be forcibly set +to the default values. By default, False.

  • +
+
+
+

Methods

+ + + + + + + + + +

run([fout, save_rev_summary, ...])

Run representative profiles in serial or parallel and save to disc

save_profiles(fout[, save_rev_summary, ...])

Initialize fout and save profiles.

+

Attributes

+ + + + + + + + + + + + +

meta

Meta data for the representative profiles.

profiles

Get the arrays of representative CF profiles corresponding to meta.

time_index

Get the time index for the rep profiles.

+
+
+run(fout=None, save_rev_summary=True, scaled_precision=False, max_workers=None)[source]
+

Run representative profiles in serial or parallel and save to disc

+
+
Parameters:
+
    +
  • fout (str, optional) – Filepath to output HDF5 file. If None, output data are +not written to a file. By default, None.

  • +
  • save_rev_summary (bool, optional) – Flag to save full reV supply curve table to rep profile +output. By default, True.

  • +
  • scaled_precision (bool, optional) – Flag to scale cf_profiles by 1000 and save as uint16. +By default, False.

  • +
  • max_workers (int, optional) – Number of parallel rep profile workers. 1 will run +serial, while None will use all available. +By default, None.

  • +
+
+
+
+ +
+
+property meta
+

Meta data for the representative profiles.

+
+
Returns:
+

meta (pd.DataFrame) – Meta data for the representative profiles. At the very least, +this has columns for the region and res class.

+
+
+
+ +
+
+property profiles
+

Get the arrays of representative CF profiles corresponding to meta.

+
+
Returns:
+

profiles (dict) – dict of n_profile-keyed arrays with shape (time, n) for the +representative profiles for each region.

+
+
+
+ +
+
+save_profiles(fout, save_rev_summary=True, scaled_precision=False)
+

Initialize fout and save profiles.

+
+
Parameters:
+
    +
  • fout (str) – None or filepath to output h5 file.

  • +
  • save_rev_summary (bool) – Flag to save full reV SC table to rep profile output.

  • +
  • scaled_precision (bool) – Flag to scale cf_profiles by 1000 and save as uint16.

  • +
+
+
+
+ +
+
+property time_index
+

Get the time index for the rep profiles.

+
+
Returns:
+

time_index (pd.datetimeindex) – Time index sourced from the reV gen file.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.rep_profiles.rep_profiles.RepProfilesBase.html b/_autosummary/reV.rep_profiles.rep_profiles.RepProfilesBase.html new file mode 100644 index 000000000..dfa876857 --- /dev/null +++ b/_autosummary/reV.rep_profiles.rep_profiles.RepProfilesBase.html @@ -0,0 +1,742 @@ + + + + + + + reV.rep_profiles.rep_profiles.RepProfilesBase — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.rep_profiles.rep_profiles.RepProfilesBase

+
+
+class RepProfilesBase(gen_fpath, rev_summary, reg_cols=None, cf_dset='cf_profile', rep_method='meanoid', err_method='rmse', weight='gid_counts', n_profiles=1)[source]
+

Bases: ABC

+

Abstract utility framework for representative profile run classes.

+
+
Parameters:
+
    +
  • gen_fpath (str) – Filepath to reV gen output file to extract “cf_profile” from.

  • +
  • rev_summary (str | pd.DataFrame) – Aggregated rev supply curve summary file. Str filepath or full df. +Must include “res_gids”, “gen_gids”, and the “weight” column (if +weight is not None)

  • +
  • reg_cols (str | list | None) – Label(s) for a categorical region column(s) to extract profiles +for. e.g. “state” will extract a rep profile for each unique entry +in the “state” column in rev_summary.

  • +
  • cf_dset (str) – Dataset name to pull generation profiles from.

  • +
  • rep_method (str) – Method identifier for calculation of the representative profile.

  • +
  • err_method (str | None) – Method identifier for calculation of error from the representative +profile (e.g. “rmse”, “mae”, “mbe”). If this is None, the +representative meanoid / medianoid profile will be returned +directly

  • +
  • weight (str | None) – Column in rev_summary used to apply weighted mean to profiles. +The supply curve table data in the weight column should have +weight values corresponding to the res_gids in the same row.

  • +
  • n_profiles (int) – Number of representative profiles to save to fout.

  • +
+
+
+

Methods

+ + + + + + + + + +

run()

Abstract method for generic run method.

save_profiles(fout[, save_rev_summary, ...])

Initialize fout and save profiles.

+

Attributes

+ + + + + + + + + + + + +

meta

Meta data for the representative profiles.

profiles

Get the arrays of representative CF profiles corresponding to meta.

time_index

Get the time index for the rep profiles.

+
+
+property time_index
+

Get the time index for the rep profiles.

+
+
Returns:
+

time_index (pd.datetimeindex) – Time index sourced from the reV gen file.

+
+
+
+ +
+
+property meta
+

Meta data for the representative profiles.

+
+
Returns:
+

meta (pd.DataFrame) – Meta data for the representative profiles. At the very least, +this has columns for the region and res class.

+
+
+
+ +
+
+property profiles
+

Get the arrays of representative CF profiles corresponding to meta.

+
+
Returns:
+

profiles (dict) – dict of n_profile-keyed arrays with shape (time, n) for the +representative profiles for each region.

+
+
+
+ +
+
+save_profiles(fout, save_rev_summary=True, scaled_precision=False)[source]
+

Initialize fout and save profiles.

+
+
Parameters:
+
    +
  • fout (str) – None or filepath to output h5 file.

  • +
  • save_rev_summary (bool) – Flag to save full reV SC table to rep profile output.

  • +
  • scaled_precision (bool) – Flag to scale cf_profiles by 1000 and save as uint16.

  • +
+
+
+
+ +
+
+abstract run()[source]
+

Abstract method for generic run method.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.rep_profiles.rep_profiles.RepresentativeMethods.html b/_autosummary/reV.rep_profiles.rep_profiles.RepresentativeMethods.html new file mode 100644 index 000000000..630355985 --- /dev/null +++ b/_autosummary/reV.rep_profiles.rep_profiles.RepresentativeMethods.html @@ -0,0 +1,851 @@ + + + + + + + reV.rep_profiles.rep_profiles.RepresentativeMethods — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.rep_profiles.rep_profiles.RepresentativeMethods

+
+
+class RepresentativeMethods(profiles, weights=None, rep_method='meanoid', err_method='rmse')[source]
+

Bases: object

+

Class for organizing the methods to determine representative-ness

+
+
Parameters:
+
    +
  • profiles (np.ndarray) – (time, sites) timeseries array of cf profile data.

  • +
  • weights (np.ndarray | list) – 1D array of weighting factors (multiplicative) for profiles.

  • +
  • rep_method (str) – Method identifier for calculation of the representative profile.

  • +
  • err_method (str | None) – Method identifier for calculation of error from the representative +profile (e.g. “rmse”, “mae”, “mbe”). If this is None, the +representative meanoid / medianoid profile will be returned +directly

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + +

mae(profiles, baseline[, i_profile])

Calculate the mean absolute error of profiles vs.

mbe(profiles, baseline[, i_profile])

Calculate the mean bias error of profiles vs.

meanoid(profiles[, weights])

Find the mean profile across all sites.

medianoid(profiles)

Find the median profile across all sites.

nargmin(arr, n)

Get the index of the Nth min value in arr.

rmse(profiles, baseline[, i_profile])

Calculate the RMSE of profiles vs.

run(profiles[, weights, rep_method, ...])

Run representative profile methods.

+

Attributes

+ + + + + + + + + +

err_methods

Lookup table of error methods

rep_methods

Lookup table of representative methods

+
+
+property rep_methods
+

Lookup table of representative methods

+
+ +
+
+property err_methods
+

Lookup table of error methods

+
+ +
+
+static nargmin(arr, n)[source]
+

Get the index of the Nth min value in arr.

+
+
Parameters:
+
    +
  • arr (np.ndarray) – 1D array.

  • +
  • n (int) – If n is 0, this returns the location of the min value in arr. +If n is 1, this returns the location of the 2nd min value in arr.

  • +
+
+
Returns:
+

i (int) – Location of the Nth min value in arr.

+
+
+
+ +
+
+static meanoid(profiles, weights=None)[source]
+

Find the mean profile across all sites.

+
+
Parameters:
+
    +
  • profiles (np.ndarray) – (time, sites) timeseries array of cf profile data.

  • +
  • weights (np.ndarray | list) – 1D array of weighting factors (multiplicative) for profiles.

  • +
+
+
Returns:
+

arr (np.ndarray) – (time, 1) timeseries of the mean of all cf profiles across sites.

+
+
+
+ +
+
+static medianoid(profiles)[source]
+

Find the median profile across all sites.

+
+
Parameters:
+

profiles (np.ndarray) – (time, sites) timeseries array of cf profile data.

+
+
Returns:
+

arr (np.ndarray) – (time, 1) timeseries of the median at every timestep of all +cf profiles across sites.

+
+
+
+ +
+
+classmethod mbe(profiles, baseline, i_profile=0)[source]
+

Calculate the mean bias error of profiles vs. a baseline profile.

+
+
Parameters:
+
    +
  • profiles (np.ndarray) – (time, sites) timeseries array of cf profile data.

  • +
  • baseline (np.ndarray) – (time, 1) timeseries of the meanoid or medianoid to which +cf profiles should be compared.

  • +
  • i_profile (int) – The index of the representative profile being saved +(for n_profiles). 0 is the most representative profile.

  • +
+
+
Returns:
+

    +
  • profile (np.ndarray) – (time, 1) array for the most representative profile

  • +
  • i_rep (int) – Column Index in profiles of the representative profile.

  • +
+

+
+
+
+ +
+
+classmethod mae(profiles, baseline, i_profile=0)[source]
+

Calculate the mean absolute error of profiles vs. a baseline profile

+
+
Parameters:
+
    +
  • profiles (np.ndarray) – (time, sites) timeseries array of cf profile data.

  • +
  • baseline (np.ndarray) – (time, 1) timeseries of the meanoid or medianoid to which +cf profiles should be compared.

  • +
  • i_profile (int) – The index of the representative profile being saved +(for n_profiles). 0 is the most representative profile.

  • +
+
+
Returns:
+

    +
  • profile (np.ndarray) – (time, 1) array for the most representative profile

  • +
  • i_rep (int) – Column Index in profiles of the representative profile.

  • +
+

+
+
+
+ +
+
+classmethod rmse(profiles, baseline, i_profile=0)[source]
+

Calculate the RMSE of profiles vs. a baseline profile

+
+
Parameters:
+
    +
  • profiles (np.ndarray) – (time, sites) timeseries array of cf profile data.

  • +
  • baseline (np.ndarray) – (time, 1) timeseries of the meanoid or medianoid to which +cf profiles should be compared.

  • +
  • i_profile (int) – The index of the representative profile being saved +(for n_profiles). 0 is the most representative profile.

  • +
+
+
Returns:
+

    +
  • profile (np.ndarray) – (time, 1) array for the most representative profile

  • +
  • i_rep (int) – Column Index in profiles of the representative profile.

  • +
+

+
+
+
+ +
+
+classmethod run(profiles, weights=None, rep_method='meanoid', err_method='rmse', n_profiles=1)[source]
+

Run representative profile methods.

+
+
Parameters:
+
    +
  • profiles (np.ndarray) – (time, sites) timeseries array of cf profile data.

  • +
  • weights (np.ndarray | list) – 1D array of weighting factors (multiplicative) for profiles.

  • +
  • rep_method (str) – Method identifier for calculation of the representative profile.

  • +
  • err_method (str | None) – Method identifier for calculation of error from the representative +profile (e.g. “rmse”, “mae”, “mbe”). If this is None, the +representative meanoid / medianoid profile will be returned +directly.

  • +
  • n_profiles (int) – Number of representative profiles to save to fout.

  • +
+
+
Returns:
+

    +
  • profiles (np.ndarray) – (time, n_profiles) array for the most representative profile(s)

  • +
  • i_reps (list | None) – List (length of n_profiles) with column Index in profiles of the +representative profile(s). If err_method is None, this value is +also set to None.

  • +
+

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.rep_profiles.rep_profiles.html b/_autosummary/reV.rep_profiles.rep_profiles.html new file mode 100644 index 000000000..97c686932 --- /dev/null +++ b/_autosummary/reV.rep_profiles.rep_profiles.html @@ -0,0 +1,650 @@ + + + + + + + reV.rep_profiles.rep_profiles — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.rep_profiles.rep_profiles

+

Representative profile extraction utilities.

+

Created on Thu Oct 31 12:49:23 2019

+

@author: gbuster

+

Classes

+ + + + + + + + + + + + + + + +

RegionRepProfile(gen_fpath, rev_summary[, ...])

Framework to handle rep profile for one resource region

RepProfiles(gen_fpath, rev_summary, reg_cols)

reV rep profiles class.

RepProfilesBase(gen_fpath, rev_summary[, ...])

Abstract utility framework for representative profile run classes.

RepresentativeMethods(profiles[, weights, ...])

Class for organizing the methods to determine representative-ness

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.aggregation.AbstractAggFileHandler.html b/_autosummary/reV.supply_curve.aggregation.AbstractAggFileHandler.html new file mode 100644 index 000000000..006a9c75a --- /dev/null +++ b/_autosummary/reV.supply_curve.aggregation.AbstractAggFileHandler.html @@ -0,0 +1,697 @@ + + + + + + + reV.supply_curve.aggregation.AbstractAggFileHandler — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.aggregation.AbstractAggFileHandler

+
+
+class AbstractAggFileHandler(excl_fpath, excl_dict=None, area_filter_kernel='queen', min_area=None)[source]
+

Bases: ABC

+

Simple framework to handle aggregation file context managers.

+
+
Parameters:
+
    +
  • excl_fpath (str | list | tuple) – Filepath to exclusions h5 with techmap dataset +(can be one or more filepaths).

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +by default None

  • +
  • area_filter_kernel (str, optional) – Contiguous area filter method to use on final exclusions mask, +by default ‘queen’

  • +
  • min_area (float, optional) – Minimum required contiguous area filter in sq-km, +by default None

  • +
+
+
+

Methods

+ + + + + + +

close()

Close all file handlers.

+

Attributes

+ + + + + + + + + +

exclusions

Get the exclusions file handler object.

h5

Placeholder for h5 Resource handler

+
+
+abstract close()[source]
+

Close all file handlers.

+
+ +
+
+property exclusions
+

Get the exclusions file handler object.

+
+
Returns:
+

_excl (ExclusionMask) – Exclusions h5 handler object.

+
+
+
+ +
+
+property h5
+

Placeholder for h5 Resource handler

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.aggregation.AggFileHandler.html b/_autosummary/reV.supply_curve.aggregation.AggFileHandler.html new file mode 100644 index 000000000..05b8c4c3e --- /dev/null +++ b/_autosummary/reV.supply_curve.aggregation.AggFileHandler.html @@ -0,0 +1,712 @@ + + + + + + + reV.supply_curve.aggregation.AggFileHandler — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.aggregation.AggFileHandler

+
+
+class AggFileHandler(excl_fpath, h5_fpath, excl_dict=None, area_filter_kernel='queen', min_area=None, h5_handler=None)[source]
+

Bases: AbstractAggFileHandler

+

Framework to handle aggregation file context manager: +- exclusions .h5 file +- h5 file to be aggregated

+
+
Parameters:
+
    +
  • excl_fpath (str | list | tuple) – Filepath to exclusions h5 with techmap dataset +(can be one or more filepaths).

  • +
  • h5_fpath (str) – Filepath to .h5 file to be aggregated

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +by default None

  • +
  • area_filter_kernel (str, optional) – Contiguous area filter method to use on final exclusions mask, +by default ‘queen’

  • +
  • min_area (float, optional) – Minimum required contiguous area filter in sq-km, by default None

  • +
  • h5_handler (rex.Resource | None) – Optional special handler similar to the rex.Resource handler which +is default.

  • +
+
+
+

Methods

+ + + + + + +

close()

Close all file handlers.

+

Attributes

+ + + + + + + + + +

exclusions

Get the exclusions file handler object.

h5

Get the h5 file handler object.

+
+
+DEFAULT_H5_HANDLER
+

alias of Resource

+
+ +
+
+property h5
+

Get the h5 file handler object.

+
+
Returns:
+

_h5 (Outputs) – reV h5 outputs handler object.

+
+
+
+ +
+
+close()[source]
+

Close all file handlers.

+
+ +
+
+property exclusions
+

Get the exclusions file handler object.

+
+
Returns:
+

_excl (ExclusionMask) – Exclusions h5 handler object.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.aggregation.Aggregation.html b/_autosummary/reV.supply_curve.aggregation.Aggregation.html new file mode 100644 index 000000000..72d0402f2 --- /dev/null +++ b/_autosummary/reV.supply_curve.aggregation.Aggregation.html @@ -0,0 +1,882 @@ + + + + + + + reV.supply_curve.aggregation.Aggregation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.aggregation.Aggregation

+
+
+class Aggregation(excl_fpath, tm_dset, *agg_dset, excl_dict=None, area_filter_kernel='queen', min_area=None, resolution=64, excl_area=None, gids=None, pre_extract_inclusions=False)[source]
+

Bases: BaseAggregation

+

Concrete but generalized aggregation framework to aggregate ANY reV h5 +file to a supply curve grid (based on an aggregated exclusion grid).

+
+
Parameters:
+
    +
  • excl_fpath (str | list | tuple) – Filepath to exclusions h5 with techmap dataset +(can be one or more filepaths).

  • +
  • tm_dset (str) – Dataset name in the techmap file containing the +exclusions-to-resource mapping data.

  • +
  • agg_dset (str) – Dataset to aggregate, can supply multiple datasets. The datasets +should be scalar values for each site. This method cannot aggregate +timeseries data.

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +by default None

  • +
  • area_filter_kernel (str, optional) – Contiguous area filter method to use on final exclusions mask, +by default “queen”

  • +
  • min_area (float, optional) – Minimum required contiguous area filter in sq-km, +by default None

  • +
  • resolution (int, optional) – SC resolution, must be input in combination with gid. Preferred +option is to use the row/col slices to define the SC point instead, +by default None

  • +
  • excl_area (float, optional) – Area of an exclusion pixel in km2. None will try to infer the area +from the profile transform attribute in excl_fpath, +by default None

  • +
  • gids (list, optional) – List of supply curve point gids to get summary for (can use to +subset if running in parallel), or None for all gids in the SC +extent, by default None

  • +
  • pre_extract_inclusions (bool, optional) – Optional flag to pre-extract/compute the inclusion mask from the +provided excl_dict, by default False. Typically faster to compute +the inclusion mask on the fly with parallel workers.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + +

aggregate(h5_fpath[, agg_method, ...])

Aggregate with given agg_method

run(excl_fpath, h5_fpath, tm_dset, *agg_dset)

Get the supply curve points aggregation summary.

run_parallel(h5_fpath[, agg_method, ...])

Aggregate in parallel

run_serial(excl_fpath, h5_fpath, tm_dset, ...)

Standalone method to aggregate - can be parallelized.

save_agg_to_h5(h5_fpath, out_fpath, aggregation)

Save aggregated data to disc in .h5 format

+

Attributes

+ + + + + + + + + +

gids

1D array of supply curve point gids to aggregate

shape

Get the shape of the full exclusions raster.

+
+
+classmethod run_serial(excl_fpath, h5_fpath, tm_dset, *agg_dset, agg_method='mean', excl_dict=None, inclusion_mask=None, area_filter_kernel='queen', min_area=None, resolution=64, excl_area=0.0081, gids=None, gen_index=None)[source]
+

Standalone method to aggregate - can be parallelized.

+
+
Parameters:
+
    +
  • excl_fpath (str | list | tuple) – Filepath to exclusions h5 with techmap dataset +(can be one or more filepaths).

  • +
  • h5_fpath (str) – Filepath to .h5 file to aggregate

  • +
  • tm_dset (str) – Dataset name in the techmap file containing the +exclusions-to-resource mapping data.

  • +
  • agg_dset (str) – Dataset to aggregate, can supply multiple datasets. The datasets +should be scalar values for each site. This method cannot aggregate +timeseries data.

  • +
  • agg_method (str, optional) – Aggregation method, either mean or sum/aggregate, by default “mean”

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +by default None

  • +
  • inclusion_mask (np.ndarray, optional) – 2D array pre-extracted inclusion mask where 1 is included and 0 is +excluded. This must be either match the full exclusion shape or +be a list of single-sc-point exclusion masks corresponding to the +gids input, by default None

  • +
  • area_filter_kernel (str, optional) – Contiguous area filter method to use on final exclusions mask, +by default “queen”

  • +
  • min_area (float, optional) – Minimum required contiguous area filter in sq-km, +by default None

  • +
  • resolution (int, optional) – SC resolution, must be input in combination with gid. Preferred +option is to use the row/col slices to define the SC point instead, +by default 64

  • +
  • excl_area (float, optional) – Area of an exclusion pixel in km2. None will try to infer the area +from the profile transform attribute in excl_fpath, +by default 0.0081

  • +
  • gids (list, optional) – List of supply curve point gids to get summary for (can use to +subset if running in parallel), or None for all gids in the SC +extent, by default None

  • +
  • gen_index (np.ndarray, optional) – Array of generation gids with array index equal to resource gid. +Array value is -1 if the resource index was not used in the +generation run, by default None

  • +
+
+
Returns:
+

agg_out (dict) – Aggregated values for each aggregation dataset

+
+
+
+ +
+
+run_parallel(h5_fpath, agg_method='mean', excl_area=None, max_workers=None, sites_per_worker=100)[source]
+

Aggregate in parallel

+
+
Parameters:
+
    +
  • h5_fpath (str) – Filepath to .h5 file to aggregate

  • +
  • agg_method (str, optional) – Aggregation method, either mean or sum/aggregate, by default “mean”

  • +
  • excl_area (float, optional) – Area of an exclusion cell (square km), by default None

  • +
  • max_workers (int, optional) – Number of cores to run summary on. None is all available cpus, +by default None

  • +
  • sites_per_worker (int, optional) – Number of SC points to process on a single parallel worker, +by default 100

  • +
+
+
Returns:
+

agg_out (dict) – Aggregated values for each aggregation dataset

+
+
+
+ +
+
+aggregate(h5_fpath, agg_method='mean', max_workers=None, sites_per_worker=100)[source]
+

Aggregate with given agg_method

+
+
Parameters:
+
    +
  • h5_fpath (str) – Filepath to .h5 file to aggregate

  • +
  • agg_method (str, optional) – Aggregation method, either mean or sum/aggregate, by default “mean”

  • +
  • max_workers (int, optional) – Number of cores to run summary on. None is all available cpus, +by default None

  • +
  • sites_per_worker (int, optional) – Number of SC points to process on a single parallel worker, +by default 100

  • +
+
+
Returns:
+

agg (dict) – Aggregated values for each aggregation dataset

+
+
+
+ +
+
+property gids
+

1D array of supply curve point gids to aggregate

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+static save_agg_to_h5(h5_fpath, out_fpath, aggregation)[source]
+

Save aggregated data to disc in .h5 format

+
+
Parameters:
+
    +
  • out_fpath (str) – Output .h5 file path

  • +
  • aggregation (dict) – Aggregated values for each aggregation dataset

  • +
+
+
+
+ +
+
+property shape
+

Get the shape of the full exclusions raster.

+
+
Returns:
+

tuple

+
+
+
+ +
+
+classmethod run(excl_fpath, h5_fpath, tm_dset, *agg_dset, excl_dict=None, area_filter_kernel='queen', min_area=None, resolution=64, excl_area=None, gids=None, pre_extract_inclusions=False, agg_method='mean', max_workers=None, sites_per_worker=100, out_fpath=None)[source]
+

Get the supply curve points aggregation summary.

+
+
Parameters:
+
    +
  • excl_fpath (str | list | tuple) – Filepath to exclusions h5 with techmap dataset +(can be one or more filepaths).

  • +
  • h5_fpath (str) – Filepath to .h5 file to aggregate

  • +
  • tm_dset (str) – Dataset name in the techmap file containing the +exclusions-to-resource mapping data.

  • +
  • agg_dset (str) – Dataset to aggregate, can supply multiple datasets. The datasets +should be scalar values for each site. This method cannot aggregate +timeseries data.

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +by default None

  • +
  • area_filter_kernel (str, optional) – Contiguous area filter method to use on final exclusions mask, +by default “queen”

  • +
  • min_area (float, optional) – Minimum required contiguous area filter in sq-km, +by default None

  • +
  • resolution (int, optional) – SC resolution, must be input in combination with gid. Preferred +option is to use the row/col slices to define the SC point instead, +by default None

  • +
  • excl_area (float, optional) – Area of an exclusion pixel in km2. None will try to infer the area +from the profile transform attribute in excl_fpath, +by default None

  • +
  • gids (list, optional) – List of supply curve point gids to get summary for (can use to +subset if running in parallel), or None for all gids in the SC +extent, by default None

  • +
  • pre_extract_inclusions (bool, optional) – Optional flag to pre-extract/compute the inclusion mask from the +provided excl_dict, by default False. Typically faster to compute +the inclusion mask on the fly with parallel workers.

  • +
  • agg_method (str, optional) – Aggregation method, either mean or sum/aggregate, by default “mean”

  • +
  • max_workers (int, optional) – Number of cores to run summary on. None is all available cpus, +by default None

  • +
  • sites_per_worker (int, optional) – Number of SC points to process on a single parallel worker, +by default 100

  • +
  • out_fpath (str, optional) – Output .h5 file path, by default None

  • +
+
+
Returns:
+

agg (dict) – Aggregated values for each aggregation dataset

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.aggregation.BaseAggregation.html b/_autosummary/reV.supply_curve.aggregation.BaseAggregation.html new file mode 100644 index 000000000..5a9ea444f --- /dev/null +++ b/_autosummary/reV.supply_curve.aggregation.BaseAggregation.html @@ -0,0 +1,707 @@ + + + + + + + reV.supply_curve.aggregation.BaseAggregation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.aggregation.BaseAggregation

+
+
+class BaseAggregation(excl_fpath, tm_dset, excl_dict=None, area_filter_kernel='queen', min_area=None, resolution=64, excl_area=None, gids=None, pre_extract_inclusions=False)[source]
+

Bases: ABC

+

Abstract supply curve points aggregation framework based on only an +exclusion file and techmap.

+
+
Parameters:
+
    +
  • excl_fpath (str | list | tuple) – Filepath to exclusions h5 with techmap dataset +(can be one or more filepaths).

  • +
  • tm_dset (str) – Dataset name in the techmap file containing the +exclusions-to-resource mapping data.

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +by default None

  • +
  • area_filter_kernel (str, optional) – Contiguous area filter method to use on final exclusions mask, +by default “queen”

  • +
  • min_area (float, optional) – Minimum required contiguous area filter in sq-km, +by default None

  • +
  • resolution (int, optional) – SC resolution, must be input in combination with gid. Preferred +option is to use the row/col slices to define the SC point instead, +by default None

  • +
  • excl_area (float, optional) – Area of an exclusion pixel in km2. None will try to infer the area +from the profile transform attribute in excl_fpath, by default None

  • +
  • gids (list, optional) – List of supply curve point gids to get summary for (can use to +subset if running in parallel), or None for all gids in the SC +extent, by default None

  • +
  • pre_extract_inclusions (bool, optional) – Optional flag to pre-extract/compute the inclusion mask from the +provided excl_dict, by default False. Typically faster to compute +the inclusion mask on the fly with parallel workers.

  • +
+
+
+

Methods

+ + + +
+

Attributes

+ + + + + + + + + +

gids

1D array of supply curve point gids to aggregate

shape

Get the shape of the full exclusions raster.

+
+
+property gids
+

1D array of supply curve point gids to aggregate

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property shape
+

Get the shape of the full exclusions raster.

+
+
Returns:
+

tuple

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.aggregation.html b/_autosummary/reV.supply_curve.aggregation.html new file mode 100644 index 000000000..2a1e2ba10 --- /dev/null +++ b/_autosummary/reV.supply_curve.aggregation.html @@ -0,0 +1,648 @@ + + + + + + + reV.supply_curve.aggregation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.aggregation

+

reV aggregation framework.

+

Classes

+ + + + + + + + + + + + + + + +

AbstractAggFileHandler(excl_fpath[, ...])

Simple framework to handle aggregation file context managers.

AggFileHandler(excl_fpath, h5_fpath[, ...])

Framework to handle aggregation file context manager: - exclusions .h5 file - h5 file to be aggregated

Aggregation(excl_fpath, tm_dset, *agg_dset)

Concrete but generalized aggregation framework to aggregate ANY reV h5 file to a supply curve grid (based on an aggregated exclusion grid).

BaseAggregation(excl_fpath, tm_dset[, ...])

Abstract supply curve points aggregation framework based on only an exclusion file and techmap.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.cli_sc_aggregation.html b/_autosummary/reV.supply_curve.cli_sc_aggregation.html new file mode 100644 index 000000000..074f16668 --- /dev/null +++ b/_autosummary/reV.supply_curve.cli_sc_aggregation.html @@ -0,0 +1,631 @@ + + + + + + + reV.supply_curve.cli_sc_aggregation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.cli_sc_aggregation

+

reV Supply Curve Aggregation CLI utility functions.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.cli_supply_curve.html b/_autosummary/reV.supply_curve.cli_supply_curve.html new file mode 100644 index 000000000..e0a4aefaa --- /dev/null +++ b/_autosummary/reV.supply_curve.cli_supply_curve.html @@ -0,0 +1,631 @@ + + + + + + + reV.supply_curve.cli_supply_curve — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.cli_supply_curve

+

reV Supply Curve CLI utility functions.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.html b/_autosummary/reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.html new file mode 100644 index 000000000..52f4a11d3 --- /dev/null +++ b/_autosummary/reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.html @@ -0,0 +1,867 @@ + + + + + + + reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms

+
+
+class CompetitiveWindFarms(wind_dirs, sc_points, n_dirs=2, offshore=False)[source]
+

Bases: object

+

Handle competitive wind farm exclusion during supply curve sorting

+
+
Parameters:
+
    +
  • wind_dirs (pandas.DataFrame | str) – path to .csv or reVX.wind_dirs.wind_dirs.WindDirs output with +the neighboring supply curve point gids and power-rose value at +each cardinal direction

  • +
  • sc_points (pandas.DataFrame | str) – Supply curve point summary table

  • +
  • n_dirs (int, optional) – Number of prominent directions to use, by default 2

  • +
  • offshore (bool) – Flag as to whether offshore farms should be included during +CompetitiveWindFarms

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

check_sc_gid(sc_gid)

Check to see if sc_gid is valid, if so return associated sc_point_gids

exclude_sc_point_gid(sc_point_gid)

Exclude supply curve point gid, return False if gid is not present in list of available gids to avoid key errors elsewhere

map_downwind(sc_point_gid)

Map given sc_point_gid to downwind neighbors

map_sc_gid_to_sc_point_gid(sc_gid)

Map given sc_gid to equivalent sc_point_gid

map_sc_point_gid_to_sc_gid(sc_point_gid)

Map given sc_point_gid to equivalent sc_gid(s)

map_upwind(sc_point_gid)

Map given sc_point_gid to upwind neighbors

remove_noncompetitive_farm(sc_points[, ...])

Remove neighboring sc points for given number of prominent wind directions

run(wind_dirs, sc_points[, n_dirs, ...])

Exclude given number of neighboring Supply Point gids based on most prominent wind directions

+

Attributes

+ + + + + + + + + + + + +

mask

Supply curve point boolean mask, used for efficient exclusion False == excluded sc_point_gid

sc_gids

Un-masked sc_gids

sc_point_gids

Un-masked sc_point_gids

+
+
+property mask
+

Supply curve point boolean mask, used for efficient exclusion +False == excluded sc_point_gid

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property sc_point_gids
+

Un-masked sc_point_gids

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property sc_gids
+

Un-masked sc_gids

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+map_sc_point_gid_to_sc_gid(sc_point_gid)[source]
+

Map given sc_point_gid to equivalent sc_gid(s)

+
+
Parameters:
+

sc_point_gid (int) – Supply curve point gid to map to equivalent supply curve gid(s)

+
+
Returns:
+

int | list – Equivalent supply curve gid(s)

+
+
+
+ +
+
+map_sc_gid_to_sc_point_gid(sc_gid)[source]
+

Map given sc_gid to equivalent sc_point_gid

+
+
Parameters:
+

sc_gid (int) – Supply curve gid to map to equivalent supply point curve gid

+
+
Returns:
+

int – Equivalent supply point curve gid

+
+
+
+ +
+
+check_sc_gid(sc_gid)[source]
+

Check to see if sc_gid is valid, if so return associated +sc_point_gids

+
+
Parameters:
+

sc_gid (int) – Supply curve gid to map to equivalent supply point curve gid

+
+
Returns:
+

int | None – Equivalent supply point curve gid or None if sc_gid is invalid +(offshore)

+
+
+
+ +
+
+map_upwind(sc_point_gid)[source]
+

Map given sc_point_gid to upwind neighbors

+
+
Parameters:
+

sc_point_gid (int) – Supply point curve gid to get upwind neighbors

+
+
Returns:
+

int | list – upwind neighbors

+
+
+
+ +
+
+map_downwind(sc_point_gid)[source]
+

Map given sc_point_gid to downwind neighbors

+
+
Parameters:
+

sc_point_gid (int) – Supply point curve gid to get downwind neighbors

+
+
Returns:
+

int | list – downwind neighbors

+
+
+
+ +
+
+exclude_sc_point_gid(sc_point_gid)[source]
+

Exclude supply curve point gid, return False if gid is not present +in list of available gids to avoid key errors elsewhere

+
+
Parameters:
+

sc_point_gid (int) – supply curve point gid to mask

+
+
Returns:
+

bool – Flag if gid is valid and was masked

+
+
+
+ +
+
+remove_noncompetitive_farm(sc_points, sort_on='total_lcoe', downwind=False)[source]
+

Remove neighboring sc points for given number of prominent wind +directions

+
+
Parameters:
+
    +
  • sc_points (pandas.DataFrame | str) – Supply curve point summary table

  • +
  • sort_on (str, optional) – column to sort on before excluding neighbors, +by default ‘total_lcoe’

  • +
  • downwind (bool, optional) – Flag to remove downwind neighbors as well as upwind neighbors, +by default False

  • +
+
+
Returns:
+

sc_points (pandas.DataFrame) – Updated supply curve points after removing non-competitive +wind farms

+
+
+
+ +
+
+classmethod run(wind_dirs, sc_points, n_dirs=2, offshore=False, sort_on='total_lcoe', downwind=False, out_fpath=None)[source]
+

Exclude given number of neighboring Supply Point gids based on most +prominent wind directions

+
+
Parameters:
+
    +
  • wind_dirs (pandas.DataFrame | str) – path to .csv or reVX.wind_dirs.wind_dirs.WindDirs output with +the neighboring supply curve point gids and power-rose value at +each cardinal direction

  • +
  • sc_points (pandas.DataFrame | str) – Supply curve point summary table

  • +
  • n_dirs (int, optional) – Number of prominent directions to use, by default 2

  • +
  • offshore (bool) – Flag as to whether offshore farms should be included during +CompetitiveWindFarms

  • +
  • sort_on (str, optional) – column to sort on before excluding neighbors, +by default ‘total_lcoe’

  • +
  • downwind (bool, optional) – Flag to remove downwind neighbors as well as upwind neighbors, +by default False

  • +
  • out_fpath (str, optional) – Path to .csv file to save updated sc_points to, +by default None

  • +
+
+
Returns:
+

sc_points (pandas.DataFrame) – Updated supply curve points after removing non-competitive +wind farms

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.competitive_wind_farms.html b/_autosummary/reV.supply_curve.competitive_wind_farms.html new file mode 100644 index 000000000..178f7a46e --- /dev/null +++ b/_autosummary/reV.supply_curve.competitive_wind_farms.html @@ -0,0 +1,639 @@ + + + + + + + reV.supply_curve.competitive_wind_farms — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.competitive_wind_farms

+

Competitive Wind Farms exclusion handler

+

Classes

+ + + + + + +

CompetitiveWindFarms(wind_dirs, sc_points[, ...])

Handle competitive wind farm exclusion during supply curve sorting

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.exclusions.ExclusionMask.html b/_autosummary/reV.supply_curve.exclusions.ExclusionMask.html new file mode 100644 index 000000000..38aa33620 --- /dev/null +++ b/_autosummary/reV.supply_curve.exclusions.ExclusionMask.html @@ -0,0 +1,839 @@ + + + + + + + reV.supply_curve.exclusions.ExclusionMask — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.exclusions.ExclusionMask

+
+
+class ExclusionMask(excl_h5, layers=None, min_area=None, kernel='queen', hsds=False, check_layers=False)[source]
+

Bases: object

+

Class to create final exclusion mask

+
+
Parameters:
+
    +
  • excl_h5 (str | list | tuple) – Path to one or more exclusions .h5 files

  • +
  • layers (list | NoneType) – list of LayerMask instances for each exclusion layer to combine

  • +
  • min_area (float | NoneType) – Minimum required contiguous area in sq-km

  • +
  • kernel (str) – Contiguous filter method to use on final exclusion

  • +
  • hsds (bool) – Boolean flag to use h5pyd to handle .h5 ‘files’ hosted on AWS +behind HSDS

  • +
  • check_layers (bool) – Run a pre-flight check on each layer to ensure they contain +un-excluded values

  • +
+
+
+

Methods

+ + + + + + + + + + + + +

add_layer(layer[, replace])

Add layer to be combined

close()

Close h5 instance

run(excl_h5[, layers, min_area, kernel, hsds])

Create inclusion mask from given layers

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

FILTER_KERNELS

excl_h5

Open ExclusionLayers instance

excl_layers

List of available exclusion layers in exclusions .h5

latitude

Latitude coordinates array

layer_names

List of layers to combine

layers

List of LayerMask instances for each exclusion layer to combine

longitude

Longitude coordinates array

mask

Inclusion mask for entire exclusion domain

nodata_lookup

Get a dictionary lookup of the nodata values for each layer name.

shape

Get the exclusions shape.

+
+
+close()[source]
+

Close h5 instance

+
+ +
+
+property shape
+

Get the exclusions shape.

+
+
Returns:
+

shape (tuple) – (rows, cols) shape tuple

+
+
+
+ +
+
+property excl_h5
+

Open ExclusionLayers instance

+
+
Returns:
+

_excl_h5 (ExclusionLayers)

+
+
+
+ +
+
+property excl_layers
+

List of available exclusion layers in exclusions .h5

+
+
Returns:
+

_excl_layers (list)

+
+
+
+ +
+
+property layer_names
+

List of layers to combine

+
+
Returns:
+

list

+
+
+
+ +
+
+property layers
+

List of LayerMask instances for each exclusion layer to combine

+
+
Returns:
+

list

+
+
+
+ +
+
+property mask
+

Inclusion mask for entire exclusion domain

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property latitude
+

Latitude coordinates array

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property longitude
+

Longitude coordinates array

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+add_layer(layer, replace=False)[source]
+

Add layer to be combined

+
+
Parameters:
+

layer (LayerMask) – LayerMask instance to add to set of layers to be combined

+
+
+
+ +
+
+property nodata_lookup
+

Get a dictionary lookup of the nodata values for each layer name.

+
+
Returns:
+

nodata (dict) – Lookup keyed by layer name and values are nodata values for the +respective layers.

+
+
+
+ +
+
+classmethod run(excl_h5, layers=None, min_area=None, kernel='queen', hsds=False)[source]
+

Create inclusion mask from given layers

+
+
Parameters:
+
    +
  • excl_h5 (str | list | tuple) – Path to one or more exclusions .h5 files

  • +
  • layers (list | NoneType) – list of LayerMask instances for each exclusion layer to combine

  • +
  • min_area (float | NoneType) – Minimum required contiguous area in sq-km

  • +
  • kernel (str) – Contiguous filter method to use on final exclusion

  • +
  • hsds (bool) – Boolean flag to use h5pyd to handle .h5 ‘files’ hosted on AWS +behind HSDS

  • +
+
+
Returns:
+

mask (ndarray) – Full inclusion mask

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.exclusions.ExclusionMaskFromDict.html b/_autosummary/reV.supply_curve.exclusions.ExclusionMaskFromDict.html new file mode 100644 index 000000000..db936d827 --- /dev/null +++ b/_autosummary/reV.supply_curve.exclusions.ExclusionMaskFromDict.html @@ -0,0 +1,871 @@ + + + + + + + reV.supply_curve.exclusions.ExclusionMaskFromDict — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.exclusions.ExclusionMaskFromDict

+
+
+class ExclusionMaskFromDict(excl_h5, layers_dict=None, min_area=None, kernel='queen', hsds=False, check_layers=False)[source]
+

Bases: ExclusionMask

+

Class to initialize ExclusionMask from a dictionary defining layers

+
+
Parameters:
+
    +
  • excl_h5 (str | list | tuple) – Path to one or more exclusions .h5 files

  • +
  • layers_dict (dict | NoneType) – Dictionary of LayerMask arguments {layer: {kwarg: value}}

  • +
  • min_area (float | NoneType) – Minimum required contiguous area in sq-km

  • +
  • kernel (str) – Contiguous filter method to use on final exclusion

  • +
  • hsds (bool) – Boolean flag to use h5pyd to handle .h5 ‘files’ hosted on AWS +behind HSDS

  • +
  • check_layers (bool) – Run a pre-flight check on each layer to ensure they contain +un-excluded values

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + +

add_layer(layer[, replace])

Add layer to be combined

close()

Close h5 instance

extract_inclusion_mask(excl_fpath, tm_dset)

Extract the full inclusion mask from excl_fpath using the given exclusion layers and whether or not to run a minimum area filter

run(excl_h5[, layers_dict, min_area, ...])

Create inclusion mask from given layers dictionary

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

FILTER_KERNELS

excl_h5

Open ExclusionLayers instance

excl_layers

List of available exclusion layers in exclusions .h5

latitude

Latitude coordinates array

layer_names

List of layers to combine

layers

List of LayerMask instances for each exclusion layer to combine

longitude

Longitude coordinates array

mask

Inclusion mask for entire exclusion domain

nodata_lookup

Get a dictionary lookup of the nodata values for each layer name.

shape

Get the exclusions shape.

+
+
+classmethod extract_inclusion_mask(excl_fpath, tm_dset, excl_dict=None, area_filter_kernel='queen', min_area=None)[source]
+

Extract the full inclusion mask from excl_fpath using the given +exclusion layers and whether or not to run a minimum area filter

+
+
Parameters:
+
    +
  • excl_fpath (str | list | tuple) – Filepath to exclusions h5 with techmap dataset +(can be one or more filepaths).

  • +
  • tm_dset (str) – Dataset name in the techmap file containing the +exclusions-to-resource mapping data.

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class.

  • +
  • area_filter_kernel (str, optional) – Contiguous area filter method to use on final exclusions mask, +by default “queen”

  • +
  • min_area (float, optional) – Minimum required contiguous area filter in sq-km, +by default None

  • +
+
+
Returns:
+

inclusion_mask (ndarray) – Pre-computed 2D inclusion mask (normalized with expected range: +[0, 1], where 1 is included and 0 is excluded)

+
+
+
+ +
+
+classmethod run(excl_h5, layers_dict=None, min_area=None, kernel='queen', hsds=False)[source]
+

Create inclusion mask from given layers dictionary

+
+
Parameters:
+
    +
  • excl_h5 (str | list | tuple) – Path to one or more exclusions .h5 files

  • +
  • layers_dict (dict | NoneType) – Dictionary of LayerMask arguments {layer: {kwarg: value}}

  • +
  • min_area (float | NoneType) – Minimum required contiguous area in sq-km

  • +
  • kernel (str) – Contiguous filter method to use on final exclusion

  • +
  • hsds (bool) – Boolean flag to use h5pyd to handle .h5 ‘files’ hosted on AWS +behind HSDS

  • +
+
+
Returns:
+

mask (ndarray) – Full inclusion mask

+
+
+
+ +
+
+add_layer(layer, replace=False)
+

Add layer to be combined

+
+
Parameters:
+

layer (LayerMask) – LayerMask instance to add to set of layers to be combined

+
+
+
+ +
+
+close()
+

Close h5 instance

+
+ +
+
+property excl_h5
+

Open ExclusionLayers instance

+
+
Returns:
+

_excl_h5 (ExclusionLayers)

+
+
+
+ +
+
+property excl_layers
+

List of available exclusion layers in exclusions .h5

+
+
Returns:
+

_excl_layers (list)

+
+
+
+ +
+
+property latitude
+

Latitude coordinates array

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property layer_names
+

List of layers to combine

+
+
Returns:
+

list

+
+
+
+ +
+
+property layers
+

List of LayerMask instances for each exclusion layer to combine

+
+
Returns:
+

list

+
+
+
+ +
+
+property longitude
+

Longitude coordinates array

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property mask
+

Inclusion mask for entire exclusion domain

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property nodata_lookup
+

Get a dictionary lookup of the nodata values for each layer name.

+
+
Returns:
+

nodata (dict) – Lookup keyed by layer name and values are nodata values for the +respective layers.

+
+
+
+ +
+
+property shape
+

Get the exclusions shape.

+
+
Returns:
+

shape (tuple) – (rows, cols) shape tuple

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.exclusions.FrictionMask.html b/_autosummary/reV.supply_curve.exclusions.FrictionMask.html new file mode 100644 index 000000000..71bb89a19 --- /dev/null +++ b/_autosummary/reV.supply_curve.exclusions.FrictionMask.html @@ -0,0 +1,835 @@ + + + + + + + reV.supply_curve.exclusions.FrictionMask — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.exclusions.FrictionMask

+
+
+class FrictionMask(fric_h5, fric_dset, hsds=False, check_layers=False)[source]
+

Bases: ExclusionMask

+

Class to handle exclusion-style friction layer.

+
+
Parameters:
+
    +
  • fric_h5 (str) – Path to friction layer .h5 file (same format as exclusions file)

  • +
  • fric_dset (str) – Friction layer dataset in fric_h5

  • +
  • hsds (bool) – Boolean flag to use h5pyd to handle .h5 ‘files’ hosted on AWS +behind HSDS

  • +
  • check_layers (bool) – Run a pre-flight check on each layer to ensure they contain +un-excluded values

  • +
+
+
+

Methods

+ + + + + + + + + + + + +

add_layer(layer[, replace])

Add layer to be combined

close()

Close h5 instance

run(excl_h5, fric_dset[, hsds])

Create inclusion mask from given layers dictionary

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

FILTER_KERNELS

excl_h5

Open ExclusionLayers instance

excl_layers

List of available exclusion layers in exclusions .h5

latitude

Latitude coordinates array

layer_names

List of layers to combine

layers

List of LayerMask instances for each exclusion layer to combine

longitude

Longitude coordinates array

mask

Inclusion mask for entire exclusion domain

nodata_lookup

Get a dictionary lookup of the nodata values for each layer name.

shape

Get the exclusions shape.

+
+
+add_layer(layer, replace=False)
+

Add layer to be combined

+
+
Parameters:
+

layer (LayerMask) – LayerMask instance to add to set of layers to be combined

+
+
+
+ +
+
+close()
+

Close h5 instance

+
+ +
+
+property excl_h5
+

Open ExclusionLayers instance

+
+
Returns:
+

_excl_h5 (ExclusionLayers)

+
+
+
+ +
+
+property excl_layers
+

List of available exclusion layers in exclusions .h5

+
+
Returns:
+

_excl_layers (list)

+
+
+
+ +
+
+property latitude
+

Latitude coordinates array

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property layer_names
+

List of layers to combine

+
+
Returns:
+

list

+
+
+
+ +
+
+property layers
+

List of LayerMask instances for each exclusion layer to combine

+
+
Returns:
+

list

+
+
+
+ +
+
+property longitude
+

Longitude coordinates array

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property mask
+

Inclusion mask for entire exclusion domain

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property nodata_lookup
+

Get a dictionary lookup of the nodata values for each layer name.

+
+
Returns:
+

nodata (dict) – Lookup keyed by layer name and values are nodata values for the +respective layers.

+
+
+
+ +
+
+property shape
+

Get the exclusions shape.

+
+
Returns:
+

shape (tuple) – (rows, cols) shape tuple

+
+
+
+ +
+
+classmethod run(excl_h5, fric_dset, hsds=False)[source]
+

Create inclusion mask from given layers dictionary

+
+
Parameters:
+
    +
  • fric_h5 (str) – Path to friction layer .h5 file (same format as exclusions file)

  • +
  • fric_dset (str) – Friction layer dataset in fric_h5

  • +
  • hsds (bool) – Boolean flag to use h5pyd to handle .h5 ‘files’ hosted on AWS +behind HSDS

  • +
+
+
Returns:
+

mask (ndarray) – Full inclusion mask

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.exclusions.LayerMask.html b/_autosummary/reV.supply_curve.exclusions.LayerMask.html new file mode 100644 index 000000000..b8dbc3448 --- /dev/null +++ b/_autosummary/reV.supply_curve.exclusions.LayerMask.html @@ -0,0 +1,822 @@ + + + + + + + reV.supply_curve.exclusions.LayerMask — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.exclusions.LayerMask

+
+
+class LayerMask(layer, exclude_values=None, exclude_range=(None, None), include_values=None, include_range=(None, None), include_weights=None, force_include_values=None, force_include_range=None, use_as_weights=False, weight=1.0, exclude_nodata=False, nodata_value=None, **kwargs)[source]
+

Bases: object

+

Class to convert exclusion layer to inclusion layer mask

+
+
Parameters:
+
    +
  • layer (str) – Layer name.

  • +
  • exclude_values (int | float | list, optional) – Single value or list of values to exclude.

    +
    +

    Important

    +

    The keyword arguments exclude_values, +exclude_range, include_values, include_range, +include_weights, force_include_values, and +force_include_range are all mutually exclusive. Users +should supply value(s) for exactly one of these arguments.

    +
    +

    By default, None.

    +
  • +
  • exclude_range (list | tuple, optional) – Two-item list of (min threshold, max threshold) for values +to exclude. Mutually exclusive with other inputs - see info +in the description of exclude_values. +By default, None.

  • +
  • include_values (int | float | list, optional) – Single value or list of values to include. Mutually +exclusive with other inputs - see info in the description of +exclude_values. By default, None.

  • +
  • include_range (list | tuple, optional) – Two-item list of (min threshold, max threshold) for values +to include. Mutually exclusive with other inputs - see info +in the description of exclude_values. +By default, None.

  • +
  • include_weights (dict, optional) – A dictionary of {value: weight} pairs, where the +value in the layer that should be included with the +given weight. Mutually exclusive with other inputs - see +info in the description of exclude_values. +By default, None.

  • +
  • force_include_values (int | float | list, optional) – Force the inclusion of the given value(s). Mutually +exclusive with other inputs - see info in the description of +exclude_values. By default, None.

  • +
  • force_include_range (list | tuple, optional) – Force the inclusion of given values in the range +(min threshold, max threshold). Mutually exclusive with +other inputs - see info in the description of +exclude_values. By default, None.

  • +
  • use_as_weights (bool, optional) – Option to use layer as final inclusion weights. If True, +all inclusion/exclusions specifications for the layer are +ignored and the raw values (scaled by the weight input) +are used as weights. By default, False.

  • +
  • weight (float, optional) – Weight applied to exclusion layer after it is calculated. +Can be used, for example, to turn a binary exclusion layer +(i.e. data with 0 or 1 values and exclude_values=1 +input) into partial exclusions by setting the weight to +a fraction (e.g. 0.5 for 50% exclusions). By default, 1.

  • +
  • exclude_nodata (bool, optional) – Flag to exclude nodata values (nodata_value). If +nodata_value=None the nodata_value is inferred by +reV.supply_curve.exclusions.ExclusionMask. +By default, False.

  • +
  • nodata_value (int | float, optional) – Nodata value for the layer. If None, the value will be +inferred when LayerMask is added to +reV.supply_curve.exclusions.ExclusionMask. +By default, None.

  • +
  • **kwargs – Optional inputs to maintain legacy kwargs of inclusion_* +instead of include_*.

  • +
+
+
+

Methods

+ + + +
+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

exclude_values

Values to exclude

force_include

Flag to force include mask

include_values

Values to include

include_weights

Mapping of values to include and at what weights

mask_type

Type of exclusion mask for this layer

max_value

Maximum value to include/exclude if include_range or exclude_range was input.

min_value

Minimum value to include/exclude if include_range or exclude_range was input.

name

Layer name to extract from exclusions .h5 file

+
+
+property name
+

Layer name to extract from exclusions .h5 file

+
+
Returns:
+

_name (str)

+
+
+
+ +
+
+property min_value
+

Minimum value to include/exclude if include_range or exclude_range +was input.

+
+
Returns:
+

float

+
+
+
+ +
+
+property max_value
+

Maximum value to include/exclude if include_range or exclude_range +was input.

+
+
Returns:
+

float

+
+
+
+ +
+
+property exclude_values
+

Values to exclude

+
+
Returns:
+

_exclude_values (list)

+
+
+
+ +
+
+property include_values
+

Values to include

+
+
Returns:
+

_include_values (list)

+
+
+
+ +
+
+property include_weights
+

Mapping of values to include and at what weights

+
+
Returns:
+

dict

+
+
+
+ +
+
+property force_include
+

Flag to force include mask

+
+
Returns:
+

_force_include (bool)

+
+
+
+ +
+
+property mask_type
+

Type of exclusion mask for this layer

+
+
Returns:
+

str

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.exclusions.html b/_autosummary/reV.supply_curve.exclusions.html new file mode 100644 index 000000000..c0971f817 --- /dev/null +++ b/_autosummary/reV.supply_curve.exclusions.html @@ -0,0 +1,648 @@ + + + + + + + reV.supply_curve.exclusions — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.exclusions

+

Generate reV inclusion mask from exclusion layers

+

Classes

+ + + + + + + + + + + + + + + +

ExclusionMask(excl_h5[, layers, min_area, ...])

Class to create final exclusion mask

ExclusionMaskFromDict(excl_h5[, ...])

Class to initialize ExclusionMask from a dictionary defining layers

FrictionMask(fric_h5, fric_dset[, hsds, ...])

Class to handle exclusion-style friction layer.

LayerMask(layer[, exclude_values, ...])

Class to convert exclusion layer to inclusion layer mask

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.extent.SupplyCurveExtent.html b/_autosummary/reV.supply_curve.extent.SupplyCurveExtent.html new file mode 100644 index 000000000..038f18019 --- /dev/null +++ b/_autosummary/reV.supply_curve.extent.SupplyCurveExtent.html @@ -0,0 +1,1076 @@ + + + + + + + reV.supply_curve.extent.SupplyCurveExtent — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.extent.SupplyCurveExtent

+
+
+class SupplyCurveExtent(f_excl, resolution=64)[source]
+

Bases: object

+

Supply curve full extent framework. This class translates the 90m +exclusion grid to the aggregated supply curve resolution.

+
+
Parameters:
+
    +
  • f_excl (str | list | tuple | ExclusionLayers) – File path(s) to the exclusions grid, or pre-initialized +ExclusionLayers. The exclusions dictate the SC analysis extent.

  • +
  • resolution (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

close()

Close all file handlers.

get_coord(gid)

Get the centroid coordinate for the supply curve gid point.

get_excl_points(dset, gid)

Get the exclusions data corresponding to a supply curve gid.

get_excl_slices(gid)

Get the row and column slices of the exclusions grid corresponding to the supply curve point gid.

get_flat_excl_ind(gid)

Get the index values of the flattened exclusions grid corresponding to the supply curve point gid.

get_sc_row_col_ind(gid)

Get the supply curve grid row and column index values corresponding to a supply curve gid.

get_slice_lookup(sc_point_gids)

Get exclusion slices for all requested supply curve point gids

valid_sc_points(tm_dset)

Determine which sc_point_gids contain resource gids and are thus valid supply curve points

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

col_indices

Get a 1D array of col indices for every gid.

cols_of_excl

List representing the supply curve points columns and which exclusions columns belong to each supply curve column.

excl_col_slices

List representing the supply curve points cols and which exclusions cols belong to each supply curve col.

excl_cols

Get the unique column indices identifying the exclusion points.

excl_row_slices

List representing the supply curve points rows and which exclusions rows belong to each supply curve row.

excl_rows

Get the unique row indices identifying the exclusion points.

excl_shape

Get the shape tuple of the exclusion file raster.

exclusions

Get the exclusions object.

lat_lon

2D array of lat, lon coordinates for all sc points

latitude

Get supply curve point latitudes

longitude

Get supply curve point longitudes

n_cols

Get the number of supply curve grid columns.

n_rows

Get the number of supply curve grid rows.

points

Get the summary dataframe of supply curve points.

resolution

Get the 1D resolution.

row_indices

Get a 1D array of row indices for every gid.

rows_of_excl

List representing the supply curve points rows and which exclusions rows belong to each supply curve row.

shape

Get the Supply curve shape tuple (n_rows, n_cols).

+
+
+close()[source]
+

Close all file handlers.

+
+ +
+
+property shape
+

Get the Supply curve shape tuple (n_rows, n_cols).

+
+
Returns:
+

shape (tuple) – 2-entry tuple representing the full supply curve extent.

+
+
+
+ +
+
+property exclusions
+

Get the exclusions object.

+
+
Returns:
+

_excls (ExclusionLayers) – ExclusionLayers h5 handler object.

+
+
+
+ +
+
+property resolution
+

Get the 1D resolution.

+
+
Returns:
+

_res (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

+
+
+
+ +
+
+property excl_shape
+

Get the shape tuple of the exclusion file raster.

+
+
Returns:
+

tuple

+
+
+
+ +
+
+property excl_rows
+

Get the unique row indices identifying the exclusion points.

+
+
Returns:
+

excl_rows (np.ndarray) – Array of exclusion row indices.

+
+
+
+ +
+
+property excl_cols
+

Get the unique column indices identifying the exclusion points.

+
+
Returns:
+

excl_cols (np.ndarray) – Array of exclusion column indices.

+
+
+
+ +
+
+property rows_of_excl
+

List representing the supply curve points rows and which +exclusions rows belong to each supply curve row.

+
+
Returns:
+

_rows_of_excl (list) – List representing the supply curve points rows. Each list entry +contains the exclusion row indices that are included in the sc +point.

+
+
+
+ +
+
+property cols_of_excl
+

List representing the supply curve points columns and which +exclusions columns belong to each supply curve column.

+
+
Returns:
+

_cols_of_excl (list) – List representing the supply curve points columns. Each list entry +contains the exclusion column indices that are included in the sc +point.

+
+
+
+ +
+
+property excl_row_slices
+

List representing the supply curve points rows and which +exclusions rows belong to each supply curve row.

+
+
Returns:
+

_excl_row_slices (list) – List representing the supply curve points rows. Each list entry +contains the exclusion row slice that are included in the sc +point.

+
+
+
+ +
+
+property excl_col_slices
+

List representing the supply curve points cols and which +exclusions cols belong to each supply curve col.

+
+
Returns:
+

_excl_col_slices (list) – List representing the supply curve points cols. Each list entry +contains the exclusion col slice that are included in the sc +point.

+
+
+
+ +
+
+property n_rows
+

Get the number of supply curve grid rows.

+
+
Returns:
+

n_rows (int) – Number of row entries in the full supply curve grid.

+
+
+
+ +
+
+property n_cols
+

Get the number of supply curve grid columns.

+
+
Returns:
+

n_cols (int) – Number of column entries in the full supply curve grid.

+
+
+
+ +
+
+property latitude
+

Get supply curve point latitudes

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property longitude
+

Get supply curve point longitudes

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property lat_lon
+

2D array of lat, lon coordinates for all sc points

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property row_indices
+

Get a 1D array of row indices for every gid. That is, this property +has length == len(gids) and row_indices[sc_gid] yields the row index of +the target supply curve gid

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property col_indices
+

Get a 1D array of col indices for every gid. That is, this property +has length == len(gids) and col_indices[sc_gid] yields the col index of +the target supply curve gid

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property points
+

Get the summary dataframe of supply curve points.

+
+
Returns:
+

_points (pd.DataFrame) – Supply curve points with columns for attributes of each sc point.

+
+
+
+ +
+
+get_sc_row_col_ind(gid)[source]
+

Get the supply curve grid row and column index values corresponding +to a supply curve gid.

+
+
Parameters:
+

gid (int) – Supply curve point gid.

+
+
Returns:
+

    +
  • row_ind (int) – Row index that the gid is located at in the sc grid.

  • +
  • col_ind (int) – Column index that the gid is located at in the sc grid.

  • +
+

+
+
+
+ +
+
+get_excl_slices(gid)[source]
+

Get the row and column slices of the exclusions grid corresponding +to the supply curve point gid.

+
+
Parameters:
+

gid (int) – Supply curve point gid.

+
+
Returns:
+

    +
  • row_slice (slice) – Exclusions grid row slice corresponding to the sc point gid.

  • +
  • col_slice (slice) – Exclusions grid col slice corresponding to the sc point gid.

  • +
+

+
+
+
+ +
+
+get_flat_excl_ind(gid)[source]
+

Get the index values of the flattened exclusions grid corresponding +to the supply curve point gid.

+
+
Parameters:
+

gid (int) – Supply curve point gid.

+
+
Returns:
+

excl_ind (np.ndarray) – Index values of the flattened exclusions grid corresponding to +the SC gid.

+
+
+
+ +
+
+get_excl_points(dset, gid)[source]
+

Get the exclusions data corresponding to a supply curve gid.

+
+
Parameters:
+
    +
  • dset (str | int) – Used as the first arg in the exclusions __getitem__ slice. +String can be “meta”, integer can be layer number.

  • +
  • gid (int) – Supply curve point gid.

  • +
+
+
Returns:
+

excl_points (pd.DataFrame) – Exclusions data reduced to just the exclusion points associated +with the requested supply curve gid.

+
+
+
+ +
+
+get_coord(gid)[source]
+

Get the centroid coordinate for the supply curve gid point.

+
+
Parameters:
+

gid (int) – Supply curve point gid.

+
+
Returns:
+

coord (tuple) – Two entry coordinate tuple: (latitude, longitude)

+
+
+
+ +
+
+valid_sc_points(tm_dset)[source]
+

Determine which sc_point_gids contain resource gids and are thus +valid supply curve points

+
+
Parameters:
+

tm_dset (str) – Techmap dataset name

+
+
Returns:
+

valid_gids (ndarray) – Vector of valid sc_point_gids that contain resource gis

+
+
+
+ +
+
+get_slice_lookup(sc_point_gids)[source]
+

Get exclusion slices for all requested supply curve point gids

+
+
Parameters:
+

sc_point_gids (list | ndarray) – List or 1D array of sc_point_gids to get exclusion slices for

+
+
Returns:
+

dict – lookup mapping sc_point_gid to exclusion slice

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.extent.html b/_autosummary/reV.supply_curve.extent.html new file mode 100644 index 000000000..64dffa80b --- /dev/null +++ b/_autosummary/reV.supply_curve.extent.html @@ -0,0 +1,639 @@ + + + + + + + reV.supply_curve.extent — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.extent

+

reV supply curve extent

+

Classes

+ + + + + + +

SupplyCurveExtent(f_excl[, resolution])

Supply curve full extent framework.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.html b/_autosummary/reV.supply_curve.html new file mode 100644 index 000000000..85c6d783b --- /dev/null +++ b/_autosummary/reV.supply_curve.html @@ -0,0 +1,664 @@ + + + + + + + reV.supply_curve — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve

+

reV Supply Curve

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

reV.supply_curve.aggregation

reV aggregation framework.

reV.supply_curve.cli_sc_aggregation

reV Supply Curve Aggregation CLI utility functions.

reV.supply_curve.cli_supply_curve

reV Supply Curve CLI utility functions.

reV.supply_curve.competitive_wind_farms

Competitive Wind Farms exclusion handler

reV.supply_curve.exclusions

Generate reV inclusion mask from exclusion layers

reV.supply_curve.extent

reV supply curve extent

reV.supply_curve.points

reV supply curve points frameworks.

reV.supply_curve.sc_aggregation

reV supply curve aggregation framework.

reV.supply_curve.supply_curve

reV supply curve module - Calculation of LCOT - Supply Curve creation

reV.supply_curve.tech_mapping

reV tech mapping framework.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.points.AbstractSupplyCurvePoint.html b/_autosummary/reV.supply_curve.points.AbstractSupplyCurvePoint.html new file mode 100644 index 000000000..5c7259e23 --- /dev/null +++ b/_autosummary/reV.supply_curve.points.AbstractSupplyCurvePoint.html @@ -0,0 +1,747 @@ + + + + + + + reV.supply_curve.points.AbstractSupplyCurvePoint — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.points.AbstractSupplyCurvePoint

+
+
+class AbstractSupplyCurvePoint(gid, exclusion_shape, resolution=64)[source]
+

Bases: ABC

+

Abstract SC point based on only the point gid, SC shape, and resolution.

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • exclusion_shape (tuple) – Shape of the full exclusions extent (rows, cols).

  • +
  • resolution (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

  • +
+
+
+

Methods

+ + + + + + +

get_agg_slices(gid, shape, resolution)

Get the row, col slices of an aggregation gid.

+

Attributes

+ + + + + + + + + + + + + + + + + + +

cols

Get the cols of the exclusions layer associated with this SC point.

gid

supply curve point gid

resolution

Get the supply curve grid aggregation resolution

rows

Get the rows of the exclusions layer associated with this SC point.

sc_point_gid

Supply curve point gid

+
+
+property gid
+

supply curve point gid

+
+ +
+
+property sc_point_gid
+

Supply curve point gid

+
+
Returns:
+

int

+
+
+
+ +
+
+property resolution
+

Get the supply curve grid aggregation resolution

+
+ +
+
+property rows
+

Get the rows of the exclusions layer associated with this SC point.

+
+
Returns:
+

rows (slice) – Row slice to index the high-res layer (exclusions layer) for the +gid in the agg layer (supply curve layer).

+
+
+
+ +
+
+property cols
+

Get the cols of the exclusions layer associated with this SC point.

+
+
Returns:
+

cols (slice) – Column slice to index the high-res layer (exclusions layer) for the +gid in the agg layer (supply curve layer).

+
+
+
+ +
+
+static get_agg_slices(gid, shape, resolution)[source]
+

Get the row, col slices of an aggregation gid.

+
+
Parameters:
+
    +
  • gid (int) – Gid of interest in the aggregated layer.

  • +
  • shape (tuple) – (row, col) shape tuple of the underlying high-res layer.

  • +
  • resolution (int) – Resolution of the aggregation: number of pixels in 1D being +aggregated.

  • +
+
+
Returns:
+

    +
  • row_slice (slice) – Row slice to index the high-res layer for the gid in the agg layer.

  • +
  • col_slice (slice) – Col slice to index the high-res layer for the gid in the agg layer.

  • +
+

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.points.AggregationSupplyCurvePoint.html b/_autosummary/reV.supply_curve.points.AggregationSupplyCurvePoint.html new file mode 100644 index 000000000..a90843096 --- /dev/null +++ b/_autosummary/reV.supply_curve.points.AggregationSupplyCurvePoint.html @@ -0,0 +1,1227 @@ + + + + + + + reV.supply_curve.points.AggregationSupplyCurvePoint — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.points.AggregationSupplyCurvePoint

+
+
+class AggregationSupplyCurvePoint(gid, excl, agg_h5, tm_dset, excl_dict=None, inclusion_mask=None, resolution=64, excl_area=None, exclusion_shape=None, close=True, gen_index=None, apply_exclusions=True)[source]
+

Bases: SupplyCurvePoint

+

Generic single SC point to aggregate data from an h5 file.

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • excl (str | ExclusionMask) – Filepath to exclusions h5 or ExclusionMask file handler.

  • +
  • agg_h5 (str | Resource) – Filepath to .h5 file to aggregate or Resource handler

  • +
  • tm_dset (str) – Dataset name in the exclusions file containing the +exclusions-to-resource mapping data.

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arugments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +None if excl input is pre-initialized.

  • +
  • inclusion_mask (np.ndarray) – 2D array pre-extracted inclusion mask where 1 is included and 0 is +excluded. The shape of this will be checked against the input +resolution.

  • +
  • resolution (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

  • +
  • excl_area (float | None, optional) – Area of an exclusion pixel in km2. None will try to infer the area +from the profile transform attribute in excl_fpath, by default None

  • +
  • exclusion_shape (tuple) – Shape of the full exclusions extent (rows, cols). Inputing this +will speed things up considerably.

  • +
  • close (bool) – Flag to close object file handlers on exit.

  • +
  • gen_index (np.ndarray) – Array of generation gids with array index equal to resource gid. +Array value is -1 if the resource index was not used in the +generation run.

  • +
  • apply_exclusions (bool) – Flag to apply exclusions to the resource / generation gid’s on +initialization.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

agg_data_layers(summary, data_layers)

Perform additional data layer aggregation.

aggregate(arr)

Calc sum (aggregation) of the resource data.

close()

Close all file handlers.

exclusion_weighted_mean(arr[, drop_nan])

Calc the exclusions-weighted mean value of an array of resource data.

get_agg_slices(gid, shape, resolution)

Get the row, col slices of an aggregation gid.

mean_wind_dirs(arr)

Calc the mean wind directions at every time-step

run(gid, excl, agg_h5, tm_dset, *agg_dset[, ...])

Compute exclusions weight mean for the sc point from data

sc_mean(gid, excl, tm_dset, data[, ...])

Compute exclusions weight mean for the sc point from data

sc_sum(gid, excl, tm_dset, data[, ...])

Compute the aggregate (sum) of data for the sc point

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

area

Get the non-excluded resource area of the supply curve point in the current resource class.

bool_mask

Get a boolean inclusion mask (True if excl point is not excluded).

centroid

Get the supply curve point centroid coordinate.

cols

Get the cols of the exclusions layer associated with this SC point.

country

Get the SC point country based on the resource meta data.

county

Get the SC point county based on the resource meta data.

elevation

Get the SC point elevation based on the resource meta data.

exclusions

Get the exclusions object.

gid

supply curve point gid

gid_counts

Get the sum of the inclusion values in each resource/generation gid corresponding to this sc point.

h5

h5 Resource handler object

h5_gid_set

Get list of unique h5 gids corresponding to this sc point.

include_mask

[0, 1] where 1 is included and 0 is excluded).

include_mask_flat

Get the flattened inclusion mask (normalized with expected range: [0, 1] where 1 is included and 0 is excluded).

latitude

Get the SC point latitude

longitude

Get the SC point longitude

n_gids

Get the total number of not fully excluded pixels associated with the available resource/generation gids at the given sc gid.

offshore

Get the SC point offshore flag based on the resource meta data (if offshore column is present).

pixel_area

The area in km2 of a single exclusion pixel.

resolution

Get the supply curve grid aggregation resolution

rows

Get the rows of the exclusions layer associated with this SC point.

sc_point_gid

Supply curve point gid

state

Get the SC point state based on the resource meta data.

summary

Supply curve point's meta data summary

timezone

Get the SC point timezone based on the resource meta data.

+
+
+close()[source]
+

Close all file handlers.

+
+ +
+
+property h5
+

h5 Resource handler object

+
+
Returns:
+

_h5 (Resource) – Resource h5 handler object.

+
+
+
+ +
+
+property country
+

Get the SC point country based on the resource meta data.

+
+ +
+
+property state
+

Get the SC point state based on the resource meta data.

+
+ +
+
+property county
+

Get the SC point county based on the resource meta data.

+
+ +
+
+property elevation
+

Get the SC point elevation based on the resource meta data.

+
+ +
+
+property timezone
+

Get the SC point timezone based on the resource meta data.

+
+ +
+
+property offshore
+

Get the SC point offshore flag based on the resource meta data +(if offshore column is present).

+
+ +
+
+property h5_gid_set
+

Get list of unique h5 gids corresponding to this sc point.

+
+
Returns:
+

h5_gids (list) – List of h5 gids.

+
+
+
+ +
+
+property gid_counts
+

Get the sum of the inclusion values in each resource/generation gid +corresponding to this sc point. The sum of the gid counts can be less +than the value provided by n_gids if fractional exclusion/inclusions +are provided.

+
+
Returns:
+

gid_counts (list)

+
+
+
+ +
+
+property summary
+

Supply curve point’s meta data summary

+
+
Returns:
+

pandas.Series – List of supply curve point’s meta data

+
+
+
+ +
+
+classmethod run(gid, excl, agg_h5, tm_dset, *agg_dset, agg_method='mean', excl_dict=None, inclusion_mask=None, resolution=64, excl_area=None, exclusion_shape=None, close=True, gen_index=None)[source]
+

Compute exclusions weight mean for the sc point from data

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • excl (str | ExclusionMask) – Filepath to exclusions h5 or ExclusionMask file handler.

  • +
  • agg_h5 (str | Resource) – Filepath to .h5 file to aggregate or Resource handler

  • +
  • tm_dset (str) – Dataset name in the exclusions file containing the +exclusions-to-resource mapping data.

  • +
  • agg_dset (str) – Dataset to aggreate, can supply multiple datasets or no datasets. +The datasets should be scalar values for each site. This method +cannot aggregate timeseries data.

  • +
  • agg_method (str) – Aggregation method, either mean or sum/aggregate

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arugments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +None if excl input is pre-initialized.

  • +
  • inclusion_mask (np.ndarray) – 2D array pre-extracted inclusion mask where 1 is included and 0 is +excluded. The shape of this will be checked against the input +resolution.

  • +
  • resolution (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

  • +
  • excl_area (float | None, optional) – Area of an exclusion pixel in km2. None will try to infer the area +from the profile transform attribute in excl_fpath, by default None

  • +
  • exclusion_shape (tuple) – Shape of the full exclusions extent (rows, cols). Inputing this +will speed things up considerably.

  • +
  • close (bool) – Flag to close object file handlers on exit.

  • +
  • gen_index (np.ndarray) – Array of generation gids with array index equal to resource gid. +Array value is -1 if the resource index was not used in the +generation run.

  • +
+
+
Returns:
+

out (dict) – Given datasets and meta data aggregated to supply curve points

+
+
+
+ +
+
+agg_data_layers(summary, data_layers)
+

Perform additional data layer aggregation. If there is no valid data +in the included area, the data layer will be taken from the full SC +point extent (ignoring exclusions). If there is still no valid data, +a warning will be raised and the data layer will have a NaN/None value.

+
+
Parameters:
+
    +
  • summary (dict) – Dictionary of summary outputs for this sc point.

  • +
  • data_layers (None | dict) – Aggregation data layers. Must be a dictionary keyed by data label +name. Each value must be another dictionary with “dset”, “method”, +and “fpath”.

  • +
+
+
Returns:
+

summary (dict) – Dictionary of summary outputs for this sc point. A new entry for +each data layer is added.

+
+
+
+ +
+
+aggregate(arr)
+

Calc sum (aggregation) of the resource data.

+
+
Parameters:
+

arr (np.ndarray) – Array of resource data.

+
+
Returns:
+

agg (float) – Sum of arr masked by the binary exclusions

+
+
+
+ +
+
+property area
+

Get the non-excluded resource area of the supply curve point in the +current resource class.

+
+
Returns:
+

area (float) – Non-excluded resource/generation area in square km.

+
+
+
+ +
+
+property bool_mask
+

Get a boolean inclusion mask (True if excl point is not excluded).

+
+
Returns:
+

mask (np.ndarray) – Mask with length equal to the flattened exclusion shape

+
+
+
+ +
+
+property centroid
+

Get the supply curve point centroid coordinate.

+
+
Returns:
+

centroid (tuple) – SC point centroid (lat, lon).

+
+
+
+ +
+
+property cols
+

Get the cols of the exclusions layer associated with this SC point.

+
+
Returns:
+

cols (slice) – Column slice to index the high-res layer (exclusions layer) for the +gid in the agg layer (supply curve layer).

+
+
+
+ +
+
+exclusion_weighted_mean(arr, drop_nan=True)
+

Calc the exclusions-weighted mean value of an array of resource data.

+
+
Parameters:
+
    +
  • arr (np.ndarray) – Array of resource data.

  • +
  • drop_nan (bool) – Flag to drop nan values from the mean calculation (only works for +1D arr input, profiles should not have NaN’s)

  • +
+
+
Returns:
+

mean (float | np.ndarray) – Mean of arr masked by the binary exclusions then weighted by +the non-zero exclusions. This will be a 1D numpy array if the +input data is a 2D numpy array (averaged along axis=1)

+
+
+
+ +
+
+property exclusions
+

Get the exclusions object.

+
+
Returns:
+

_excls (ExclusionMask) – ExclusionMask h5 handler object.

+
+
+
+ +
+
+static get_agg_slices(gid, shape, resolution)
+

Get the row, col slices of an aggregation gid.

+
+
Parameters:
+
    +
  • gid (int) – Gid of interest in the aggregated layer.

  • +
  • shape (tuple) – (row, col) shape tuple of the underlying high-res layer.

  • +
  • resolution (int) – Resolution of the aggregation: number of pixels in 1D being +aggregated.

  • +
+
+
Returns:
+

    +
  • row_slice (slice) – Row slice to index the high-res layer for the gid in the agg layer.

  • +
  • col_slice (slice) – Col slice to index the high-res layer for the gid in the agg layer.

  • +
+

+
+
+
+ +
+
+property gid
+

supply curve point gid

+
+ +
+
+property include_mask
+

[0, 1] +where 1 is included and 0 is excluded).

+
+
Returns:
+

np.ndarray

+
+
Type:
+

Get the 2D inclusion mask (normalized with expected range

+
+
+
+ +
+
+property include_mask_flat
+

Get the flattened inclusion mask (normalized with expected +range: [0, 1] where 1 is included and 0 is excluded).

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+property latitude
+

Get the SC point latitude

+
+ +
+
+property longitude
+

Get the SC point longitude

+
+ +
+
+mean_wind_dirs(arr)
+

Calc the mean wind directions at every time-step

+
+
Parameters:
+

arr (np.ndarray) – Array of wind direction data.

+
+
Returns:
+

mean_wind_dirs (np.ndarray | float) – Mean wind direction of arr masked by the binary exclusions

+
+
+
+ +
+
+property n_gids
+

Get the total number of not fully excluded pixels associated with the +available resource/generation gids at the given sc gid.

+
+
Returns:
+

n_gids (list)

+
+
+
+ +
+
+property pixel_area
+

The area in km2 of a single exclusion pixel. If this value was not +provided on initialization, it is determined from the profile of the +exclusion file.

+
+
Returns:
+

float

+
+
+
+ +
+
+property resolution
+

Get the supply curve grid aggregation resolution

+
+ +
+
+property rows
+

Get the rows of the exclusions layer associated with this SC point.

+
+
Returns:
+

rows (slice) – Row slice to index the high-res layer (exclusions layer) for the +gid in the agg layer (supply curve layer).

+
+
+
+ +
+
+classmethod sc_mean(gid, excl, tm_dset, data, excl_dict=None, resolution=64, exclusion_shape=None, close=True)
+

Compute exclusions weight mean for the sc point from data

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • excl (str | ExclusionMask) – Filepath to exclusions h5 or ExclusionMask file handler.

  • +
  • tm_dset (str) – Dataset name in the exclusions file containing the +exclusions-to-resource mapping data.

  • +
  • data (ndarray | ResourceDataset) – Array of data or open dataset handler to apply exclusions too

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arugments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +None if excl input is pre-initialized.

  • +
  • resolution (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

  • +
  • exclusion_shape (tuple) – Shape of the full exclusions extent (rows, cols). Inputing this +will speed things up considerably.

  • +
  • close (bool) – Flag to close object file handlers on exit

  • +
+
+
Returns:
+

ndarray – Exclusions weighted means of data for supply curve point

+
+
+
+ +
+
+property sc_point_gid
+

Supply curve point gid

+
+
Returns:
+

int

+
+
+
+ +
+
+classmethod sc_sum(gid, excl, tm_dset, data, excl_dict=None, resolution=64, exclusion_shape=None, close=True)
+

Compute the aggregate (sum) of data for the sc point

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • excl (str | ExclusionMask) – Filepath to exclusions h5 or ExclusionMask file handler.

  • +
  • tm_dset (str) – Dataset name in the exclusions file containing the +exclusions-to-resource mapping data.

  • +
  • data (ndarray | ResourceDataset) – Array of data or open dataset handler to apply exclusions too

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arugments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +None if excl input is pre-initialized.

  • +
  • resolution (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

  • +
  • exclusion_shape (tuple) – Shape of the full exclusions extent (rows, cols). Inputing this +will speed things up considerably.

  • +
  • close (bool) – Flag to close object file handlers on exit.

  • +
+
+
Returns:
+

ndarray – Sum / aggregation of data for supply curve point

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.points.GenerationSupplyCurvePoint.html b/_autosummary/reV.supply_curve.points.GenerationSupplyCurvePoint.html new file mode 100644 index 000000000..47ed5193e --- /dev/null +++ b/_autosummary/reV.supply_curve.points.GenerationSupplyCurvePoint.html @@ -0,0 +1,1639 @@ + + + + + + + reV.supply_curve.points.GenerationSupplyCurvePoint — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.points.GenerationSupplyCurvePoint

+
+
+class GenerationSupplyCurvePoint(gid, excl, gen, tm_dset, gen_index, excl_dict=None, inclusion_mask=None, res_class_dset=None, res_class_bin=None, excl_area=None, power_density=None, cf_dset='cf_mean-means', lcoe_dset='lcoe_fcr-means', h5_dsets=None, resolution=64, exclusion_shape=None, close=False, friction_layer=None, recalc_lcoe=True, apply_exclusions=True)[source]
+

Bases: AggregationSupplyCurvePoint

+

Supply curve point summary framework that ties a reV SC point to its +respective generation and resource data.

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • excl (str | ExclusionMask) – Filepath to exclusions h5 or ExclusionMask file handler.

  • +
  • gen (str | reV.handlers.Outputs) – Filepath to .h5 reV generation output results or reV Outputs file +handler.

  • +
  • tm_dset (str) – Dataset name in the techmap file containing the +exclusions-to-resource mapping data.

  • +
  • gen_index (np.ndarray) – Array of generation gids with array index equal to resource gid. +Array value is -1 if the resource index was not used in the +generation run.

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arugments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +None if excl input is pre-initialized.

  • +
  • inclusion_mask (np.ndarray) – 2D array pre-extracted inclusion mask where 1 is included and 0 is +excluded. The shape of this will be checked against the input +resolution.

  • +
  • res_class_dset (str | np.ndarray | None) – Dataset in the generation file dictating resource classes. +Can be pre-extracted resource data in np.ndarray. +None if no resource classes.

  • +
  • res_class_bin (list | None) – Two-entry lists dictating the single resource class bin. +None if no resource classes.

  • +
  • excl_area (float | None, optional) – Area of an exclusion pixel in km2. None will try to infer the area +from the profile transform attribute in excl_fpath, by default None

  • +
  • power_density (float | None | pd.DataFrame) – Constant power density float, None, or opened dataframe with +(resource) “gid” and “power_density columns”.

  • +
  • cf_dset (str | np.ndarray) – Dataset name from gen containing capacity factor mean values. +Can be pre-extracted generation output data in np.ndarray.

  • +
  • lcoe_dset (str | np.ndarray) – Dataset name from gen containing LCOE mean values. +Can be pre-extracted generation output data in np.ndarray.

  • +
  • h5_dsets (None | list | dict) – Optional list of dataset names to summarize from the gen/econ h5 +files. Can also be pre-extracted data dictionary where keys are +the dataset names and values are the arrays of data from the +h5 files.

  • +
  • resolution (int | None) – SC resolution, must be input in combination with gid.

  • +
  • exclusion_shape (tuple) – Shape of the exclusions extent (rows, cols). Inputing this will +speed things up considerably.

  • +
  • close (bool) – Flag to close object file handlers on exit.

  • +
  • friction_layer (None | FrictionMask) – Friction layer with scalar friction values if valid friction inputs +were entered. Otherwise, None to not apply friction layer.

  • +
  • recalc_lcoe (bool) – Flag to re-calculate the LCOE from the multi-year mean capacity +factor and annual energy production data. This requires several +datasets to be aggregated in the h5_dsets input: system_capacity, +fixed_charge_rate, capital_cost, fixed_operating_cost, +and variable_operating_cost.

  • +
  • apply_exclusions (bool) – Flag to apply exclusions to the resource / generation gid’s on +initialization.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

agg_data_layers(summary, data_layers)

Perform additional data layer aggregation.

aggregate(arr)

Calc sum (aggregation) of the resource data.

close()

Close all file handlers.

economies_of_scale(cap_cost_scale, summary)

Apply economies of scale to this point summary

exclusion_weighted_mean(flat_arr)

Calc the exclusions-weighted mean value of a flat array of gen data.

get_agg_slices(gid, shape, resolution)

Get the row, col slices of an aggregation gid.

mean_wind_dirs(arr)

Calc the mean wind directions at every time-step

point_summary([args])

Get a summary dictionary of a single supply curve point.

run(gid, excl, agg_h5, tm_dset, *agg_dset[, ...])

Compute exclusions weight mean for the sc point from data

sc_mean(gid, excl, tm_dset, data[, ...])

Compute exclusions weight mean for the sc point from data

sc_sum(gid, excl, tm_dset, data[, ...])

Compute the aggregate (sum) of data for the sc point

summarize(gid, excl_fpath, gen_fpath, ...[, ...])

Get a summary dictionary of a single supply curve point.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

POWER_DENSITY

area

Get the non-excluded resource area of the supply curve point in the current resource class.

bool_mask

Get a boolean inclusion mask (True if excl point is not excluded).

capacity

Get the estimated capacity in MW of the supply curve point in the current resource class with the applied exclusions.

capacity_ac

Get the AC estimated capacity in MW of the supply curve point in the current resource class with the applied exclusions.

centroid

Get the supply curve point centroid coordinate.

cols

Get the cols of the exclusions layer associated with this SC point.

country

Get the SC point country based on the resource meta data.

county

Get the SC point county based on the resource meta data.

elevation

Get the SC point elevation based on the resource meta data.

exclusions

Get the exclusions object.

friction_data

Get the friction data for the full SC point (no exclusions)

gen

Get the generation output object.

gen_data

Get the generation capacity factor data array.

gen_gid_set

Get list of unique generation gids corresponding to this sc point.

gid

supply curve point gid

gid_counts

Get the number of exclusion pixels in each resource/generation gid corresponding to this sc point.

h5

h5 Resource handler object

h5_dsets_data

Get any additional/supplemental h5 dataset data to summarize.

h5_gid_set

Get list of unique h5 gids corresponding to this sc point.

include_mask

[0, 1] where 1 is included and 0 is excluded).

include_mask_flat

Get the flattened inclusion mask (normalized with expected range: [0, 1] where 1 is included and 0 is excluded).

latitude

Get the SC point latitude

lcoe_data

Get the LCOE data array.

longitude

Get the SC point longitude

mean_cf

Get the mean capacity factor for the non-excluded data.

mean_friction

Get the mean friction scalar for the non-excluded data.

mean_h5_dsets_data

Get the mean supplemental h5 datasets data (optional)

mean_lcoe

Get the mean LCOE for the non-excluded data.

mean_lcoe_friction

Get the mean LCOE for the non-excluded data, multiplied by the mean_friction scalar value.

mean_res

Get the mean resource for the non-excluded data.

n_gids

Get the total number of not fully excluded pixels associated with the available resource/generation gids at the given sc gid.

offshore

Get the SC point offshore flag based on the resource meta data (if offshore column is present).

pixel_area

The area in km2 of a single exclusion pixel.

power_density

Get the estimated power density either from input or infered from generation output meta.

power_density_ac

Get the estimated AC power density either from input or inferred from generation output meta.

res_data

Get the resource data array.

res_gid_set

Get list of unique resource gids corresponding to this sc point.

resolution

Get the supply curve grid aggregation resolution

rows

Get the rows of the exclusions layer associated with this SC point.

sc_point_gid

Supply curve point gid

state

Get the SC point state based on the resource meta data.

summary

Supply curve point's meta data summary

timezone

Get the SC point timezone based on the resource meta data.

+
+
+exclusion_weighted_mean(flat_arr)[source]
+

Calc the exclusions-weighted mean value of a flat array of gen data.

+
+
Parameters:
+

flat_arr (np.ndarray) – Flattened array of resource/generation/econ data. Must be +index-able with the self._gen_gids array (must be a 1D array with +an entry for every site in the generation extent).

+
+
Returns:
+

mean (float) – Mean of flat_arr masked by the binary exclusions then weighted by +the non-zero exclusions.

+
+
+
+ +
+
+property gen
+

Get the generation output object.

+
+
Returns:
+

_gen (Resource) – reV generation Resource object

+
+
+
+ +
+
+property res_gid_set
+

Get list of unique resource gids corresponding to this sc point.

+
+
Returns:
+

res_gids (list) – List of resource gids.

+
+
+
+ +
+
+property gen_gid_set
+

Get list of unique generation gids corresponding to this sc point.

+
+
Returns:
+

gen_gids (list) – List of generation gids.

+
+
+
+ +
+
+property h5_gid_set
+

Get list of unique h5 gids corresponding to this sc point. +Same as gen_gid_set

+
+
Returns:
+

h5_gids (list) – List of h5 gids.

+
+
+
+ +
+
+property gid_counts
+

Get the number of exclusion pixels in each resource/generation gid +corresponding to this sc point.

+
+
Returns:
+

gid_counts (list) – List of exclusion pixels in each resource/generation gid.

+
+
+
+ +
+
+property res_data
+

Get the resource data array.

+
+
Returns:
+

_res_data (np.ndarray) – Multi-year-mean resource data array for all sites in the +generation data output file.

+
+
+
+ +
+
+property gen_data
+

Get the generation capacity factor data array.

+
+
Returns:
+

_gen_data (np.ndarray) – Multi-year-mean capacity factor data array for all sites in the +generation data output file.

+
+
+
+ +
+
+property lcoe_data
+

Get the LCOE data array.

+
+
Returns:
+

_lcoe_data (np.ndarray) – Multi-year-mean LCOE data array for all sites in the +generation data output file.

+
+
+
+ +
+
+property mean_cf
+

Get the mean capacity factor for the non-excluded data. Capacity +factor is weighted by the exclusions (usually 0 or 1, but 0.5 +exclusions will weight appropriately).

+
+
Returns:
+

mean_cf (float | None) – Mean capacity factor value for the non-excluded data.

+
+
+
+ +
+
+property mean_lcoe
+

Get the mean LCOE for the non-excluded data.

+
+
Returns:
+

mean_lcoe (float | None) – Mean LCOE value for the non-excluded data.

+
+
+
+ +
+
+property mean_res
+

Get the mean resource for the non-excluded data.

+
+
Returns:
+

mean_res (float | None) – Mean resource for the non-excluded data.

+
+
+
+ +
+
+property mean_lcoe_friction
+

Get the mean LCOE for the non-excluded data, multiplied by the +mean_friction scalar value.

+
+
Returns:
+

mean_lcoe_friction (float | None) – Mean LCOE value for the non-excluded data multiplied by the +mean friction scalar value.

+
+
+
+ +
+
+property mean_friction
+

Get the mean friction scalar for the non-excluded data.

+
+
Returns:
+

friction (None | float) – Mean value of the friction data layer for the non-excluded data. +If friction layer is not input to this class, None is returned.

+
+
+
+ +
+
+property friction_data
+

Get the friction data for the full SC point (no exclusions)

+
+
Returns:
+

friction_data (None | np.ndarray) – 2D friction data layer corresponding to the exclusions grid in +the SC domain. If friction layer is not input to this class, +None is returned.

+
+
+
+ +
+
+property power_density
+

Get the estimated power density either from input or inferred from +generation output meta.

+
+
Returns:
+

_power_density (float) – Estimated power density in MW/km2

+
+
+
+ +
+
+property power_density_ac
+

Get the estimated AC power density either from input or +inferred from generation output meta.

+

This value is only available for solar runs with a “dc_ac_ratio” +dataset in the generation file. If these conditions are not met, +this value is None.

+
+
Returns:
+

_power_density_ac (float | None) – Estimated AC power density in MW/km2

+
+
+
+ +
+
+property capacity
+

Get the estimated capacity in MW of the supply curve point in the +current resource class with the applied exclusions.

+
+
Returns:
+

capacity (float) – Estimated capacity in MW of the supply curve point in the +current resource class with the applied exclusions.

+
+
+
+ +
+
+property capacity_ac
+

Get the AC estimated capacity in MW of the supply curve point in the +current resource class with the applied exclusions.

+

This value is provided only for solar inputs that have +the “dc_ac_ratio” dataset in the generation file. If these +conditions are not met, this value is None.

+
+
Returns:
+

capacity (float | None) – Estimated AC capacity in MW of the supply curve point in the +current resource class with the applied exclusions. Only not +None for solar runs with “dc_ac_ratio” dataset in the +generation file

+
+
+
+ +
+
+property h5_dsets_data
+

Get any additional/supplemental h5 dataset data to summarize.

+
+
Returns:
+

h5_dsets_data (dict | None)

+
+
+
+ +
+
+property mean_h5_dsets_data
+

Get the mean supplemental h5 datasets data (optional)

+
+
Returns:
+

mean_h5_dsets_data (dict | None) – Mean dataset values for the non-excluded data for the optional +h5_dsets input.

+
+
+
+ +
+
+point_summary(args=None)[source]
+

Get a summary dictionary of a single supply curve point.

+
+
Parameters:
+

args (tuple | list | None) – List of summary arguments to include. None defaults to all +available args defined in the class attr.

+
+
Returns:
+

summary (dict) – Dictionary of summary outputs for this sc point.

+
+
+
+ +
+
+static economies_of_scale(cap_cost_scale, summary)[source]
+

Apply economies of scale to this point summary

+
+
Parameters:
+
    +
  • cap_cost_scale (str) – LCOE scaling equation to implement “economies of scale”. +Equation must be in python string format and return a scalar +value to multiply the capital cost by. Independent variables in +the equation should match the names of the columns in the reV +supply curve aggregation table.

  • +
  • summary (dict) – Dictionary of summary outputs for this sc point.

  • +
+
+
Returns:
+

summary (dict) – Dictionary of summary outputs for this sc point.

+
+
+
+ +
+
+agg_data_layers(summary, data_layers)
+

Perform additional data layer aggregation. If there is no valid data +in the included area, the data layer will be taken from the full SC +point extent (ignoring exclusions). If there is still no valid data, +a warning will be raised and the data layer will have a NaN/None value.

+
+
Parameters:
+
    +
  • summary (dict) – Dictionary of summary outputs for this sc point.

  • +
  • data_layers (None | dict) – Aggregation data layers. Must be a dictionary keyed by data label +name. Each value must be another dictionary with “dset”, “method”, +and “fpath”.

  • +
+
+
Returns:
+

summary (dict) – Dictionary of summary outputs for this sc point. A new entry for +each data layer is added.

+
+
+
+ +
+
+aggregate(arr)
+

Calc sum (aggregation) of the resource data.

+
+
Parameters:
+

arr (np.ndarray) – Array of resource data.

+
+
Returns:
+

agg (float) – Sum of arr masked by the binary exclusions

+
+
+
+ +
+
+property area
+

Get the non-excluded resource area of the supply curve point in the +current resource class.

+
+
Returns:
+

area (float) – Non-excluded resource/generation area in square km.

+
+
+
+ +
+
+property bool_mask
+

Get a boolean inclusion mask (True if excl point is not excluded).

+
+
Returns:
+

mask (np.ndarray) – Mask with length equal to the flattened exclusion shape

+
+
+
+ +
+
+property centroid
+

Get the supply curve point centroid coordinate.

+
+
Returns:
+

centroid (tuple) – SC point centroid (lat, lon).

+
+
+
+ +
+
+close()
+

Close all file handlers.

+
+ +
+
+property cols
+

Get the cols of the exclusions layer associated with this SC point.

+
+
Returns:
+

cols (slice) – Column slice to index the high-res layer (exclusions layer) for the +gid in the agg layer (supply curve layer).

+
+
+
+ +
+
+property country
+

Get the SC point country based on the resource meta data.

+
+ +
+
+property county
+

Get the SC point county based on the resource meta data.

+
+ +
+
+property elevation
+

Get the SC point elevation based on the resource meta data.

+
+ +
+
+property exclusions
+

Get the exclusions object.

+
+
Returns:
+

_excls (ExclusionMask) – ExclusionMask h5 handler object.

+
+
+
+ +
+
+static get_agg_slices(gid, shape, resolution)
+

Get the row, col slices of an aggregation gid.

+
+
Parameters:
+
    +
  • gid (int) – Gid of interest in the aggregated layer.

  • +
  • shape (tuple) – (row, col) shape tuple of the underlying high-res layer.

  • +
  • resolution (int) – Resolution of the aggregation: number of pixels in 1D being +aggregated.

  • +
+
+
Returns:
+

    +
  • row_slice (slice) – Row slice to index the high-res layer for the gid in the agg layer.

  • +
  • col_slice (slice) – Col slice to index the high-res layer for the gid in the agg layer.

  • +
+

+
+
+
+ +
+
+property gid
+

supply curve point gid

+
+ +
+
+property h5
+

h5 Resource handler object

+
+
Returns:
+

_h5 (Resource) – Resource h5 handler object.

+
+
+
+ +
+
+property include_mask
+

[0, 1] +where 1 is included and 0 is excluded).

+
+
Returns:
+

np.ndarray

+
+
Type:
+

Get the 2D inclusion mask (normalized with expected range

+
+
+
+ +
+
+property include_mask_flat
+

Get the flattened inclusion mask (normalized with expected +range: [0, 1] where 1 is included and 0 is excluded).

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+property latitude
+

Get the SC point latitude

+
+ +
+
+property longitude
+

Get the SC point longitude

+
+ +
+
+mean_wind_dirs(arr)
+

Calc the mean wind directions at every time-step

+
+
Parameters:
+

arr (np.ndarray) – Array of wind direction data.

+
+
Returns:
+

mean_wind_dirs (np.ndarray | float) – Mean wind direction of arr masked by the binary exclusions

+
+
+
+ +
+
+property n_gids
+

Get the total number of not fully excluded pixels associated with the +available resource/generation gids at the given sc gid.

+
+
Returns:
+

n_gids (list)

+
+
+
+ +
+
+property offshore
+

Get the SC point offshore flag based on the resource meta data +(if offshore column is present).

+
+ +
+
+property pixel_area
+

The area in km2 of a single exclusion pixel. If this value was not +provided on initialization, it is determined from the profile of the +exclusion file.

+
+
Returns:
+

float

+
+
+
+ +
+
+property resolution
+

Get the supply curve grid aggregation resolution

+
+ +
+
+property rows
+

Get the rows of the exclusions layer associated with this SC point.

+
+
Returns:
+

rows (slice) – Row slice to index the high-res layer (exclusions layer) for the +gid in the agg layer (supply curve layer).

+
+
+
+ +
+
+classmethod run(gid, excl, agg_h5, tm_dset, *agg_dset, agg_method='mean', excl_dict=None, inclusion_mask=None, resolution=64, excl_area=None, exclusion_shape=None, close=True, gen_index=None)
+

Compute the exclusions-weighted mean for the sc point from data

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • excl (str | ExclusionMask) – Filepath to exclusions h5 or ExclusionMask file handler.

  • +
  • agg_h5 (str | Resource) – Filepath to .h5 file to aggregate or Resource handler

  • +
  • tm_dset (str) – Dataset name in the exclusions file containing the +exclusions-to-resource mapping data.

  • +
  • agg_dset (str) – Dataset to aggregate, can supply multiple datasets or no datasets. +The datasets should be scalar values for each site. This method +cannot aggregate timeseries data.

  • +
  • agg_method (str) – Aggregation method, either mean or sum/aggregate

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +None if excl input is pre-initialized.

  • +
  • inclusion_mask (np.ndarray) – 2D array pre-extracted inclusion mask where 1 is included and 0 is +excluded. The shape of this will be checked against the input +resolution.

  • +
  • resolution (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

  • +
  • excl_area (float | None, optional) – Area of an exclusion pixel in km2. None will try to infer the area +from the profile transform attribute in excl_fpath, by default None

  • +
  • exclusion_shape (tuple) – Shape of the full exclusions extent (rows, cols). Inputting this +will speed things up considerably.

  • +
  • close (bool) – Flag to close object file handlers on exit.

  • +
  • gen_index (np.ndarray) – Array of generation gids with array index equal to resource gid. +Array value is -1 if the resource index was not used in the +generation run.

  • +
+
+
Returns:
+

out (dict) – Given datasets and meta data aggregated to supply curve points

+
+
+
+ +
+
+classmethod sc_mean(gid, excl, tm_dset, data, excl_dict=None, resolution=64, exclusion_shape=None, close=True)
+

Compute the exclusions-weighted mean for the sc point from data

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • excl (str | ExclusionMask) – Filepath to exclusions h5 or ExclusionMask file handler.

  • +
  • tm_dset (str) – Dataset name in the exclusions file containing the +exclusions-to-resource mapping data.

  • +
  • data (ndarray | ResourceDataset) – Array of data or open dataset handler to apply exclusions to

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +None if excl input is pre-initialized.

  • +
  • resolution (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

  • +
  • exclusion_shape (tuple) – Shape of the full exclusions extent (rows, cols). Inputting this +will speed things up considerably.

  • +
  • close (bool) – Flag to close object file handlers on exit

  • +
+
+
Returns:
+

ndarray – Exclusions weighted means of data for supply curve point

+
+
+
+ +
+
+property sc_point_gid
+

Supply curve point gid

+
+
Returns:
+

int

+
+
+
+ +
+
+classmethod sc_sum(gid, excl, tm_dset, data, excl_dict=None, resolution=64, exclusion_shape=None, close=True)
+

Compute the aggregate (sum) of data for the sc point

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • excl (str | ExclusionMask) – Filepath to exclusions h5 or ExclusionMask file handler.

  • +
  • tm_dset (str) – Dataset name in the exclusions file containing the +exclusions-to-resource mapping data.

  • +
  • data (ndarray | ResourceDataset) – Array of data or open dataset handler to apply exclusions to

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +None if excl input is pre-initialized.

  • +
  • resolution (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

  • +
  • exclusion_shape (tuple) – Shape of the full exclusions extent (rows, cols). Inputting this +will speed things up considerably.

  • +
  • close (bool) – Flag to close object file handlers on exit.

  • +
+
+
Returns:
+

ndarray – Sum / aggregation of data for supply curve point

+
+
+
+ +
+
+property state
+

Get the SC point state based on the resource meta data.

+
+ +
+
+classmethod summarize(gid, excl_fpath, gen_fpath, tm_dset, gen_index, excl_dict=None, inclusion_mask=None, res_class_dset=None, res_class_bin=None, excl_area=None, power_density=None, cf_dset='cf_mean-means', lcoe_dset='lcoe_fcr-means', h5_dsets=None, resolution=64, exclusion_shape=None, close=False, friction_layer=None, args=None, data_layers=None, cap_cost_scale=None, recalc_lcoe=True)[source]
+

Get a summary dictionary of a single supply curve point.

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • excl_fpath (str) – Filepath to exclusions h5.

  • +
  • gen_fpath (str) – Filepath to .h5 reV generation output results.

  • +
  • tm_dset (str) – Dataset name in the techmap file containing the +exclusions-to-resource mapping data.

  • +
  • gen_index (np.ndarray) – Array of generation gids with array index equal to resource gid. +Array value is -1 if the resource index was not used in the +generation run.

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +None if excl input is pre-initialized.

  • +
  • inclusion_mask (np.ndarray) – 2D array pre-extracted inclusion mask where 1 is included and 0 is +excluded. The shape of this will be checked against the input +resolution.

  • +
  • res_class_dset (str | np.ndarray | None) – Dataset in the generation file dictating resource classes. +Can be pre-extracted resource data in np.ndarray. +None if no resource classes.

  • +
  • res_class_bin (list | None) – Two-entry lists dictating the single resource class bin. +None if no resource classes.

  • +
  • excl_area (float | None, optional) – Area of an exclusion pixel in km2. None will try to infer the area +from the profile transform attribute in excl_fpath, by default None

  • +
  • power_density (float | None | pd.DataFrame) – Constant power density float, None, or opened dataframe with +(resource) “gid” and “power_density columns”.

  • +
  • cf_dset (str | np.ndarray) – Dataset name from gen containing capacity factor mean values. +Can be pre-extracted generation output data in np.ndarray.

  • +
  • lcoe_dset (str | np.ndarray) – Dataset name from gen containing LCOE mean values. +Can be pre-extracted generation output data in np.ndarray.

  • +
  • h5_dsets (None | list | dict) – Optional list of dataset names to summarize from the gen/econ h5 +files. Can also be pre-extracted data dictionary where keys are +the dataset names and values are the arrays of data from the +h5 files.

  • +
  • resolution (int | None) – SC resolution, must be input in combination with gid.

  • +
  • exclusion_shape (tuple) – Shape of the exclusions extent (rows, cols). Inputting this will +speed things up considerably.

  • +
  • close (bool) – Flag to close object file handlers on exit.

  • +
  • friction_layer (None | FrictionMask) – Friction layer with scalar friction values if valid friction inputs +were entered. Otherwise, None to not apply friction layer.

  • +
  • args (tuple | list, optional) – List of summary arguments to include. None defaults to all +available args defined in the class attr, by default None

  • +
  • data_layers (dict, optional) – Aggregation data layers. Must be a dictionary keyed by data label +name. Each value must be another dictionary with “dset”, “method”, +and “fpath”, by default None

  • +
  • cap_cost_scale (str | None) – Optional LCOE scaling equation to implement “economies of scale”. +Equations must be in python string format and return a scalar +value to multiply the capital cost by. Independent variables in +the equation should match the names of the columns in the reV +supply curve aggregation table.

  • +
  • recalc_lcoe (bool) – Flag to re-calculate the LCOE from the multi-year mean capacity +factor and annual energy production data. This requires several +datasets to be aggregated in the h5_dsets input: system_capacity, +fixed_charge_rate, capital_cost, fixed_operating_cost, +and variable_operating_cost.

  • +
+
+
Returns:
+

summary (dict) – Dictionary of summary outputs for this sc point.

+
+
+
+ +
+
+property summary
+

Supply curve point’s meta data summary

+
+
Returns:
+

pandas.Series – List of supply curve point’s meta data

+
+
+
+ +
+
+property timezone
+

Get the SC point timezone based on the resource meta data.

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.points.SupplyCurvePoint.html b/_autosummary/reV.supply_curve.points.SupplyCurvePoint.html new file mode 100644 index 000000000..ca9f5d1e5 --- /dev/null +++ b/_autosummary/reV.supply_curve.points.SupplyCurvePoint.html @@ -0,0 +1,1079 @@ + + + + + + + reV.supply_curve.points.SupplyCurvePoint — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.points.SupplyCurvePoint

+
+
+class SupplyCurvePoint(gid, excl, tm_dset, excl_dict=None, inclusion_mask=None, resolution=64, excl_area=None, exclusion_shape=None, close=True)[source]
+

Bases: AbstractSupplyCurvePoint

+

Generic single SC point based on exclusions, resolution, and techmap

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • excl (str | list | tuple | ExclusionMask) – Filepath(s) to exclusions h5 or ExclusionMask file handler.

  • +
  • tm_dset (str) – Dataset name in the exclusions file containing the +exclusions-to-resource mapping data.

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +None if excl input is pre-initialized.

  • +
  • inclusion_mask (np.ndarray) – 2D array pre-extracted inclusion mask where 1 is included and 0 is +excluded. The shape of this will be checked against the input +resolution.

  • +
  • resolution (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

  • +
  • excl_area (float | None, optional) – Area of an exclusion pixel in km2. None will try to infer the area +from the profile transform attribute in excl_fpath, by default None

  • +
  • exclusion_shape (tuple) – Shape of the full exclusions extent (rows, cols). Inputting this +will speed things up considerably.

  • +
  • close (bool) – Flag to close object file handlers on exit.

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + + + + + + + + + + + + + +

agg_data_layers(summary, data_layers)

Perform additional data layer aggregation.

aggregate(arr)

Calc sum (aggregation) of the resource data.

close()

Close all file handlers.

exclusion_weighted_mean(arr[, drop_nan])

Calc the exclusions-weighted mean value of an array of resource data.

get_agg_slices(gid, shape, resolution)

Get the row, col slices of an aggregation gid.

mean_wind_dirs(arr)

Calc the mean wind directions at every time-step

sc_mean(gid, excl, tm_dset, data[, ...])

Compute the exclusions-weighted mean for the sc point from data

sc_sum(gid, excl, tm_dset, data[, ...])

Compute the aggregate (sum) of data for the sc point

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

area

Get the non-excluded resource area of the supply curve point in the current resource class.

bool_mask

Get a boolean inclusion mask (True if excl point is not excluded).

centroid

Get the supply curve point centroid coordinate.

cols

Get the cols of the exclusions layer associated with this SC point.

exclusions

Get the exclusions object.

gid

supply curve point gid

h5

placeholder for h5 Resource handler object

include_mask

[0, 1] where 1 is included and 0 is excluded).

include_mask_flat

Get the flattened inclusion mask (normalized with expected range: [0, 1] where 1 is included and 0 is excluded).

latitude

Get the SC point latitude

longitude

Get the SC point longitude

n_gids

Get the total number of not fully excluded pixels associated with the available resource/generation gids at the given sc gid.

pixel_area

The area in km2 of a single exclusion pixel.

resolution

Get the supply curve grid aggregation resolution

rows

Get the rows of the exclusions layer associated with this SC point.

sc_point_gid

Supply curve point gid

summary

Placeholder for Supply curve point's meta data summary

+
+
+close()[source]
+

Close all file handlers.

+
+ +
+
+property exclusions
+

Get the exclusions object.

+
+
Returns:
+

_excls (ExclusionMask) – ExclusionMask h5 handler object.

+
+
+
+ +
+
+property centroid
+

Get the supply curve point centroid coordinate.

+
+
Returns:
+

centroid (tuple) – SC point centroid (lat, lon).

+
+
+
+ +
+
+property pixel_area
+

The area in km2 of a single exclusion pixel. If this value was not +provided on initialization, it is determined from the profile of the +exclusion file.

+
+
Returns:
+

float

+
+
+
+ +
+
+property area
+

Get the non-excluded resource area of the supply curve point in the +current resource class.

+
+
Returns:
+

area (float) – Non-excluded resource/generation area in square km.

+
+
+
+ +
+
+property latitude
+

Get the SC point latitude

+
+ +
+
+property longitude
+

Get the SC point longitude

+
+ +
+
+property n_gids
+

Get the total number of not fully excluded pixels associated with the +available resource/generation gids at the given sc gid.

+
+
Returns:
+

n_gids (list)

+
+
+
+ +
+
+property include_mask
+

[0, 1] +where 1 is included and 0 is excluded).

+
+
Returns:
+

np.ndarray

+
+
Type:
+

Get the 2D inclusion mask (normalized with expected range

+
+
+
+ +
+
+property include_mask_flat
+

Get the flattened inclusion mask (normalized with expected +range: [0, 1] where 1 is included and 0 is excluded).

+
+
Returns:
+

np.ndarray

+
+
+
+ +
+
+property bool_mask
+

Get a boolean inclusion mask (True if excl point is not excluded).

+
+
Returns:
+

mask (np.ndarray) – Mask with length equal to the flattened exclusion shape

+
+
+
+ +
+
+property h5
+

placeholder for h5 Resource handler object

+
+ +
+
+property summary
+

Placeholder for Supply curve point’s meta data summary

+
+ +
+
+exclusion_weighted_mean(arr, drop_nan=True)[source]
+

Calc the exclusions-weighted mean value of an array of resource data.

+
+
Parameters:
+
    +
  • arr (np.ndarray) – Array of resource data.

  • +
  • drop_nan (bool) – Flag to drop nan values from the mean calculation (only works for +1D arr input, profiles should not have NaN’s)

  • +
+
+
Returns:
+

mean (float | np.ndarray) – Mean of arr masked by the binary exclusions then weighted by +the non-zero exclusions. This will be a 1D numpy array if the +input data is a 2D numpy array (averaged along axis=1)

+
+
+
+ +
+
+mean_wind_dirs(arr)[source]
+

Calc the mean wind directions at every time-step

+
+
Parameters:
+

arr (np.ndarray) – Array of wind direction data.

+
+
Returns:
+

mean_wind_dirs (np.ndarray | float) – Mean wind direction of arr masked by the binary exclusions

+
+
+
+ +
+
+aggregate(arr)[source]
+

Calc sum (aggregation) of the resource data.

+
+
Parameters:
+

arr (np.ndarray) – Array of resource data.

+
+
Returns:
+

agg (float) – Sum of arr masked by the binary exclusions

+
+
+
+ +
+
+classmethod sc_mean(gid, excl, tm_dset, data, excl_dict=None, resolution=64, exclusion_shape=None, close=True)[source]
+

Compute the exclusions-weighted mean for the sc point from data

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • excl (str | ExclusionMask) – Filepath to exclusions h5 or ExclusionMask file handler.

  • +
  • tm_dset (str) – Dataset name in the exclusions file containing the +exclusions-to-resource mapping data.

  • +
  • data (ndarray | ResourceDataset) – Array of data or open dataset handler to apply exclusions to

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +None if excl input is pre-initialized.

  • +
  • resolution (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

  • +
  • exclusion_shape (tuple) – Shape of the full exclusions extent (rows, cols). Inputting this +will speed things up considerably.

  • +
  • close (bool) – Flag to close object file handlers on exit

  • +
+
+
Returns:
+

ndarray – Exclusions weighted means of data for supply curve point

+
+
+
+ +
+
+classmethod sc_sum(gid, excl, tm_dset, data, excl_dict=None, resolution=64, exclusion_shape=None, close=True)[source]
+

Compute the aggregate (sum) of data for the sc point

+
+
Parameters:
+
    +
  • gid (int) – gid for supply curve point to analyze.

  • +
  • excl (str | ExclusionMask) – Filepath to exclusions h5 or ExclusionMask file handler.

  • +
  • tm_dset (str) – Dataset name in the exclusions file containing the +exclusions-to-resource mapping data.

  • +
  • data (ndarray | ResourceDataset) – Array of data or open dataset handler to apply exclusions to

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +None if excl input is pre-initialized.

  • +
  • resolution (int) – Number of exclusion points per SC point along an axis. +This number**2 is the total number of exclusion points per +SC point.

  • +
  • exclusion_shape (tuple) – Shape of the full exclusions extent (rows, cols). Inputting this +will speed things up considerably.

  • +
  • close (bool) – Flag to close object file handlers on exit.

  • +
+
+
Returns:
+

ndarray – Sum / aggregation of data for supply curve point

+
+
+
+ +
+
+agg_data_layers(summary, data_layers)[source]
+

Perform additional data layer aggregation. If there is no valid data +in the included area, the data layer will be taken from the full SC +point extent (ignoring exclusions). If there is still no valid data, +a warning will be raised and the data layer will have a NaN/None value.

+
+
Parameters:
+
    +
  • summary (dict) – Dictionary of summary outputs for this sc point.

  • +
  • data_layers (None | dict) – Aggregation data layers. Must be a dictionary keyed by data label +name. Each value must be another dictionary with “dset”, “method”, +and “fpath”.

  • +
+
+
Returns:
+

summary (dict) – Dictionary of summary outputs for this sc point. A new entry for +each data layer is added.

+
+
+
+ +
+
+property cols
+

Get the cols of the exclusions layer associated with this SC point.

+
+
Returns:
+

cols (slice) – Column slice to index the high-res layer (exclusions layer) for the +gid in the agg layer (supply curve layer).

+
+
+
+ +
+
+static get_agg_slices(gid, shape, resolution)
+

Get the row, col slices of an aggregation gid.

+
+
Parameters:
+
    +
  • gid (int) – Gid of interest in the aggregated layer.

  • +
  • shape (tuple) – (row, col) shape tuple of the underlying high-res layer.

  • +
  • resolution (int) – Resolution of the aggregation: number of pixels in 1D being +aggregated.

  • +
+
+
Returns:
+

    +
  • row_slice (slice) – Row slice to index the high-res layer for the gid in the agg layer.

  • +
  • col_slice (slice) – Col slice to index the high-res layer for the gid in the agg layer.

  • +
+

+
+
+
+ +
+
+property gid
+

supply curve point gid

+
+ +
+
+property resolution
+

Get the supply curve grid aggregation resolution

+
+ +
+
+property rows
+

Get the rows of the exclusions layer associated with this SC point.

+
+
Returns:
+

rows (slice) – Row slice to index the high-res layer (exclusions layer) for the +gid in the agg layer (supply curve layer).

+
+
+
+ +
+
+property sc_point_gid
+

Supply curve point gid

+
+
Returns:
+

int

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.points.html b/_autosummary/reV.supply_curve.points.html new file mode 100644 index 000000000..859956e32 --- /dev/null +++ b/_autosummary/reV.supply_curve.points.html @@ -0,0 +1,648 @@ + + + + + + + reV.supply_curve.points — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.points

+

reV supply curve points frameworks.

+

Classes

+ + + + + + + + + + + + + + + +

AbstractSupplyCurvePoint(gid, exclusion_shape)

Abstract SC point based on only the point gid, SC shape, and resolution.

AggregationSupplyCurvePoint(gid, excl, ...)

Generic single SC point to aggregate data from an h5 file.

GenerationSupplyCurvePoint(gid, excl, gen, ...)

Supply curve point summary framework that ties a reV SC point to its respective generation and resource data.

SupplyCurvePoint(gid, excl, tm_dset[, ...])

Generic single SC point based on exclusions, resolution, and techmap

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.html b/_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.html new file mode 100644 index 000000000..7a6e24359 --- /dev/null +++ b/_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.html @@ -0,0 +1,777 @@ + + + + + + + reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler

+
+
+class SupplyCurveAggFileHandler(excl_fpath, gen_fpath, econ_fpath=None, data_layers=None, power_density=None, excl_dict=None, friction_fpath=None, friction_dset=None, area_filter_kernel='queen', min_area=None)[source]
+

Bases: AbstractAggFileHandler

+

Framework to handle aggregation summary context managers: +- exclusions .h5 file +- generation .h5 file +- econ .h5 file (optional) +- friction surface .h5 file (optional) +- variable power density .csv (optional)

+
+
Parameters:
+
    +
  • excl_fpath (str | list | tuple) – Filepath to exclusions h5 with techmap dataset +(can be one or more filepaths).

  • +
  • gen_fpath (str) – Filepath to .h5 reV generation output results.

  • +
  • econ_fpath (str | None) – Filepath to .h5 reV econ output results. This is optional and only +used if the lcoe_dset is not present in the gen_fpath file.

  • +
  • data_layers (None | dict) – Aggregation data layers. Must be a dictionary keyed by data label +name. Each value must be another dictionary with “dset”, “method”, +and “fpath”.

  • +
  • power_density (float | str | None) – Power density in MW/km2 or filepath to variable power +density file. None will attempt to infer a constant +power density from the generation meta data technology. +Variable power density csvs must have “gid” and “power_density” +columns where gid is the resource gid (typically wtk or nsrdb gid) +and the power_density column is in MW/km2.

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class.

  • +
  • friction_fpath (str | None) – Filepath to friction surface data (cost based exclusions). +Must be paired with friction_dset. The friction data must be the +same shape as the exclusions. Friction input creates a new output +“mean_lcoe_friction” which is the nominal LCOE multiplied by the +friction data.

  • +
  • friction_dset (str | None) – Dataset name in friction_fpath for the friction surface data. +Must be paired with friction_fpath. Must be same shape as +exclusions.

  • +
  • area_filter_kernel (str) – Contiguous area filter method to use on final exclusions mask

  • +
  • min_area (float | None) – Minimum required contiguous area filter in sq-km

  • +
+
+
+

Methods

+ + + + + + +

close()

Close all file handlers.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + +

data_layers

Get the data layers object.

exclusions

Get the exclusions file handler object.

friction_layer

Get the friction layer (cost based exclusions).

gen

Get the gen file handler object.

h5

Placeholder for h5 Resource handler

power_density

Get the power density object.

+
+
+close()[source]
+

Close all file handlers.

+
+ +
+
+property gen
+

Get the gen file handler object.

+
+
Returns:
+

_gen (Outputs) – reV gen outputs handler object.

+
+
+
+ +
+
+property data_layers
+

Get the data layers object.

+
+
Returns:
+

_data_layers (dict) – Data layers namespace.

+
+
+
+ +
+
+property power_density
+

Get the power density object.

+
+
Returns:
+

_power_density (float | None | pd.DataFrame) – Constant power density float, None, or opened dataframe with +(resource) “gid” and “power_density columns”.

+
+
+
+ +
+
+property friction_layer
+

Get the friction layer (cost based exclusions).

+
+
Returns:
+

friction_layer (None | FrictionMask) – Friction layer with scalar friction values if valid friction inputs +were entered. Otherwise, None to not apply friction layer.

+
+
+
+ +
+
+property exclusions
+

Get the exclusions file handler object.

+
+
Returns:
+

_excl (ExclusionMask) – Exclusions h5 handler object.

+
+
+
+ +
+
+property h5
+

Placeholder for h5 Resource handler

+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggregation.html b/_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggregation.html new file mode 100644 index 000000000..01dbad6e4 --- /dev/null +++ b/_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggregation.html @@ -0,0 +1,1261 @@ + + + + + + + reV.supply_curve.sc_aggregation.SupplyCurveAggregation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.sc_aggregation.SupplyCurveAggregation

+
+
+class SupplyCurveAggregation(excl_fpath, tm_dset, econ_fpath=None, excl_dict=None, area_filter_kernel='queen', min_area=None, resolution=64, excl_area=None, gids=None, pre_extract_inclusions=False, res_class_dset=None, res_class_bins=None, cf_dset='cf_mean-means', lcoe_dset='lcoe_fcr-means', h5_dsets=None, data_layers=None, power_density=None, friction_fpath=None, friction_dset=None, cap_cost_scale=None, recalc_lcoe=True)[source]
+

Bases: BaseAggregation

+

reV supply curve points aggregation framework.

+

reV supply curve aggregation combines a high-resolution +(e.g. 90m) exclusion dataset with a (typically) lower resolution +(e.g. 2km) generation dataset by mapping all data onto the high- +resolution grid and aggregating it by a large factor (e.g. 64 or +128). The result is coarsely-gridded data that summarizes +capacity and generation potential as well as associated +economics under a particular land access scenario. This module +can also summarize extra data layers during the aggregation +process, allowing for complementary land characterization +analysis.

+
+
Parameters:
+
    +
  • excl_fpath (str | list | tuple) – Filepath to exclusions data HDF5 file. The exclusions HDF5 +file should contain the layers specified in excl_dict +and data_layers. These layers may also be spread out +across multiple HDF5 files, in which case this input should +be a list or tuple of filepaths pointing to the files +containing the layers. Note that each data layer must be +uniquely defined (i.e. only appear once and in a single +input file).

  • +
  • tm_dset (str) – Dataset name in the excl_fpath file containing the +techmap (exclusions-to-resource mapping data). This data +layer links the supply curve GID’s to the generation GID’s +that are used to evaluate performance metrics such as +mean_cf.

    +
    +

    Important

    +

    This dataset uniquely couples the (typically +high-resolution) exclusion layers to the (typically +lower-resolution) resource data. Therefore, a separate +techmap must be used for every unique combination of +resource and exclusion coordinates.

    +
    +
    +

    Note

    +

    If executing reV from the command line, you +can specify a name that is not in the exclusions HDF5 +file, and reV will calculate the techmap for you. Note +however that computing the techmap and writing it to the +exclusion HDF5 file is a blocking operation, so you may +only run a single reV aggregation step at a time this +way.

    +
    +
  • +
  • econ_fpath (str, optional) – Filepath to HDF5 file with reV econ output results +containing an lcoe_dset dataset. If None, lcoe_dset +should be a dataset in the gen_fpath HDF5 file that +aggregation is executed on.

    +
    +

    Note

    +

    If executing reV from the command line, this +input can be set to "PIPELINE" to parse the output +from one of these preceding pipeline steps: +multi-year, collect, or generation. However, +note that duplicate executions of any of these commands +within the pipeline may invalidate this parsing, meaning +the econ_fpath input will have to be specified manually.

    +
    +

    By default, None.

    +
  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}}, where +layer_dset_name is a dataset in the exclusion h5 file +and the kwarg: value pair is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class. +For example:

    +
    excl_dict = {
    +    "typical_exclusion": {
    +        "exclude_values": 255,
    +    },
    +    "another_exclusion": {
    +        "exclude_values": [2, 3],
    +        "weight": 0.5
    +    },
    +    "exclusion_with_nodata": {
    +        "exclude_range": [10, 100],
    +        "exclude_nodata": True,
    +        "nodata_value": -1
    +    },
    +    "partial_setback": {
    +        "use_as_weights": True
    +    },
    +    "height_limit": {
    +        "exclude_range": [0, 200]
    +    },
    +    "slope": {
    +        "include_range": [0, 20]
    +    },
    +    "developable_land": {
    +        "force_include_values": 42
    +    },
    +    "more_developable_land": {
    +        "force_include_range": [5, 10]
    +    },
    +    ...
    +}
    +
    +
    +

    Note that all the keys given in this dictionary should be +datasets of the excl_fpath file. If None or empty +dictionary, no exclusions are applied. By default, None.

    +
  • +
  • area_filter_kernel ({“queen”, “rook”}, optional) – Contiguous area filter method to use on final exclusions +mask. The filters are defined as:

    +
    # Queen:     # Rook:
    +[[1,1,1],    [[0,1,0],
    + [1,1,1],     [1,1,1],
    + [1,1,1]]     [0,1,0]]
    +
    +
    +

    These filters define how neighboring pixels are “connected”. +Once pixels in the final exclusion layer are connected, the +area of each resulting cluster is computed and compared +against the min_area input. Any cluster with an area +less than min_area is excluded from the final mask. +This argument has no effect if min_area is None. +By default, "queen".

    +
  • +
  • min_area (float, optional) – Minimum area (in km2) required to keep an isolated +cluster of (included) land within the resulting exclusions +mask. Any clusters of land with areas less than this value +will be marked as exclusions. See the documentation for +area_filter_kernel for an explanation of how the area of +each land cluster is computed. If None, no area +filtering is performed. By default, None.

  • +
  • resolution (int, optional) – Supply Curve resolution. This value defines how many pixels +are in a single side of a supply curve cell. For example, +a value of 64 would generate a supply curve where the +side of each supply curve cell is 64x64 exclusion +pixels. By default, 64.

  • +
  • excl_area (float, optional) – Area of a single exclusion mask pixel (in km2). +If None, this value will be inferred from the profile +transform attribute in excl_fpath. By default, None.

  • +
  • gids (list, optional) – List of supply curve point gids to get summary for. If you +would like to obtain all available reV supply curve +points to run, you can use the +reV.supply_curve.extent.SupplyCurveExtent class +like so:

    +
    import pandas as pd
    +from reV.supply_curve.extent import SupplyCurveExtent
    +
    +excl_fpath = "..."
    +resolution = ...
    +with SupplyCurveExtent(excl_fpath, resolution) as sc:
    +    gids = sc.valid_sc_points(tm_dset).tolist()
    +...
    +
    +
    +

    If None, supply curve aggregation is computed for all +gids in the supply curve extent. By default, None.

    +
  • +
  • pre_extract_inclusions (bool, optional) – Optional flag to pre-extract/compute the inclusion mask from +the excl_dict input. It is typically faster to compute +the inclusion mask on the fly with parallel workers. +By default, False.

  • +
  • res_class_dset (str, optional) – Name of dataset in the reV generation HDF5 output file +containing resource data. If None, no aggregated +resource classification is performed (i.e. no mean_res +output), and the res_class_bins is ignored. +By default, None.

  • +
  • res_class_bins (list, optional) – Optional input to perform separate aggregations for various +resource data ranges. If None, only a single aggregation +per supply curve point is performed. Otherwise, this input +should be a list of floats or ints representing the resource +bin boundaries. One aggregation per resource value range is +computed, and only pixels within the given resource range +are aggregated. By default, None.

  • +
  • cf_dset (str, optional) – Dataset name from the reV generation HDF5 output file +containing capacity factor mean values. +By default, "cf_mean-means".

  • +
  • lcoe_dset (str, optional) – Dataset name from the reV generation HDF5 output file +containing LCOE mean values. +By default, "lcoe_fcr-means".

  • +
  • h5_dsets (list, optional) – Optional list of additional datasets from the reV +generation/econ HDF5 output file to aggregate. If None, +no extra datasets are aggregated. By default, None.

  • +
  • data_layers (dict, optional) –

    +

    Dictionary of aggregation data layers of the format:

    +
    data_layers = {
    +    "output_layer_name": {
    +        "dset": "layer_name",
    +        "method": "mean",
    +        "fpath": "/path/to/data.h5"
    +    },
    +    "another_output_layer_name": {
    +        "dset": "input_layer_name",
    +        "method": "mode",
    +        # optional "fpath" key omitted
    +    },
    +    ...
    +}
    +
    +
    +

    The "output_layer_name" is the column name under which +the aggregated data will appear in the output CSV file. The +"output_layer_name" does not have to match the dset +input value. The latter should match the layer name in the +HDF5 from which the data to aggregate should be pulled. The +method should be one of +{"mode", "mean", "min", "max", "sum", "category"}, +describing how the high-resolution data should be aggregated +for each supply curve point. fpath is an optional key +that can point to an HDF5 file containing the layer data. If +left out, the data is assumed to exist in the file(s) +specified by the excl_fpath input. If None, no data +layer aggregation is performed. By default, None

    +
  • +
  • power_density (float | str, optional) – Power density value (in MW/km2) or filepath to +variable power density CSV file containing the following +columns:

    +
    +
      +
    • gid : resource gid (typically wtk or nsrdb gid)

    • +
    • power_density : power density value (in +MW/km2)

    • +
    +
    +

    If None, a constant power density is inferred from the +generation meta data technology. By default, None.

    +
  • +
  • friction_fpath (str, optional) – Filepath to friction surface data (cost based exclusions). +Must be paired with the friction_dset input below. The +friction data must be the same shape as the exclusions. +Friction input creates a new output column +"mean_lcoe_friction" which is the nominal LCOE +multiplied by the friction data. If None, no friction +data is aggregated. By default, None.

  • +
  • friction_dset (str, optional) – Dataset name in friction_fpath for the friction surface +data. Must be paired with the friction_fpath above. If +None, no friction data is aggregated. +By default, None.

  • +
  • cap_cost_scale (str, optional) – Optional LCOE scaling equation to implement “economies of +scale”. Equations must be in python string format and must +return a scalar value to multiply the capital cost by. +Independent variables in the equation should match the names +of the columns in the reV supply curve aggregation +output table (see the documentation of +SupplyCurveAggregation +for details on available outputs). If None, no economies +of scale are applied. By default, None.

  • +
  • recalc_lcoe (bool, optional) – Flag to re-calculate the LCOE from the multi-year mean +capacity factor and annual energy production data. This +requires several datasets to be aggregated in the h5_dsets +input:

    +
    +
      +
    • system_capacity

    • +
    • fixed_charge_rate

    • +
    • capital_cost

    • +
    • fixed_operating_cost

    • +
    • variable_operating_cost

    • +
    +
    +

    By default, True.

    +
  • +
+
+
+

Examples

+

Standard outputs:

+
+
sc_gidint

Unique supply curve gid. This is the enumerated supply curve +points, which can have overlapping geographic locations due +to different resource bins at the same geographic SC point.

+
+
res_gidslist

Stringified list of resource gids (e.g. original WTK or +NSRDB resource GIDs) corresponding to each SC point.

+
+
gen_gidslist

Stringified list of generation gids (e.g. GID in the reV +generation output, which corresponds to the reV project +points and not necessarily the resource GIDs).

+
+
gid_countslist

Stringified list of the sum of inclusion scalar values +corresponding to each gen_gid and res_gid, where 1 is +included, 0 is excluded, and 0.7 is included with 70 percent +of available land. Each entry in this list is associated +with the corresponding entry in the gen_gids and +res_gids lists.

+
+
n_gidsint

Total number of included pixels. This is a boolean sum and +considers partial inclusions to be included (e.g. 1).

+
+
mean_cffloat

Mean capacity factor of each supply curve point (the +arithmetic mean is weighted by the inclusion layer) +(unitless).

+
+
mean_lcoefloat

Mean LCOE of each supply curve point (the arithmetic mean is +weighted by the inclusion layer). Units match the reV econ +output ($/MWh). By default, the LCOE is re-calculated using +the multi-year mean capacity factor and annual energy +production. This requires several datasets to be aggregated +in the h5_dsets input: fixed_charge_rate, +capital_cost, +fixed_operating_cost, annual_energy_production, and +variable_operating_cost. This recalc behavior can be +disabled by setting recalc_lcoe=False.

+
+
mean_resfloat

Mean resource, the resource dataset to average is provided +by the user in res_class_dset. The arithmetic mean is +weighted by the inclusion layer.

+
+
capacityfloat

Total capacity of each supply curve point (MW). Units are +contingent on the power_density input units of MW/km2.

+
+
area_sq_kmfloat

Total included area for each supply curve point in km2. This +is based on the nominal area of each exclusion pixel which +by default is calculated from the exclusion profile +attributes. The NREL reV default is 0.0081 km2 pixels +(90m x 90m). The area sum considers partial inclusions.

+
+
latitudefloat

Supply curve point centroid latitude coordinate, in degrees +(does not consider exclusions).

+
+
longitudefloat

Supply curve point centroid longitude coordinate, in degrees +(does not consider exclusions).

+
+
countrystr

Country of the supply curve point based on the most common +country of the associated resource meta data. Does not +consider exclusions.

+
+
statestr

State of the supply curve point based on the most common +state of the associated resource meta data. Does not +consider exclusions.

+
+
countystr

County of the supply curve point based on the most common +county of the associated resource meta data. Does not +consider exclusions.

+
+
elevationfloat

Mean elevation of the supply curve point based on the mean +elevation of the associated resource meta data. Does not +consider exclusions.

+
+
timezoneint

UTC offset of local timezone based on the most common +timezone of the associated resource meta data. Does not +consider exclusions.

+
+
sc_point_gidint

Spatially deterministic supply curve point gid. Duplicate +sc_point_gid values can exist due to resource binning.

+
+
sc_row_indint

Row index of the supply curve point in the aggregated +exclusion grid.

+
+
sc_col_indint

Column index of the supply curve point in the aggregated +exclusion grid

+
+
res_classint

Resource class for the supply curve gid. Each geographic +supply curve point (sc_point_gid) can have multiple +resource classes associated with it, resulting in multiple +supply curve gids (sc_gid) associated with the same +spatially deterministic supply curve point.

+
+
+

Optional outputs:

+
+
mean_frictionfloat

Mean of the friction data provided in ‘friction_fpath’ and +‘friction_dset’. The arithmetic mean is weighted by boolean +inclusions and considers partial inclusions to be included.

+
+
mean_lcoe_frictionfloat

Mean of the nominal LCOE multiplied by mean_friction value.

+
+
mean_{dset}float

Mean input h5 dataset(s) provided by the user in ‘h5_dsets’. +These mean calculations are weighted by the partial +inclusion layer.

+
+
data_layersfloat | int | str | dict

Requested data layer aggregations, each data layer must be +the same shape as the exclusion layers.

+
+
    +
  • +
    mode: int | str

    Most common value of a given data layer after +applying the boolean inclusion mask.

    +
    +
    +
  • +
  • +
    meanfloat

    Arithmetic mean value of a given data layer weighted +by the scalar inclusion mask (considers partial +inclusions).

    +
    +
    +
  • +
  • +
    minfloat | int

    Minimum value of a given data layer after applying +the boolean inclusion mask.

    +
    +
    +
  • +
  • +
    maxfloat | int

    Maximum value of a given data layer after applying +the boolean inclusion mask.

    +
    +
    +
  • +
  • +
    sumfloat

    Sum of a given data layer weighted by the scalar +inclusion mask (considers partial inclusions).

    +
    +
    +
  • +
  • +
    categorydict

    Dictionary mapping the unique values in the +data_layer to the sum of inclusion scalar values +associated with all pixels with that unique value.

    +
    +
    +
  • +
+
+
+
+

Methods

+ + + + + + + + + + + + + + + +

run(out_fpath[, gen_fpath, res_fpath, args, ...])

Run a supply curve aggregation.

run_parallel(gen_fpath[, args, max_workers, ...])

Get the supply curve points aggregation summary using futures.

run_serial(excl_fpath, gen_fpath, tm_dset, ...)

Standalone method to create agg summary - can be parallelized.

summarize(gen_fpath[, args, max_workers, ...])

Get the supply curve points aggregation summary

+

Attributes

+ + + + + + + + + +

gids

1D array of supply curve point gids to aggregate

shape

Get the shape of the full exclusions raster.

+
+
+classmethod run_serial(excl_fpath, gen_fpath, tm_dset, gen_index, econ_fpath=None, excl_dict=None, inclusion_mask=None, area_filter_kernel='queen', min_area=None, resolution=64, gids=None, args=None, res_class_dset=None, res_class_bins=None, cf_dset='cf_mean-means', lcoe_dset='lcoe_fcr-means', h5_dsets=None, data_layers=None, power_density=None, friction_fpath=None, friction_dset=None, excl_area=None, cap_cost_scale=None, recalc_lcoe=True)[source]
+

Standalone method to create agg summary - can be parallelized.

+
+
Parameters:
+
    +
  • excl_fpath (str | list | tuple) – Filepath to exclusions h5 with techmap dataset +(can be one or more filepaths).

  • +
  • gen_fpath (str) – Filepath to .h5 reV generation output results.

  • +
  • tm_dset (str) – Dataset name in the exclusions file containing the +exclusions-to-resource mapping data.

  • +
  • gen_index (np.ndarray) – Array of generation gids with array index equal to resource gid. +Array value is -1 if the resource index was not used in the +generation run.

  • +
  • econ_fpath (str | None) – Filepath to .h5 reV econ output results. This is optional and only +used if the lcoe_dset is not present in the gen_fpath file.

  • +
  • excl_dict (dict | None) – Dictionary of exclusion keyword arguments of the format +{layer_dset_name: {kwarg: value}} where layer_dset_name is a +dataset in the exclusion h5 file and kwarg is a keyword argument to +the reV.supply_curve.exclusions.LayerMask class.

  • +
  • inclusion_mask (np.ndarray | dict | optional) – 2D array pre-extracted inclusion mask where 1 is included and 0 is +excluded. This must be either match the full exclusion shape or +be a dict lookup of single-sc-point exclusion masks corresponding +to the gids input and keyed by gids, by default None which will +calculate exclusions on the fly for each sc point.

  • +
  • area_filter_kernel (str) – Contiguous area filter method to use on final exclusions mask

  • +
  • min_area (float | None) – Minimum required contiguous area filter in sq-km

  • +
  • resolution (int | None) – SC resolution, must be input in combination with gid. Preferred +option is to use the row/col slices to define the SC point instead.

  • +
  • gids (list | None) – List of supply curve point gids to get summary for (can use to +subset if running in parallel), or None for all gids in the SC +extent, by default None

  • +
  • args (list | None) – List of positional args for sc_point_method

  • +
  • res_class_dset (str | None) – Dataset in the generation file dictating resource classes. +None if no resource classes.

  • +
  • res_class_bins (list | None) – List of two-entry lists dictating the resource class bins. +None if no resource classes.

  • +
  • cf_dset (str) – Dataset name from f_gen containing capacity factor mean values.

  • +
  • lcoe_dset (str) – Dataset name from f_gen containing LCOE mean values.

  • +
  • h5_dsets (list | None) – Optional list of additional datasets from the source h5 gen/econ +files to aggregate.

  • +
  • data_layers (None | dict) – Aggregation data layers. Must be a dictionary keyed by data label +name. Each value must be another dictionary with “dset”, “method”, +and “fpath”.

  • +
  • power_density (float | str | None) – Power density in MW/km2 or filepath to variable power +density file. None will attempt to infer a constant +power density from the generation meta data technology. +Variable power density csvs must have “gid” and “power_density” +columns where gid is the resource gid (typically wtk or nsrdb gid) +and the power_density column is in MW/km2.

  • +
  • friction_fpath (str | None) – Filepath to friction surface data (cost based exclusions). +Must be paired with friction_dset. The friction data must be the +same shape as the exclusions. Friction input creates a new output +“mean_lcoe_friction” which is the nominal LCOE multiplied by the +friction data.

  • +
  • friction_dset (str | None) – Dataset name in friction_fpath for the friction surface data. +Must be paired with friction_fpath. Must be same shape as +exclusions.

  • +
  • excl_area (float | None, optional) – Area of an exclusion pixel in km2. None will try to infer the area +from the profile transform attribute in excl_fpath, by default None

  • +
  • cap_cost_scale (str | None) – Optional LCOE scaling equation to implement “economies of scale”. +Equations must be in python string format and return a scalar +value to multiply the capital cost by. Independent variables in +the equation should match the names of the columns in the reV +supply curve aggregation table.

  • +
  • recalc_lcoe (bool) – Flag to re-calculate the LCOE from the multi-year mean capacity +factor and annual energy production data. This requires several +datasets to be aggregated in the h5_dsets input: system_capacity, +fixed_charge_rate, capital_cost, fixed_operating_cost, +and variable_operating_cost.

  • +
+
+
Returns:
+

summary (list) – List of dictionaries, each being an SC point summary.

+
+
+
+ +
+
+run_parallel(gen_fpath, args=None, max_workers=None, sites_per_worker=100)[source]
+

Get the supply curve points aggregation summary using futures.

+
+
Parameters:
+
    +
  • gen_fpath (str) – Filepath to .h5 reV generation output results.

  • +
  • args (tuple | list | None) – List of summary arguments to include. None defaults to all +available args defined in the class attr.

  • +
  • max_workers (int | None, optional) – Number of cores to run summary on. None is all +available cpus, by default None

  • +
  • sites_per_worker (int) – Number of sc_points to summarize on each worker, by default 100

  • +
+
+
Returns:
+

summary (list) – List of dictionaries, each being an SC point summary.

+
+
+
+ +
+
+property gids
+

1D array of supply curve point gids to aggregate

+
+
Returns:
+

ndarray

+
+
+
+ +
+
+property shape
+

Get the shape of the full exclusions raster.

+
+
Returns:
+

tuple

+
+
+
+ +
+
+summarize(gen_fpath, args=None, max_workers=None, sites_per_worker=100)[source]
+

Get the supply curve points aggregation summary

+
+
Parameters:
+
    +
  • gen_fpath (str) – Filepath to .h5 reV generation output results.

  • +
  • args (tuple | list | None) – List of summary arguments to include. None defaults to all +available args defined in the class attr.

  • +
  • max_workers (int | None, optional) – Number of cores to run summary on. None is all +available cpus, by default None

  • +
  • sites_per_worker (int) – Number of sc_points to summarize on each worker, by default 100

  • +
+
+
Returns:
+

summary (list) – List of dictionaries, each being an SC point summary.

+
+
+
+ +
+
+run(out_fpath, gen_fpath=None, res_fpath=None, args=None, max_workers=None, sites_per_worker=100)[source]
+

Run a supply curve aggregation.

+
+
Parameters:
+
    +
  • gen_fpath (str, optional) – Filepath to HDF5 file with reV generation output +results. If None, a simple aggregation without any +generation, resource, or cost data is performed.

    +
    +

    Note

    +

    If executing reV from the command line, this +input can be set to "PIPELINE" to parse the output +from one of these preceding pipeline steps: +multi-year, collect, or econ. However, note +that duplicate executions of any of these commands within +the pipeline may invalidate this parsing, meaning the +econ_fpath input will have to be specified manually.

    +
    +

    By default, None.

    +
  • +
  • res_fpath (str, optional) – Filepath to HDF5 resource file (e.g. WTK or NSRDB). This +input is required if techmap dset is to be created or if +gen_fpath is None. By default, None.

  • +
  • args (tuple | list, optional) – List of columns to include in summary output table. None +defaults to all available args defined in the +SupplyCurveAggregation +documentation. By default, None.

  • +
  • max_workers (int, optional) – Number of cores to run summary on. None is all available +CPUs. By default, None.

  • +
  • sites_per_worker (int, optional) – Number of sc_points to summarize on each worker. +By default, 100.

  • +
+
+
Returns:
+

str – Path to output CSV file containing supply curve aggregation.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.sc_aggregation.html b/_autosummary/reV.supply_curve.sc_aggregation.html new file mode 100644 index 000000000..2f47c0e0d --- /dev/null +++ b/_autosummary/reV.supply_curve.sc_aggregation.html @@ -0,0 +1,644 @@ + + + + + + + reV.supply_curve.sc_aggregation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.sc_aggregation

+

reV supply curve aggregation framework.

+

Created on Fri Jun 21 13:24:31 2019

+

@author: gbuster

+

Classes

+ + + + + + + + + +

SupplyCurveAggFileHandler(excl_fpath, gen_fpath)

Framework to handle aggregation summary context managers: - exclusions .h5 file - generation .h5 file - econ .h5 file (optional) - friction surface .h5 file (optional) - variable power density .csv (optional)

SupplyCurveAggregation(excl_fpath, tm_dset)

reV supply curve points aggregation framework.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.supply_curve.SupplyCurve.html b/_autosummary/reV.supply_curve.supply_curve.SupplyCurve.html new file mode 100644 index 000000000..ff83dae44 --- /dev/null +++ b/_autosummary/reV.supply_curve.supply_curve.SupplyCurve.html @@ -0,0 +1,1013 @@ + + + + + + + reV.supply_curve.supply_curve.SupplyCurve — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.supply_curve.SupplyCurve

+
+
+class SupplyCurve(sc_points, trans_table, sc_features=None, sc_capacity_col='capacity')[source]
+

Bases: object

+

reV LCOT calculation and SupplyCurve sorting class.

+

reV supply curve computes the transmission costs associated +with each supply curve point output by reV supply curve +aggregation. Transmission costs can either be computed +competitively (where total capacity remaining on the +transmission grid is tracked and updated after each new +connection) or non-competitively (where the cheapest connections +for each supply curve point are allowed regardless of the +remaining transmission grid capacity). In both cases, the +permutation of transmission costs between supply curve points +and transmission grid features should be computed using the +reVX Least Cost Transmission Paths +utility.

+
+
Parameters:
+
    +
  • sc_points (str | pandas.DataFrame) – Path to CSV or JSON or DataFrame containing supply curve +point summary. Can also be a filepath to a reV bespoke +HDF5 output file where the meta dataset has the same +format as the supply curve aggregation output.

    +
    +

    Note

    +

    If executing reV from the command line, this +input can also be "PIPELINE" to parse the output of +the previous pipeline step and use it as input to this +call. However, note that duplicate executions of any +preceding commands within the pipeline may invalidate this +parsing, meaning the sc_points input will have to be +specified manually.

    +
    +
  • +
  • trans_table (str | pandas.DataFrame | list) – Path to CSV or JSON or DataFrame containing supply curve +transmission mapping. This can also be a list of +transmission tables with different line voltage (capacity) +ratings. See the reVX Least Cost Transmission Paths +utility to generate these input tables.

  • +
  • sc_features (str | pandas.DataFrame, optional) – Path to CSV or JSON or DataFrame containing additional +supply curve features (e.g. transmission multipliers, +regions, etc.). These features will be merged to the +sc_points input table on ALL columns that both have in +common. If None, no extra supply curve features are +added. By default, None.

  • +
  • sc_capacity_col (str, optional) – Name of capacity column in trans_sc_table. The values in +this column determine the size of transmission lines built. +The transmission capital costs per MW and the reinforcement +costs per MW will be returned in terms of these capacity +values. Note that if this column != “capacity”, then +“capacity” must also be included in trans_sc_table since +those values match the “mean_cf” data (which is used to +calculate LCOT and Total LCOE). This input can be used to, +e.g., size transmission lines based on solar AC capacity ( +sc_capacity_col="capacity_ac"). By default, +"capacity".

  • +
+
+
+

Examples

+

Standard outputs in addition to the values provided in +sc_points, produced by +reV.supply_curve.sc_aggregation.SupplyCurveAggregation:

+
+
    +
  • +
    transmission_multiplierint | float

    Transmission cost multiplier that scales the line cost +but not the tie-in cost in the calculation of LCOT.

    +
    +
    +
  • +
  • +
    trans_gidint

    Unique transmission feature identifier that each supply +curve point was connected to.

    +
    +
    +
  • +
  • +
    trans_capacityfloat

    Total capacity (not available capacity) of the +transmission feature that each supply curve point was +connected to. Default units are MW.

    +
    +
    +
  • +
  • +
    trans_typestr

    Tranmission feature type that each supply curve point +was connected to (e.g. Transline, Substation).

    +
    +
    +
  • +
  • +
    trans_cap_cost_per_mwfloat

    Capital cost of connecting each supply curve point to +their respective transmission feature. This value +includes line cost with transmission_multiplier and the +tie-in cost. Default units are $/MW.

    +
    +
    +
  • +
  • +
    dist_kmfloat

    Distance in km from supply curve point to transmission +connection.

    +
    +
    +
  • +
  • +
    lcotfloat

    Levelized cost of connecting to transmission ($/MWh).

    +
    +
    +
  • +
  • +
    total_lcoefloat

    Total LCOE of each supply curve point (mean_lcoe + lcot) +($/MWh).

    +
    +
    +
  • +
  • +
    total_lcoe_frictionfloat

    Total LCOE of each supply curve point considering the +LCOE friction scalar from the aggregation step +(mean_lcoe_friction + lcot) ($/MWh).

    +
    +
    +
  • +
+
+

Methods

+ + + + + + + + + + + + + + + + + + +

add_sum_cols(table, sum_cols)

Add a summation column to table.

compute_total_lcoe(fcr[, ...])

Compute LCOT and total LCOE for all sc point to transmission feature connections

full_sort(fcr[, transmission_costs, ...])

run full supply curve sorting

run(out_fpath, fixed_charge_rate[, simple, ...])

Run Supply Curve Transmission calculations.

simple_sort(fcr[, transmission_costs, ...])

Run simple supply curve sorting that does not take into account available capacity

+
+
+compute_total_lcoe(fcr, transmission_costs=None, avail_cap_frac=1, line_limited=False, connectable=True, max_workers=None, consider_friction=True)[source]
+

Compute LCOT and total LCOE for all sc point to transmission feature +connections

+
+
Parameters:
+
    +
  • fcr (float) – Fixed charge rate, used to compute LCOT

  • +
  • transmission_costs (str | dict, optional) – Transmission feature costs to use with TransmissionFeatures +handler: line_tie_in_cost, line_cost, station_tie_in_cost, +center_tie_in_cost, sink_tie_in_cost, by default None

  • +
  • avail_cap_frac (int, optional) – Fraction of transmissions features capacity ‘ac_cap’ to make +available for connection to supply curve points, by default 1

  • +
  • line_limited (bool, optional) – Flag to have substation connection is limited by maximum capacity +of the attached lines, legacy method, by default False

  • +
  • connectable (bool, optional) – Flag to only compute tranmission capital cost if transmission +feature has enough available capacity, by default True

  • +
  • max_workers (int | NoneType, optional) – Number of workers to use to compute lcot, if > 1 run in parallel. +None uses all available cpu’s. by default None

  • +
  • consider_friction (bool, optional) – Flag to consider friction layer on LCOE when “mean_lcoe_friction” +is in the sc points input, by default True

  • +
+
+
+
+ +
+
+static add_sum_cols(table, sum_cols)[source]
+

Add a summation column to table.

+
+
Parameters:
+
    +
  • table (pd.DataFrame) – Supply curve table.

  • +
  • sum_cols (dict) – Mapping of new column label(s) to multiple column labels to sum. +Example: sum_col={‘total_cap_cost’: [‘cap_cost1’, ‘cap_cost2’]} +Which would add a new ‘total_cap_cost’ column which would be the +sum of ‘cap_cost1’ and ‘cap_cost2’ if they are present in table.

  • +
+
+
Returns:
+

table (pd.DataFrame) – Supply curve table with additional summation columns.

+
+
+
+ +
+
+full_sort(fcr, transmission_costs=None, avail_cap_frac=1, line_limited=False, connectable=True, max_workers=None, consider_friction=True, sort_on=None, columns=('trans_gid', 'trans_capacity', 'trans_type', 'trans_cap_cost_per_mw', 'dist_km', 'lcot', 'total_lcoe'), wind_dirs=None, n_dirs=2, downwind=False, offshore_compete=False)[source]
+

run full supply curve sorting

+
+
Parameters:
+
    +
  • fcr (float) – Fixed charge rate, used to compute LCOT

  • +
  • transmission_costs (str | dict, optional) – Transmission feature costs to use with TransmissionFeatures +handler: line_tie_in_cost, line_cost, station_tie_in_cost, +center_tie_in_cost, sink_tie_in_cost, by default None

  • +
  • avail_cap_frac (int, optional) – Fraction of transmissions features capacity ‘ac_cap’ to make +available for connection to supply curve points, by default 1

  • +
  • line_limited (bool, optional) – Flag to have substation connection is limited by maximum capacity +of the attached lines, legacy method, by default False

  • +
  • connectable (bool, optional) – Flag to only compute tranmission capital cost if transmission +feature has enough available capacity, by default True

  • +
  • max_workers (int | NoneType, optional) – Number of workers to use to compute lcot, if > 1 run in parallel. +None uses all available cpu’s. by default None

  • +
  • consider_friction (bool, optional) – Flag to consider friction layer on LCOE when “mean_lcoe_friction” +is in the sc points input, by default True

  • +
  • sort_on (str, optional) – Column label to sort the Supply Curve table on. This affects the +build priority - connections with the lowest value in this column +will be built first, by default None, which will use +total LCOE without any reinforcement costs as the sort value.

  • +
  • columns (list | tuple, optional) – Columns to preserve in output connections dataframe, +by default (‘trans_gid’, ‘trans_capacity’, ‘trans_type’, +‘trans_cap_cost_per_mw’, ‘dist_km’, ‘lcot’, ‘total_lcoe’)

  • +
  • wind_dirs (pandas.DataFrame | str, optional) – path to .csv or reVX.wind_dirs.wind_dirs.WindDirs output with +the neighboring supply curve point gids and power-rose value at +each cardinal direction, by default None

  • +
  • n_dirs (int, optional) – Number of prominent directions to use, by default 2

  • +
  • downwind (bool, optional) – Flag to remove downwind neighbors as well as upwind neighbors, +by default False

  • +
  • offshore_compete (bool, default) – Flag as to whether offshore farms should be included during +CompetitiveWindFarms, by default False

  • +
+
+
Returns:
+

supply_curve (pandas.DataFrame) – Updated sc_points table with transmission connections, LCOT +and LCOE+LCOT based on full supply curve connections

+
+
+
+ +
+
+simple_sort(fcr, transmission_costs=None, avail_cap_frac=1, max_workers=None, consider_friction=True, sort_on=None, columns=('trans_gid', 'trans_type', 'lcot', 'total_lcoe', 'dist_km', 'trans_cap_cost_per_mw'), wind_dirs=None, n_dirs=2, downwind=False, offshore_compete=False)[source]
+

Run simple supply curve sorting that does not take into account +available capacity

+
+
Parameters:
+
    +
  • fcr (float) – Fixed charge rate, used to compute LCOT

  • +
  • transmission_costs (str | dict, optional) – Transmission feature costs to use with TransmissionFeatures +handler: line_tie_in_cost, line_cost, station_tie_in_cost, +center_tie_in_cost, sink_tie_in_cost, by default None

  • +
  • avail_cap_frac (int, optional) – Fraction of transmissions features capacity ‘ac_cap’ to make +available for connection to supply curve points, by default 1

  • +
  • line_limited (bool, optional) – Flag to have substation connection is limited by maximum capacity +of the attached lines, legacy method, by default False

  • +
  • connectable (bool, optional) – Flag to only compute tranmission capital cost if transmission +feature has enough available capacity, by default True

  • +
  • max_workers (int | NoneType, optional) – Number of workers to use to compute lcot, if > 1 run in parallel. +None uses all available cpu’s. by default None

  • +
  • consider_friction (bool, optional) – Flag to consider friction layer on LCOE when “mean_lcoe_friction” +is in the sc points input, by default True

  • +
  • sort_on (str, optional) – Column label to sort the Supply Curve table on. This affects the +build priority - connections with the lowest value in this column +will be built first, by default None, which will use +total LCOE without any reinforcement costs as the sort value.

  • +
  • columns (list | tuple, optional) – Columns to preserve in output connections dataframe, +by default (‘trans_gid’, ‘trans_capacity’, ‘trans_type’, +‘trans_cap_cost_per_mw’, ‘dist_km’, ‘lcot’, ‘total_lcoe’)

  • +
  • wind_dirs (pandas.DataFrame | str, optional) – path to .csv or reVX.wind_dirs.wind_dirs.WindDirs output with +the neighboring supply curve point gids and power-rose value at +each cardinal direction, by default None

  • +
  • n_dirs (int, optional) – Number of prominent directions to use, by default 2

  • +
  • downwind (bool, optional) – Flag to remove downwind neighbors as well as upwind neighbors

  • +
  • offshore_compete (bool, default) – Flag as to whether offshore farms should be included during +CompetitiveWindFarms, by default False

  • +
+
+
Returns:
+

supply_curve (pandas.DataFrame) – Updated sc_points table with transmission connections, LCOT +and LCOE+LCOT based on simple supply curve connections

+
+
+
+ +
+
+run(out_fpath, fixed_charge_rate, simple=True, avail_cap_frac=1, line_limited=False, transmission_costs=None, consider_friction=True, sort_on=None, columns=('trans_gid', 'trans_type', 'trans_cap_cost_per_mw', 'dist_km', 'lcot', 'total_lcoe'), max_workers=None, competition=None)[source]
+

Run Supply Curve Transmission calculations.

+

Run full supply curve taking into account available capacity of +tranmission features when making connections.

+
+
Parameters:
+
    +
  • out_fpath (str) – Full path to output CSV file. Does not need to include file +ending - it will be added automatically if missing.

  • +
  • fixed_charge_rate (float) – Fixed charge rate, (in decimal form: 5% = 0.05). This value +is used to compute LCOT.

  • +
  • simple (bool, optional) – Option to run the simple sort (does not keep track of +capacity available on the existing transmission grid). If +False, a full transmission sort (where connections are +limited based on available transmission capacity) is run. +Note that the full transmission sort requires the +avail_cap_frac and line_limited inputs. +By default, True.

  • +
  • avail_cap_frac (int, optional) – This input has no effect if simple=True. Fraction of +transmissions features capacity ac_cap to make available +for connection to supply curve points. By default, 1.

  • +
  • line_limited (bool, optional) – This input has no effect if simple=True. Flag to have +substation connection limited by maximum capacity +of the attached lines. This is a legacy method. +By default, False.

  • +
  • transmission_costs (str | dict, optional) – Dictionary of transmission feature costs or path to JSON +file containing a dictionary of transmission feature costs. +These costs are used to compute transmission capital cost +if the input transmission tables do not have a +"trans_cap_cost" column (this input is ignored +otherwise). The dictionary must include:

    +
    +
      +
    • line_tie_in_cost

    • +
    • line_cost

    • +
    • station_tie_in_cost

    • +
    • center_tie_in_cost

    • +
    • sink_tie_in_cost

    • +
    +
    +

    By default, None.

    +
  • +
  • consider_friction (bool, optional) – Flag to add a new "total_lcoe_friction" column to the +supply curve output that contains the sum of the computed +"total_lcoe" value and the input +"mean_lcoe_friction" values. If "mean_lcoe_friction" +is not in the sc_points input, this option is ignored. +By default, True.

  • +
  • sort_on (str, optional) – Column label to sort the supply curve table on. This affects +the build priority when doing a “full” sort - connections +with the lowest value in this column will be built first. +For a “simple” sort, only connections with the lowest value +in this column will be considered. If None, the sort is +performed on the total LCOE without any reinforcement +costs added (this is typically what you want - it avoids +unrealistically long spur-line connections). +By default None.

  • +
  • columns (list | tuple, optional) – Columns to preserve in output supply curve dataframe. +By default, ('trans_gid', 'trans_type', +'trans_cap_cost_per_mw', 'dist_km', 'lcot', 'total_lcoe').

  • +
  • max_workers (int, optional) – Number of workers to use to compute LCOT. If > 1, +computation is run in parallel. If None, computation +uses all available CPU’s. By default, None.

  • +
  • competition (dict, optional) – Optional dictionary of arguments for competitive wind farm +exclusions, which removes supply curve points upwind (and +optionally downwind) of the lowest LCOE supply curves. +If None, no competition is applied. Otherwise, this +dictionary can have up to four keys:

    +
    +
      +
    • wind_dirs (required) : A path to a CSV file or +reVX ProminentWindDirections +output with the neighboring supply curve point gids +and power-rose values at each cardinal direction.

    • +
    • n_dirs (optional) : An integer representing the +number of prominent directions to use during wind farm +competition. By default, 2.

    • +
    • downwind (optional) : A flag indicating that +downwind neighbors should be removed in addition to +upwind neighbors during wind farm competition. +By default, False.

    • +
    • offshore_compete (optional) : A flag indicating +that offshore farms should be included during wind +farm competition. By default, False.

    • +
    +
    +

    By default None.

    +
  • +
+
+
Returns:
+

str – Path to output supply curve.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.supply_curve.html b/_autosummary/reV.supply_curve.supply_curve.html new file mode 100644 index 000000000..b2bd919e4 --- /dev/null +++ b/_autosummary/reV.supply_curve.supply_curve.html @@ -0,0 +1,641 @@ + + + + + + + reV.supply_curve.supply_curve — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.supply_curve

+

reV supply curve module +- Calculation of LCOT +- Supply Curve creation

+

Classes

+ + + + + + +

SupplyCurve(sc_points, trans_table[, ...])

reV LCOT calculation and SupplyCurve sorting class.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.tech_mapping.TechMapping.html b/_autosummary/reV.supply_curve.tech_mapping.TechMapping.html new file mode 100644 index 000000000..bc2ba5882 --- /dev/null +++ b/_autosummary/reV.supply_curve.tech_mapping.TechMapping.html @@ -0,0 +1,793 @@ + + + + + + + reV.supply_curve.tech_mapping.TechMapping — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.tech_mapping.TechMapping

+
+
+class TechMapping(excl_fpath, res_fpath, sc_resolution=2560, dist_margin=1.05)[source]
+

Bases: object

+

Framework to create map between tech layer (exclusions), res, and gen

+
+
Parameters:
+
    +
  • excl_fpath (str) – Filepath to exclusions h5 file, must contain latitude and longitude +arrays to allow for mapping to resource points

  • +
  • res_fpath (str) – Filepath to .h5 resource file that we’re mapping to.

  • +
  • sc_resolution (int | None, optional) – Supply curve resolution, does not affect the exclusion to resource +(tech) mapping, but defines how many exclusion pixels are mapped +at a time, by default 2560

  • +
  • dist_margin (float, optional) – Extra margin to multiply times the computed distance between +neighboring resource points, by default 1.05

  • +
+
+
+

Methods

+ + + + + + + + + + + + + + + +

map_resource([max_workers, points_per_worker])

Map all resource gids to exclusion gids

map_resource_gids(gids, excl_fpath, ...)

Map exclusion gids to the resource meta.

run(excl_fpath, res_fpath[, dset, ...])

Run parallel mapping and save to h5 file.

save_tech_map(excl_fpath, dset, indices[, ...])

Save tech mapping indices and coordinates to an h5 output file.

+

Attributes

+ + + + + + +

distance_threshold

Get the upper bound on NN distance between excl and res points.

+
+
+property distance_threshold
+

Get the upper bound on NN distance between excl and res points.

+
+
Returns:
+

float – Estimate the distance between resource points. Calculated as half +of the diagonal between closest resource points, with desired +extra margin

+
+
+
+ +
+
+classmethod map_resource_gids(gids, excl_fpath, sc_row_indices, sc_col_indices, excl_row_slices, excl_col_slices, tree, dist_thresh)[source]
+

Map exclusion gids to the resource meta.

+
+
Parameters:
+
    +
  • gids (np.ndarray) – Supply curve gids with tech exclusion points to map to the +resource meta points.

  • +
  • excl_fpath (str) – Filepath to exclusions h5 file, must contain latitude and longitude +arrays to allow for mapping to resource points

  • +
  • sc_row_indices (list) – List of row indices in exclusion array for for every sc_point gid

  • +
  • sc_col_indices (list) – List of column indices in exclusion array for for every sc_point +gid

  • +
  • excl_row_slices (list) – List representing the supply curve points rows. Each list entry +contains the exclusion row slice that are included in the sc +point.

  • +
  • excl_col_slices (list) – List representing the supply curve points columns. Each list entry +contains the exclusion columns slice that are included in the sc +point.

  • +
  • tree (cKDTree) – cKDTree built from resource lat, lon coordinates

  • +
  • dist_tresh (float) – Estimate the distance between resource points. Calculated as half +of the diagonal between closest resource points, with an extra +5% margin

  • +
+
+
Returns:
+

ind (list) – List of arrays of index values from the NN. List entries correspond +to input gids.

+
+
+
+ +
+
+static save_tech_map(excl_fpath, dset, indices, distance_threshold=None, res_fpath=None, chunks=(128, 128))[source]
+

Save tech mapping indices and coordinates to an h5 output file.

+
+
Parameters:
+
    +
  • excl_fpath (str) – Filepath to exclusions h5 file to add techmap to as ‘dset’

  • +
  • dset (str) – Dataset name in fpath_out to save mapping results to.

  • +
  • indices (np.ndarray) – Index values of the NN resource point. -1 if no res point found. +2D integer array with shape equal to the exclusions extent shape.

  • +
  • distance_threshold (float) – Distance upper bound to save as attr.

  • +
  • res_fpath (str, optional) – Filepath to .h5 resource file that we’re mapping to, +by default None

  • +
  • chunks (tuple) – Chunk shape of the 2D output datasets.

  • +
+
+
+
+ +
+
+map_resource(max_workers=None, points_per_worker=10)[source]
+

Map all resource gids to exclusion gids

+
+
Parameters:
+
    +
  • max_workers (int, optional) – Number of cores to run mapping on. None uses all available cpus, +by default None

  • +
  • points_per_worker (int, optional) – Number of supply curve points to map to resource gids on each +worker, by default 10

  • +
+
+
Returns:
+

indices (np.ndarray) – Index values of the NN resource point. -1 if no res point found. +2D integer array with shape equal to the exclusions extent shape.

+
+
+
+ +
+
+classmethod run(excl_fpath, res_fpath, dset=None, sc_resolution=2560, dist_margin=1.05, max_workers=None, points_per_worker=10)[source]
+

Run parallel mapping and save to h5 file.

+
+
Parameters:
+
    +
  • excl_fpath (str) – Filepath to exclusions h5 (tech layer). dset will be +created in excl_fpath.

  • +
  • res_fpath (str) – Filepath to .h5 resource file that we’re mapping to.

  • +
  • dset (str, optional) – Dataset name in excl_fpath to save mapping results to, if None +do not save tech_map to excl_fpath, by default None

  • +
  • sc_resolution (int | None, optional) – Supply curve resolution, does not affect the exclusion to resource +(tech) mapping, but defines how many exclusion pixels are mapped +at a time, by default 2560

  • +
  • dist_margin (float, optional) – Extra margin to multiply times the computed distance between +neighboring resource points, by default 1.05

  • +
  • max_workers (int, optional) – Number of cores to run mapping on. None uses all available cpus, +by default None

  • +
  • points_per_worker (int, optional) – Number of supply curve points to map to resource gids on each +worker, by default 10

  • +
+
+
Returns:
+

indices (np.ndarray) – Index values of the NN resource point. -1 if no res point found. +2D integer array with shape equal to the exclusions extent shape.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.supply_curve.tech_mapping.html b/_autosummary/reV.supply_curve.tech_mapping.html new file mode 100644 index 000000000..d21c67b12 --- /dev/null +++ b/_autosummary/reV.supply_curve.tech_mapping.html @@ -0,0 +1,643 @@ + + + + + + + reV.supply_curve.tech_mapping — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.supply_curve.tech_mapping

+

reV tech mapping framework.

+

This module manages the exclusions-to-resource mapping. +The core of this module is a parallel cKDTree.

+

Created on Fri Jun 21 16:05:47 2019

+

@author: gbuster

+

Classes

+ + + + + + +

TechMapping(excl_fpath, res_fpath[, ...])

Framework to create map between tech layer (exclusions), res, and gen

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.ModuleName.html b/_autosummary/reV.utilities.ModuleName.html new file mode 100644 index 000000000..744e8216e --- /dev/null +++ b/_autosummary/reV.utilities.ModuleName.html @@ -0,0 +1,703 @@ + + + + + + + reV.utilities.ModuleName — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.ModuleName

+
+
+class ModuleName(value)[source]
+

Bases: str, Enum

+

A collection of the module names available in reV.

+

Each module name should match the name of the click command +that will be used to invoke its respective cli. As of 3/1/2022, +this means that all commands are lowercase with underscores +replaced by dashes.

+
+

Reference

+

See this line in the click source code to get the most up-to-date +click name conversions: https://tinyurl.com/4rehbsvf

+

Methods

+ + + + + + +

all_names()

All module names.

+

Attributes

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

BESPOKE

COLLECT

ECON

GENERATION

HYBRIDS

MULTI_YEAR

NRWAL

QA_QC

REP_PROFILES

SUPPLY_CURVE

SUPPLY_CURVE_AGGREGATION

+
+
+classmethod all_names()[source]
+

All module names.

+
+
Returns:
+

set – The set of all module name strings.

+
+
+
+ +
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.cli_functions.format_analysis_years.html b/_autosummary/reV.utilities.cli_functions.format_analysis_years.html new file mode 100644 index 000000000..642ae1611 --- /dev/null +++ b/_autosummary/reV.utilities.cli_functions.format_analysis_years.html @@ -0,0 +1,649 @@ + + + + + + + reV.utilities.cli_functions.format_analysis_years — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.cli_functions.format_analysis_years

+
+
+format_analysis_years(analysis_years=None)[source]
+

Format user’s analysis_years input

+
+
Parameters:
+

analysis_years (int | str | list, optional) – Years to run reV analysis on. Can be an integer or string, or a +list of integers or strings (or None). This input will get +converted to a list of values automatically. If None, a +ConfigWarning will be thrown. By default, None.

+
+
Returns:
+

list – List of analysis years. This list will never be empty, but it +can contain None as the only value.

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.cli_functions.html b/_autosummary/reV.utilities.cli_functions.html new file mode 100644 index 000000000..47e17ce4a --- /dev/null +++ b/_autosummary/reV.utilities.cli_functions.html @@ -0,0 +1,645 @@ + + + + + + + reV.utilities.cli_functions — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.cli_functions

+

General CLI utility functions.

+

Functions

+ + + + + + + + + + + + +

format_analysis_years([analysis_years])

Format user's analysis_years input

init_cli_logging(name, log_directory, verbose)

Initialize CLI logger

parse_from_pipeline(config, out_dir, ...)

Parse the out file from target modules and set as the values for key.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.cli_functions.init_cli_logging.html b/_autosummary/reV.utilities.cli_functions.init_cli_logging.html new file mode 100644 index 000000000..8d073148e --- /dev/null +++ b/_autosummary/reV.utilities.cli_functions.init_cli_logging.html @@ -0,0 +1,646 @@ + + + + + + + reV.utilities.cli_functions.init_cli_logging — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.cli_functions.init_cli_logging

+
+
+init_cli_logging(name, log_directory, verbose)[source]
+

Initialize CLI logger

+
+
Parameters:
+
    +
  • name (str) – The name to use for the log file written to disk.

  • +
  • log_directory (str) – Path to log file output directory.

  • +
  • verbose (bool) – Option to make logger verbose (DEBUG).

  • +
+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.cli_functions.parse_from_pipeline.html b/_autosummary/reV.utilities.cli_functions.parse_from_pipeline.html new file mode 100644 index 000000000..bd86a0695 --- /dev/null +++ b/_autosummary/reV.utilities.cli_functions.parse_from_pipeline.html @@ -0,0 +1,661 @@ + + + + + + + reV.utilities.cli_functions.parse_from_pipeline — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.cli_functions.parse_from_pipeline

+
+
+parse_from_pipeline(config, out_dir, config_key, target_modules)[source]
+

Parse the out file from target modules and set as the values for key.

+

This function only updates the config_key input if it is set to +"PIPELINE".

+
+
Parameters:
+
    +
  • config (dict) – Configuration dictionary. The config_key will be updated in +this dictionary if it is set to "PIPELINE".

  • +
  • out_dir (str) – Path to pipeline project directory where config and status files +are located. The status file is expected to be in this +directory.

  • +
  • config_key (str) – Key in config files to replace with "out_file" value(s) from +previous pipeline step.

  • +
  • target_modules (list of str | list of ModuleName) – List of (previous) target modules to parse for the +config_key.

  • +
+
+
Returns:
+

dict – Input config dictionary with updated config_key input.

+
+
Raises:
+

PipelineError – If "out_file" not found in previous target module status + files.

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.curtailment.curtail.html b/_autosummary/reV.utilities.curtailment.curtail.html new file mode 100644 index 000000000..88e21d349 --- /dev/null +++ b/_autosummary/reV.utilities.curtailment.curtail.html @@ -0,0 +1,653 @@ + + + + + + + reV.utilities.curtailment.curtail — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.curtailment.curtail

+
+
+curtail(resource, curtailment, random_seed=0)[source]
+

Curtail the SAM wind resource object based on project points.

+
+
Parameters:
+
    +
  • resource (rex.sam_resource.SAMResource) – SAM resource object for WIND resource.

  • +
  • curtailment (reV.config.curtailment.Curtailment) – Curtailment config object.

  • +
  • random_seed (int | NoneType) – Number to seed the numpy random number generator. Used to generate +reproducable psuedo-random results if the probability of curtailment +is not set to 1. Numpy random will be seeded with the system time if +this is None.

  • +
+
+
Returns:
+

resource (reV.handlers.sam_resource.SAMResource) – Same as the input argument but with the wind speed dataset set to zero +where curtailment is in effect.

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.curtailment.html b/_autosummary/reV.utilities.curtailment.html new file mode 100644 index 000000000..04a45e49d --- /dev/null +++ b/_autosummary/reV.utilities.curtailment.html @@ -0,0 +1,641 @@ + + + + + + + reV.utilities.curtailment — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.curtailment

+

Curtailment utility methods.

+

Created on Fri Mar 1 13:47:30 2019

+

@author: gbuster

+

Functions

+ + + + + + +

curtail(resource, curtailment[, random_seed])

Curtail the SAM wind resource object based on project points.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.CollectionRuntimeError.html b/_autosummary/reV.utilities.exceptions.CollectionRuntimeError.html new file mode 100644 index 000000000..d84b343f3 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.CollectionRuntimeError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.CollectionRuntimeError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.CollectionRuntimeError

+
+
+exception CollectionRuntimeError[source]
+

RuntimeError for collection handler.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.CollectionValueError.html b/_autosummary/reV.utilities.exceptions.CollectionValueError.html new file mode 100644 index 000000000..a21a08999 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.CollectionValueError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.CollectionValueError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.CollectionValueError

+
+
+exception CollectionValueError[source]
+

ValueError for collection handler.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.CollectionWarning.html b/_autosummary/reV.utilities.exceptions.CollectionWarning.html new file mode 100644 index 000000000..88776de93 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.CollectionWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.CollectionWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.CollectionWarning

+
+
+exception CollectionWarning[source]
+

Warning during .h5 collection

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.ConfigError.html b/_autosummary/reV.utilities.exceptions.ConfigError.html new file mode 100644 index 000000000..ab7e2b542 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.ConfigError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.ConfigError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.ConfigError

+
+
+exception ConfigError[source]
+

Error for bad configuration inputs

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.ConfigWarning.html b/_autosummary/reV.utilities.exceptions.ConfigWarning.html new file mode 100644 index 000000000..83dc451a3 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.ConfigWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.ConfigWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.ConfigWarning

+
+
+exception ConfigWarning[source]
+

Warning for unclear or default configuration inputs

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.DataShapeError.html b/_autosummary/reV.utilities.exceptions.DataShapeError.html new file mode 100644 index 000000000..8daf9f817 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.DataShapeError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.DataShapeError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.DataShapeError

+
+
+exception DataShapeError[source]
+

Error with mismatched data shapes.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.EmptySupplyCurvePointError.html b/_autosummary/reV.utilities.exceptions.EmptySupplyCurvePointError.html new file mode 100644 index 000000000..6844ce14e --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.EmptySupplyCurvePointError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.EmptySupplyCurvePointError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.EmptySupplyCurvePointError

+
+
+exception EmptySupplyCurvePointError[source]
+

Error for an empty supply curve point (no valid data after exclusions).

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.ExclusionLayerError.html b/_autosummary/reV.utilities.exceptions.ExclusionLayerError.html new file mode 100644 index 000000000..93e1fbdc3 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.ExclusionLayerError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.ExclusionLayerError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.ExclusionLayerError

+
+
+exception ExclusionLayerError[source]
+

Error with bad exclusion data

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.ExecutionError.html b/_autosummary/reV.utilities.exceptions.ExecutionError.html new file mode 100644 index 000000000..095df3014 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.ExecutionError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.ExecutionError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.ExecutionError

+
+
+exception ExecutionError[source]
+

Error for execution failure

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.ExtrapolationWarning.html b/_autosummary/reV.utilities.exceptions.ExtrapolationWarning.html new file mode 100644 index 000000000..501a9ff46 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.ExtrapolationWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.ExtrapolationWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.ExtrapolationWarning

+
+
+exception ExtrapolationWarning[source]
+

Warning for when value will be extrapolated

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.FileInputError.html b/_autosummary/reV.utilities.exceptions.FileInputError.html new file mode 100644 index 000000000..677afec47 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.FileInputError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.FileInputError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.FileInputError

+
+
+exception FileInputError[source]
+

Error during input file checks.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.FileInputWarning.html b/_autosummary/reV.utilities.exceptions.FileInputWarning.html new file mode 100644 index 000000000..57793baca --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.FileInputWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.FileInputWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.FileInputWarning

+
+
+exception FileInputWarning[source]
+

Warning during input file checks.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.HandlerKeyError.html b/_autosummary/reV.utilities.exceptions.HandlerKeyError.html new file mode 100644 index 000000000..b061a593c --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.HandlerKeyError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.HandlerKeyError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.HandlerKeyError

+
+
+exception HandlerKeyError[source]
+

KeyError for Handlers

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.HandlerRuntimeError.html b/_autosummary/reV.utilities.exceptions.HandlerRuntimeError.html new file mode 100644 index 000000000..55caa0b1f --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.HandlerRuntimeError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.HandlerRuntimeError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.HandlerRuntimeError

+
+
+exception HandlerRuntimeError[source]
+

RuntimeError for Handlers

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.HandlerValueError.html b/_autosummary/reV.utilities.exceptions.HandlerValueError.html new file mode 100644 index 000000000..162f60bb4 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.HandlerValueError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.HandlerValueError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.HandlerValueError

+
+
+exception HandlerValueError[source]
+

ValueError for Handlers

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.HandlerWarning.html b/_autosummary/reV.utilities.exceptions.HandlerWarning.html new file mode 100644 index 000000000..03d59826f --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.HandlerWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.HandlerWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.HandlerWarning

+
+
+exception HandlerWarning[source]
+

Warning during .h5 handling

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.InputError.html b/_autosummary/reV.utilities.exceptions.InputError.html new file mode 100644 index 000000000..d89719d6b --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.InputError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.InputError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.InputError

+
+
+exception InputError[source]
+

Error during input checks.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.InputWarning.html b/_autosummary/reV.utilities.exceptions.InputWarning.html new file mode 100644 index 000000000..5f9a95d46 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.InputWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.InputWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.InputWarning

+
+
+exception InputWarning[source]
+

Warning for unclear or default configuration inputs

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.JSONError.html b/_autosummary/reV.utilities.exceptions.JSONError.html new file mode 100644 index 000000000..ffe069ae4 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.JSONError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.JSONError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.JSONError

+
+
+exception JSONError[source]
+

Error reading json file.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.MultiFileExclusionError.html b/_autosummary/reV.utilities.exceptions.MultiFileExclusionError.html new file mode 100644 index 000000000..ff7b3039e --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.MultiFileExclusionError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.MultiFileExclusionError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.MultiFileExclusionError

+
+
+exception MultiFileExclusionError[source]
+

Error for bad multi file exclusion inputs.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.NearestNeighborError.html b/_autosummary/reV.utilities.exceptions.NearestNeighborError.html new file mode 100644 index 000000000..7904ae803 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.NearestNeighborError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.NearestNeighborError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.NearestNeighborError

+
+
+exception NearestNeighborError[source]
+

Execution error for bad nearest neighbor mapping results.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.OffshoreWindInputError.html b/_autosummary/reV.utilities.exceptions.OffshoreWindInputError.html new file mode 100644 index 000000000..3eb75e369 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.OffshoreWindInputError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.OffshoreWindInputError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.OffshoreWindInputError

+
+
+exception OffshoreWindInputError[source]
+

Error for bad offshore wind inputs

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.OffshoreWindInputWarning.html b/_autosummary/reV.utilities.exceptions.OffshoreWindInputWarning.html new file mode 100644 index 000000000..51cba7a29 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.OffshoreWindInputWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.OffshoreWindInputWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.OffshoreWindInputWarning

+
+
+exception OffshoreWindInputWarning[source]
+

Warning for potentially dangerous offshore wind inputs

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.OutputWarning.html b/_autosummary/reV.utilities.exceptions.OutputWarning.html new file mode 100644 index 000000000..0caad5f58 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.OutputWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.OutputWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.OutputWarning

+
+
+exception OutputWarning[source]
+

Warning for suspect output files or data

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.ParallelExecutionWarning.html b/_autosummary/reV.utilities.exceptions.ParallelExecutionWarning.html new file mode 100644 index 000000000..d26f45af4 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.ParallelExecutionWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.ParallelExecutionWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.ParallelExecutionWarning

+
+
+exception ParallelExecutionWarning[source]
+

Warning for parallel job execution.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.PipelineError.html b/_autosummary/reV.utilities.exceptions.PipelineError.html new file mode 100644 index 000000000..667b2217d --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.PipelineError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.PipelineError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.PipelineError

+
+
+exception PipelineError[source]
+

Error for pipeline execution failure

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.ProjectPointsValueError.html b/_autosummary/reV.utilities.exceptions.ProjectPointsValueError.html new file mode 100644 index 000000000..46682dbd9 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.ProjectPointsValueError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.ProjectPointsValueError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.ProjectPointsValueError

+
+
+exception ProjectPointsValueError[source]
+

Error for bad ProjectPoints CLI values

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.PySAMVersionError.html b/_autosummary/reV.utilities.exceptions.PySAMVersionError.html new file mode 100644 index 000000000..99d31189d --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.PySAMVersionError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.PySAMVersionError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.PySAMVersionError

+
+
+exception PySAMVersionError[source]
+

Version error for SAM installation

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.PySAMVersionWarning.html b/_autosummary/reV.utilities.exceptions.PySAMVersionWarning.html new file mode 100644 index 000000000..829a8d2eb --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.PySAMVersionWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.PySAMVersionWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.PySAMVersionWarning

+
+
+exception PySAMVersionWarning[source]
+

Version warning for SAM installation

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.ResourceError.html b/_autosummary/reV.utilities.exceptions.ResourceError.html new file mode 100644 index 000000000..f9c1a9549 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.ResourceError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.ResourceError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.ResourceError

+
+
+exception ResourceError[source]
+

Error for poorly formatted resource.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.SAMExecutionError.html b/_autosummary/reV.utilities.exceptions.SAMExecutionError.html new file mode 100644 index 000000000..06e10a75a --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.SAMExecutionError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.SAMExecutionError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.SAMExecutionError

+
+
+exception SAMExecutionError[source]
+

Execution error for SAM simulations

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.SAMExecutionWarning.html b/_autosummary/reV.utilities.exceptions.SAMExecutionWarning.html new file mode 100644 index 000000000..02359ad10 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.SAMExecutionWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.SAMExecutionWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.SAMExecutionWarning

+
+
+exception SAMExecutionWarning[source]
+

Warning for problematic SAM execution

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.SAMInputError.html b/_autosummary/reV.utilities.exceptions.SAMInputError.html new file mode 100644 index 000000000..97937e4fb --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.SAMInputError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.SAMInputError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.SAMInputError

+
+
+exception SAMInputError[source]
+

Input error for SAM simulations

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.SAMInputWarning.html b/_autosummary/reV.utilities.exceptions.SAMInputWarning.html new file mode 100644 index 000000000..a10c70ac9 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.SAMInputWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.SAMInputWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.SAMInputWarning

+
+
+exception SAMInputWarning[source]
+

Warning for bad SAM inputs

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.SlurmWarning.html b/_autosummary/reV.utilities.exceptions.SlurmWarning.html new file mode 100644 index 000000000..5926e0981 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.SlurmWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.SlurmWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.SlurmWarning

+
+
+exception SlurmWarning[source]
+

Warning for SLURM errors/warnings

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.SupplyCurveError.html b/_autosummary/reV.utilities.exceptions.SupplyCurveError.html new file mode 100644 index 000000000..54b98b4e7 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.SupplyCurveError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.SupplyCurveError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.SupplyCurveError

+
+
+exception SupplyCurveError[source]
+

Execution error for SAM simulations

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.SupplyCurveInputError.html b/_autosummary/reV.utilities.exceptions.SupplyCurveInputError.html new file mode 100644 index 000000000..657119ebf --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.SupplyCurveInputError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.SupplyCurveInputError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.SupplyCurveInputError

+
+
+exception SupplyCurveInputError[source]
+

Execution error for SAM simulations

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.WhileLoopPackingError.html b/_autosummary/reV.utilities.exceptions.WhileLoopPackingError.html new file mode 100644 index 000000000..8d8ddac9b --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.WhileLoopPackingError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.WhileLoopPackingError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.WhileLoopPackingError

+
+
+exception WhileLoopPackingError[source]
+

Error for stuck in while loop while packing

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.html b/_autosummary/reV.utilities.exceptions.html new file mode 100644 index 000000000..a5110ff39 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.html @@ -0,0 +1,762 @@ + + + + + + + reV.utilities.exceptions — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions

+

Custom Exceptions and Errors for reV

+

Exceptions

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

CollectionRuntimeError

RuntimeError for collection handler.

CollectionValueError

ValueError for collection handler.

CollectionWarning

Warning during .h5 collection

ConfigError

Error for bad configuration inputs

ConfigWarning

Warning for unclear or default configuration inputs

DataShapeError

Error with mismatched data shapes.

EmptySupplyCurvePointError

Error for an empty supply curve point (no valid data after exclusions).

ExclusionLayerError

Error with bad exclusion data

ExecutionError

Error for execution failure

ExtrapolationWarning

Warning for when value will be extrapolated

FileInputError

Error during input file checks.

FileInputWarning

Warning during input file checks.

HandlerKeyError

KeyError for Handlers

HandlerRuntimeError

RuntimeError for Handlers

HandlerValueError

ValueError for Handlers

HandlerWarning

Warning during .h5 handling

InputError

Error during input checks.

InputWarning

Warning for unclear or default configuration inputs

JSONError

Error reading json file.

MultiFileExclusionError

Error for bad multi file exclusion inputs.

NearestNeighborError

Execution error for bad nearest neighbor mapping results.

OffshoreWindInputError

Error for bad offshore wind inputs

OffshoreWindInputWarning

Warning for potentially dangerous offshore wind inputs

OutputWarning

Warning for suspect output files or data

ParallelExecutionWarning

Warning for parallel job execution.

PipelineError

Error for pipeline execution failure

ProjectPointsValueError

Error for bad ProjectPoints CLI values

PySAMVersionError

Version error for SAM installation

PySAMVersionWarning

Version warning for SAM installation

ResourceError

Error for poorly formatted resource.

SAMExecutionError

Execution error for SAM simulations

SAMExecutionWarning

Warning for problematic SAM execution

SAMInputError

Input error for SAM simulations

SAMInputWarning

Warning for bad SAM inputs

SlurmWarning

Warning for SLURM errors/warnings

SupplyCurveError

Execution error for SAM simulations

SupplyCurveInputError

Execution error for SAM simulations

WhileLoopPackingError

Error for stuck in while loop while packing

reVDeprecationWarning

Warning of deprecated feature.

reVError

Generic Error for reV

reVLossesValueError

Value Error for reV losses module.

reVLossesWarning

Warning for reV losses module.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.reVDeprecationWarning.html b/_autosummary/reV.utilities.exceptions.reVDeprecationWarning.html new file mode 100644 index 000000000..cca0c3ade --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.reVDeprecationWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.reVDeprecationWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.reVDeprecationWarning

+
+
+exception reVDeprecationWarning[source]
+

Warning of deprecated feature.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.reVError.html b/_autosummary/reV.utilities.exceptions.reVError.html new file mode 100644 index 000000000..d7ec3f8ab --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.reVError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.reVError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.reVError

+
+
+exception reVError[source]
+

Generic Error for reV

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.reVLossesValueError.html b/_autosummary/reV.utilities.exceptions.reVLossesValueError.html new file mode 100644 index 000000000..d2dfdcf64 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.reVLossesValueError.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.reVLossesValueError — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.reVLossesValueError

+
+
+exception reVLossesValueError[source]
+

Value Error for reV losses module.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.exceptions.reVLossesWarning.html b/_autosummary/reV.utilities.exceptions.reVLossesWarning.html new file mode 100644 index 000000000..2e76cbc42 --- /dev/null +++ b/_autosummary/reV.utilities.exceptions.reVLossesWarning.html @@ -0,0 +1,637 @@ + + + + + + + reV.utilities.exceptions.reVLossesWarning — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.exceptions.reVLossesWarning

+
+
+exception reVLossesWarning[source]
+

Warning for reV losses module.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.html b/_autosummary/reV.utilities.html new file mode 100644 index 000000000..9721d7bfe --- /dev/null +++ b/_autosummary/reV.utilities.html @@ -0,0 +1,665 @@ + + + + + + + reV.utilities — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities

+

reV utilities.

+

Functions

+ + + + + + +

log_versions(logger)

Log package versions: - rex and reV to info - h5py, numpy, pandas, scipy, and PySAM to debug

+

Classes

+ + + + + + +

ModuleName(value)

A collection of the module names available in reV.

+ + + + + + + + + + + + + + + + + + +

reV.utilities.cli_functions

General CLI utility functions.

reV.utilities.curtailment

Curtailment utility methods.

reV.utilities.exceptions

Custom Exceptions and Errors for reV

reV.utilities.pytest_utils

Functions used for pytests

reV.utilities.slots

Slotted memory framework classes.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.log_versions.html b/_autosummary/reV.utilities.log_versions.html new file mode 100644 index 000000000..073c2f4cd --- /dev/null +++ b/_autosummary/reV.utilities.log_versions.html @@ -0,0 +1,643 @@ + + + + + + + reV.utilities.log_versions — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.log_versions

+
+
+log_versions(logger)[source]
+

Log package versions: +- rex and reV to info +- h5py, numpy, pandas, scipy, and PySAM to debug

+
+
Parameters:
+

logger (logging.Logger) – Logger object to log memory message to.

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.pytest_utils.html b/_autosummary/reV.utilities.pytest_utils.html new file mode 100644 index 000000000..be0ac0f03 --- /dev/null +++ b/_autosummary/reV.utilities.pytest_utils.html @@ -0,0 +1,645 @@ + + + + + + + reV.utilities.pytest_utils — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.pytest_utils

+

Functions used for pytests

+

Functions

+ + + + + + + + + + + + +

make_fake_h5_chunks(td, features[, shuffle])

Make fake h5 chunks to test collection

pd_date_range(*args, **kwargs)

A simple wrapper on the pd.date_range() method that handles the closed vs.

write_chunk(meta, times, data, features, ...)

Write data chunk to an h5 file

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.pytest_utils.make_fake_h5_chunks.html b/_autosummary/reV.utilities.pytest_utils.make_fake_h5_chunks.html new file mode 100644 index 000000000..186ca3acf --- /dev/null +++ b/_autosummary/reV.utilities.pytest_utils.make_fake_h5_chunks.html @@ -0,0 +1,656 @@ + + + + + + + reV.utilities.pytest_utils.make_fake_h5_chunks — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.pytest_utils.make_fake_h5_chunks

+
+
+make_fake_h5_chunks(td, features, shuffle=False)[source]
+

Make fake h5 chunks to test collection

+
+
Parameters:
+
    +
  • td (tempfile.TemporaryDirectory) – Test TemporaryDirectory

  • +
  • features (list) – List of dsets to write to chunks

  • +
  • shuffle (bool) – Whether to shuffle gids

  • +
+
+
Returns:
+

    +
  • out_pattern (str) – Pattern for output file names

  • +
  • data (ndarray) – Full non-chunked data array

  • +
  • features (list) – List of feature names in output

  • +
  • s_slices (list) – List of spatial slices used to chunk full data array

  • +
  • times (pd.DatetimeIndex) – Times in output

  • +
+

+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.pytest_utils.pd_date_range.html b/_autosummary/reV.utilities.pytest_utils.pd_date_range.html new file mode 100644 index 000000000..ca4ba2200 --- /dev/null +++ b/_autosummary/reV.utilities.pytest_utils.pd_date_range.html @@ -0,0 +1,638 @@ + + + + + + + reV.utilities.pytest_utils.pd_date_range — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.pytest_utils.pd_date_range

+
+
+pd_date_range(*args, **kwargs)[source]
+

A simple wrapper on the pd.date_range() method that handles the closed +vs. inclusive kwarg change in pd 1.4.0

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.pytest_utils.write_chunk.html b/_autosummary/reV.utilities.pytest_utils.write_chunk.html new file mode 100644 index 000000000..6515d4d8a --- /dev/null +++ b/_autosummary/reV.utilities.pytest_utils.write_chunk.html @@ -0,0 +1,648 @@ + + + + + + + reV.utilities.pytest_utils.write_chunk — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.pytest_utils.write_chunk

+
+
+write_chunk(meta, times, data, features, out_file)[source]
+

Write data chunk to an h5 file

+
+
Parameters:
+
    +
  • meta (dict) – Dictionary of meta data for this chunk. Includes flattened lat and lon +arrays

  • +
  • times (pd.DatetimeIndex) – times in this chunk

  • +
  • features (list) – List of feature names in this chunk

  • +
  • out_file (str) – Name of output file

  • +
+
+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.slots.SlottedDict.html b/_autosummary/reV.utilities.slots.SlottedDict.html new file mode 100644 index 000000000..e5e89e409 --- /dev/null +++ b/_autosummary/reV.utilities.slots.SlottedDict.html @@ -0,0 +1,709 @@ + + + + + + + reV.utilities.slots.SlottedDict — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.slots.SlottedDict

+
+
+class SlottedDict[source]
+

Bases: object

+

Slotted memory dictionary emulator.

+

Methods

+ + + + + + + + + + + + + + + +

items()

Get an items iterator similar to a dictionary.

keys()

Get a keys list similar to a dictionary.

update(slotted_dict)

Add output variables from another instance into this instance.

values()

Get a values list similar to a dictionary.

+

Attributes

+ + + + + + +

var_list

+
+
+update(slotted_dict)[source]
+

Add output variables from another instance into this instance.

+
+
Parameters:
+

slotted_dict (SlottedDict) – An different instance of this class (slotted dictionary class) to +merge into this instance. Variable data in this instance could be +overwritten by the new data.

+
+
+
+ +
+
+items()[source]
+

Get an items iterator similar to a dictionary.

+
+
Parameters:
+

items (iterator) – [key, value] iterator similar to the output of dict.items()

+
+
+
+ +
+
+keys()[source]
+

Get a keys list similar to a dictionary.

+
+
Parameters:
+

key (list) – List of slotted variable names that have been set.

+
+
+
+ +
+
+values()[source]
+

Get a values list similar to a dictionary.

+
+
Parameters:
+

values (list) – List of slotted variable values that have been set.

+
+
+
+ +
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.utilities.slots.html b/_autosummary/reV.utilities.slots.html new file mode 100644 index 000000000..4fff1c174 --- /dev/null +++ b/_autosummary/reV.utilities.slots.html @@ -0,0 +1,639 @@ + + + + + + + reV.utilities.slots — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.utilities.slots

+

Slotted memory framework classes.

+

Classes

+ + + + + + +

SlottedDict()

Slotted memory dictionary emulator.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_autosummary/reV.version.html b/_autosummary/reV.version.html new file mode 100644 index 000000000..614e82c64 --- /dev/null +++ b/_autosummary/reV.version.html @@ -0,0 +1,630 @@ + + + + + + + reV.version — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV.version

+

reV Version number

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/cli.html b/_cli/cli.html new file mode 100644 index 000000000..72c4d5182 --- /dev/null +++ b/_cli/cli.html @@ -0,0 +1,655 @@ + + + + + + + Command Line Interfaces (CLIs) — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV batch.html b/_cli/reV batch.html new file mode 100644 index 000000000..c874a0e58 --- /dev/null +++ b/_cli/reV batch.html @@ -0,0 +1,819 @@ + + + + + + + reV batch — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV batch

+

Execute an analysis pipeline over a parametric set of inputs.

+

The general structure for calling this CLI command is given below (add --help to print help info to the terminal).

+
reV batch [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Required Path to the batch configuration file. Below is a sample template config

+
+
{
+    "logging": {
+        "log_file": null,
+        "log_level": "INFO"
+    },
+    "pipeline_config": "[REQUIRED]",
+    "sets": [
+        {
+            "args": "[REQUIRED]",
+            "files": "[REQUIRED]",
+            "set_tag": "set1"
+        },
+        {
+            "args": "[REQUIRED]",
+            "files": "[REQUIRED]",
+            "set_tag": "set2"
+        }
+    ]
+}
+
+
+
+
+

Parameters

+
+
loggingdict, optional

Dictionary containing keyword-argument pairs to pass to +init_logger. This +initializes logging for the batch command. Note that +each pipeline job submitted via batch has it’s own +logging key that will initialize pipeline step +logging. Therefore, it’s only ever necessary to use +this input if you want logging information about the +batching portion of the execution.

+
+
pipeline_configstr

Path to the pipeline configuration defining the commands to +run for every parametric set.

+
+
setslist of dicts

A list of dictionaries, where each dictionary defines a +“set” of parametric runs. Each dictionary should have +the following keys:

+
+
+
argsdict

A dictionary defining the arguments across all input +configuration files to parameterize. Each argument +to be parametrized should be a key in this +dictionary, and the value should be a list of the +parameter values to run for this argument (single-item lists +are allowed and can be used to vary a parameter value across +sets).

+
+
"args": {
+    "input_constant_1": [
+        18.02,
+        19.04
+    ],
+    "path_to_a_file": [
+        "/first/path.h5",
+        "/second/path.h5",
+        "/third/path.h5"
+    ]
+}
+
+
+
+

This example would run a total of six pipelines, one +with each of the following arg combinations:

+
input_constant_1=18.20, path_to_a_file="/first/path.h5"
+input_constant_1=18.20, path_to_a_file="/second/path.h5"
+input_constant_1=18.20, path_to_a_file="/third/path.h5"
+input_constant_1=19.04, path_to_a_file="/first/path.h5"
+input_constant_1=19.04, path_to_a_file="/second/path.h5"
+input_constant_1=19.04, path_to_a_file="/third/path.h5"
+
+
+

Remember that the keys in the args dictionary +should be part of (at least) one of your other +configuration files.

+
+
fileslist

A list of paths to the configuration files that +contain the arguments to be updated for every +parametric run. Arguments can be spread out over +multiple files. For example:

+
+
"files": [
+    "./config_run.yaml",
+    "./config_analyze.json"
+]
+
+
+
+
+
set_tagstr, optional

Optional string defining a set tag that will prefix +each job tag for this set. This tag does not need to +include an underscore, as that is provided during +concatenation.

+
+
+
+
+
+
+
+ +
+
+--dry
+

Flag to do a dry run (make batch dirs and update files without running the pipeline).

+
+ +
+
+--cancel
+

Flag to cancel all jobs associated associated with the batch_jobs.csv file in the current batch config directory.

+
+ +
+
+--delete
+

Flag to delete all batch job sub directories associated with the batch_jobs.csv file in the current batch config directory.

+
+ +
+
+--monitor-background
+

Flag to monitor all batch pipelines continuously in the background. Note that the stdout/stderr will not be captured, but you can set a pipeline "log_file" to capture logs.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV bespoke.html b/_cli/reV bespoke.html new file mode 100644 index 000000000..a73879350 --- /dev/null +++ b/_cli/reV bespoke.html @@ -0,0 +1,1096 @@ + + + + + + + reV bespoke — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV bespoke

+

Execute the bespoke step from a config file.

+

Much like generation, reV bespoke analysis runs SAM +simulations by piping in renewable energy resource data (usually +from the WTK), loading the SAM config, and then executing the +PySAM.Windpower.Windpower compute module. +However, unlike reV generation, bespoke analysis is +performed on the supply-curve grid resolution, and the plant +layout is optimized for every supply-curve point based on an +optimization objective specified by the user. See the NREL +publication on the bespoke methodology for more information.

+

See the documentation for the reV SAM class (e.g. +reV.SAM.generation.WindPower, +reV.SAM.generation.PvWattsv8, +reV.SAM.generation.Geothermal, etc.) for info on the +allowed and/or required SAM config file inputs.

+

The general structure for calling this CLI command is given below +(add --help to print help info to the terminal).

+
reV bespoke [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Required Path to the bespoke configuration file. Below is a sample template config

+
+
{
+    "execution_control": {
+        "option": "local",
+        "allocation": "[REQUIRED IF ON HPC]",
+        "walltime": "[REQUIRED IF ON HPC]",
+        "qos": "normal",
+        "memory": null,
+        "nodes": 1,
+        "queue": null,
+        "feature": null,
+        "conda_env": null,
+        "module": null,
+        "sh_script": null,
+        "max_workers": null
+    },
+    "log_directory": "./logs",
+    "log_level": "INFO",
+    "excl_fpath": "[REQUIRED]",
+    "res_fpath": "[REQUIRED]",
+    "tm_dset": "[REQUIRED]",
+    "objective_function": "[REQUIRED]",
+    "capital_cost_function": "[REQUIRED]",
+    "fixed_operating_cost_function": "[REQUIRED]",
+    "variable_operating_cost_function": "[REQUIRED]",
+    "project_points": "[REQUIRED]",
+    "sam_files": "[REQUIRED]",
+    "min_spacing": "5x",
+    "wake_loss_multiplier": 1,
+    "ga_kwargs": null,
+    "output_request": [
+        "system_capacity",
+        "cf_mean"
+    ],
+    "ws_bins": [
+        0.0,
+        20.0,
+        5.0
+    ],
+    "wd_bins": [
+        0.0,
+        360.0,
+        45.0
+    ],
+    "excl_dict": null,
+    "area_filter_kernel": "queen",
+    "min_area": null,
+    "resolution": 64,
+    "excl_area": null,
+    "data_layers": null,
+    "pre_extract_inclusions": false,
+    "prior_run": null,
+    "gid_map": null,
+    "bias_correct": null,
+    "pre_load_data": false
+}
+
+
+
+
+

Parameters

+
+
execution_controldict

Dictionary containing execution control arguments. Allowed arguments are:

+
+
option:
+

({‘local’, ‘kestrel’, ‘eagle’, ‘awspc’, ‘slurm’, ‘peregrine’}) +Hardware run option. Determines the type of job +scheduler to use as well as the base AU cost. The +“slurm” option is a catchall for HPC systems +that use the SLURM scheduler and should only be +used if desired hardware is not listed above. If +“local”, no other HPC-specific keys in are +required in execution_control (they are ignored +if provided).

+
+
allocation:
+

(str) +HPC project (allocation) handle.

+
+
walltime:
+

(int) +Node walltime request in hours.

+
+
qos:
+

(str, optional) +Quality-of-service specifier. On Eagle or +Kestrel, this should be one of {‘standby’, ‘normal’, +‘high’}. Note that ‘high’ priority doubles the AU +cost. By default, "normal".

+
+
memory:
+

(int, optional) +Node memory request in GB. By default, None, which +does not specify a memory limit.

+
+
nodes:
+

(int, optional) +Number of nodes to split the project points across. +Note that the total number of requested nodes for +a job may be larger than this value if the command +splits across other inputs. Default is 1.

+
+
max_workers:
+

(int, optional) +Number of local workers to run on. If None, uses all available cores (typically 36). By default, None.

+
+
queue:
+

(str, optional; PBS ONLY) +HPC queue to submit job to. Examples include: ‘debug’, +‘short’, ‘batch’, ‘batch-h’, ‘long’, etc. +By default, None, which uses “test_queue”.

+
+
feature:
+

(str, optional) +Additional flags for SLURM job (e.g. “-p debug”). +By default, None, which does not specify any +additional flags.

+
+
conda_env:
+

(str, optional) +Name of conda environment to activate. By default, +None, which does not load any environments.

+
+
module:
+

(str, optional) +Module to load. By default, None, which does not +load any modules.

+
+
sh_script:
+

(str, optional) +Extra shell script to run before command call. +By default, None, which does not run any +scripts.

+
+
+

Only the option key is required for local execution. For execution on the HPC, the allocation and walltime keys are also required. All other options are populated with default values, as seen above.

+
+
log_directorystr

Path to directory where logs should be written. Path can be relative and does not have to exist on disk (it will be created if missing). By default, "./logs".

+
+
log_level{“DEBUG”, “INFO”, “WARNING”, “ERROR”}

String representation of desired logger verbosity. Suitable options are DEBUG (most verbose), INFO (moderately verbose), WARNING (only log warnings and errors), and ERROR (only log errors). By default, "INFO".

+
+
excl_fpathstr | list | tuple

Filepath to exclusions data HDF5 file. The exclusions HDF5 file should contain the layers specified in excl_dict and data_layers. These layers may also be spread out across multiple HDF5 files, in which case this input should be a list or tuple of filepaths pointing to the files containing the layers. Note that each data layer must be uniquely defined (i.e.only appear once and in a single input file).

+
+
res_fpathstr

Filepath to wind resource data in NREL WTK format. This input can be path to a single resource HDF5 file or a path including a wildcard input like /h5_dir/prefix*suffix to run bespoke on multiple years of resource data. The former must be readable by rex.renewable_resource.WindResource while the latter must be readable by or rex.multi_year_resource.MultiYearWindResource (i.e. the resource data conform to the rex data format). This means the data file(s) must contain a 1D time_index dataset indicating the UTC time of observation, a 1D meta dataset represented by a DataFrame with site-specific columns, and 2D resource datasets that match the dimensions of (time_index, meta). The time index must start at 00:00 of January 1st of the year under consideration, and its shape must be a multiple of 8760.

+
+
tm_dsetstr

Dataset name in the excl_fpath file containing the techmap (exclusions-to-resource mapping data). This data layer links the supply curve GID’s to the generation GID’s that are used to evaluate the performance metrics of each wind plant. By default, the generation GID’s are assumed to match the resource GID’s, but this mapping can be customized via the gid_map input (see the documentation for gid_map for more details).

+
+

Important

+

This dataset uniquely couples the (typically +high-resolution) exclusion layers to the (typically +lower-resolution) resource data. Therefore, a separate +techmap must be used for every unique combination of +resource and exclusion coordinates.

+
+
+
objective_functionstr

The objective function of the optimization written out as a string. This expression should compute the objective to be minimized during layout optimization. Variables available for computation are:

+
+
    +
  • n_turbines: the number of turbines

  • +
  • system_capacity: wind plant capacity

  • +
  • aep: annual energy production

  • +
  • fixed_charge_rate: user input fixed_charge_rate if +included as part of the sam system config.

  • +
  • self.wind_plant: the SAM wind plant object, +through which all SAM variables can be accessed

  • +
  • capital_cost: plant capital cost as evaluated +by capital_cost_function

  • +
  • fixed_operating_cost: plant fixed annual operating +cost as evaluated by fixed_operating_cost_function

  • +
  • variable_operating_cost: plant variable annual +operating cost, as evaluated by +variable_operating_cost_function

  • +
+
+
+
capital_cost_functionstr

The plant capital cost function written out as a string. This expression must return the total plant capital cost in $. This expression has access to the same variables as the objective_function argument above.

+
+
fixed_operating_cost_functionstr

The plant annual fixed operating cost function written out as a string. This expression must return the fixed operating cost in $/year. This expression has access to the same variables as the objective_function argument above.

+
+
variable_operating_cost_functionstr

The plant annual variable operating cost function written out as a string. This expression must return the variable operating cost in $/kWh. This expression has access to the same variables as the objective_function argument above.

+
+
project_pointsint | list | tuple | str | dict | pd.DataFrame | slice

Input specifying which sites to process. A single integer representing the supply curve GID of a site may be specified to evaluate reV at a supply curve point. A list or tuple of integers (or slice) representing the supply curve GIDs of multiple sites can be specified to evaluate reV at multiple specific locations. A string pointing to a project points CSV file may also be specified. Typically, the CSV contains two columns:

+
+
    +
  • gid: Integer specifying the supply curve GID of +each site.

  • +
  • config: Key in the sam_files input dictionary +(see below) corresponding to the SAM configuration to +use for each particular site. This value can also be +None (or left out completely) if you specify only +a single SAM configuration file as the sam_files +input.

  • +
+
+

The CSV file may also contain site-specific inputs by including a column named after a config keyword (e.g. a column called capital_cost may be included to specify a site-specific capital cost value for each location). Columns that do not correspond to a config key may also be included, but they will be ignored. The CSV file input can also have these extra columns:

+
+
    +
  • capital_cost_multiplier

  • +
  • fixed_operating_cost_multiplier

  • +
  • variable_operating_cost_multiplier

  • +
+
+

These particular inputs are treated as multipliers to be applied to the respective cost curves (capital_cost_function, fixed_operating_cost_function, and variable_operating_cost_function) both during and after the optimization. A DataFrame following the same guidelines as the CSV input (or a dictionary that can be used to initialize such a DataFrame) may be used for this input as well. If you would like to obtain all available reV supply curve points to run, you can use the reV.supply_curve.extent.SupplyCurveExtent class like so:

+
import pandas as pd
+from reV.supply_curve.extent import SupplyCurveExtent
+
+excl_fpath = "..."
+resolution = ...
+with SupplyCurveExtent(excl_fpath, resolution) as sc:
+    points = sc.valid_sc_points(tm_dset).tolist()
+    points = pd.DataFrame({"gid": points})
+    points["config"] = "default"  # or a list of config choices
+
+# Use the points directly or save them to csv for CLI usage
+points.to_csv("project_points.csv", index=False)
+
+
+
+
sam_filesdict | str

A dictionary mapping SAM input configuration ID(s) to SAM configuration(s). Keys are the SAM config ID(s) which correspond to the config column in the project points CSV. Values for each key are either a path to a corresponding SAM config file or a full dictionary of SAM config inputs. For example:

+
sam_files = {
+    "default": "/path/to/default/sam.json",
+    "onshore": "/path/to/onshore/sam_config.yaml",
+    "offshore": {
+        "sam_key_1": "sam_value_1",
+        "sam_key_2": "sam_value_2",
+        ...
+    },
+    ...
+}
+
+
+

This input can also be a string pointing to a single SAM config file. In this case, the config column of the CSV points input should be set to None or left out completely. See the documentation for the reV SAM class (e.g. reV.SAM.generation.WindPower, reV.SAM.generation.PvWattsv8, reV.SAM.generation.Geothermal, etc.) for info on the allowed and/or required SAM config file inputs.

+
+
min_spacingfloat | int | str, optional

Minimum spacing between turbines (in meters). This input can also be a string like “5x”, which is interpreted as 5 times the turbine rotor diameter. By default, "5x".

+
+
wake_loss_multiplierfloat, optional

A multiplier used to scale the annual energy lost due to wake losses.

+
+

Warning

+

This multiplier will ONLY be applied during the +optimization process and will NOT come through in output +values such as the hourly profiles, aep, any of the cost +functions, or even the output objective.

+
+

By default, 1.

+
+
ga_kwargsdict, optional

Dictionary of keyword arguments to pass to GA initialization. If None, default initialization values are used. See GeneticAlgorithm for a description of the allowed keyword arguments. By default, None.

+
+
output_requestlist | tuple, optional

Outputs requested from the SAM windpower simulation after the bespoke plant layout optimization. Can be any of the parameters in the “Outputs” group of the PySAM module PySAM.Windpower.Windpower.Outputs, PySAM module. This list can also include a select number of SAM config/resource parameters to include in the output: any key in any of the output attribute JSON files may be requested. Time-series profiles requested via this input are output in UTC. This input can also be used to request resource means like "ws_mean", "windspeed_mean", "temperature_mean", and "pressure_mean". By default, ('system_capacity', 'cf_mean').

+
+
ws_binstuple, optional

A 3-entry tuple with (start, stop, step) for the windspeed binning of the wind joint probability distribution. The stop value is inclusive, so ws_bins=(0, 20, 5) would result in four bins with bin edges (0, 5, 10, 15, 20). By default, (0.0, 20.0, 5.0).

+
+
wd_binstuple, optional

A 3-entry tuple with (start, stop, step) for the wind direction binning of the wind joint probability distribution. The stop value is inclusive, so wd_bins=(0, 360, 90) would result in four bins with bin edges (0, 90, 180, 270, 360). By default, (0.0, 360.0, 45.0).

+
+
excl_dictdict, optional

Dictionary of exclusion keyword arguments of the format {layer_dset_name: {kwarg: value}}, where layer_dset_name is a dataset in the exclusion h5 file and the kwarg: value pair is a keyword argument to the reV.supply_curve.exclusions.LayerMask class. For example:

+
excl_dict = {
+    "typical_exclusion": {
+        "exclude_values": 255,
+    },
+    "another_exclusion": {
+        "exclude_values": [2, 3],
+        "weight": 0.5
+    },
+    "exclusion_with_nodata": {
+        "exclude_range": [10, 100],
+        "exclude_nodata": True,
+        "nodata_value": -1
+    },
+    "partial_setback": {
+        "use_as_weights": True
+    },
+    "height_limit": {
+        "exclude_range": [0, 200]
+    },
+    "slope": {
+        "include_range": [0, 20]
+    },
+    "developable_land": {
+        "force_include_values": 42
+    },
+    "more_developable_land": {
+        "force_include_range": [5, 10]
+    },
+    ...
+}
+
+
+

Note that all the keys given in this dictionary should be datasets of the excl_fpath file. If None or empty dictionary, no exclusions are applied. By default, None.

+
+
area_filter_kernel{“queen”, “rook”}, optional

Contiguous area filter method to use on final exclusions mask. The filters are defined as:

+
# Queen:     # Rook:
+[[1,1,1],    [[0,1,0],
+ [1,1,1],     [1,1,1],
+ [1,1,1]]     [0,1,0]]
+
+
+

These filters define how neighboring pixels are “connected”. Once pixels in the final exclusion layer are connected, the area of each resulting cluster is computed and compared against the min_area input. Any cluster with an area less than min_area is excluded from the final mask. This argument has no effect if min_area is None. By default, "queen".

+
+
min_areafloat, optional

Minimum area (in km2) required to keep an isolated cluster of (included) land within the resulting exclusions mask. Any clusters of land with areas less than this value will be marked as exclusions. See the documentation for area_filter_kernel for an explanation of how the area of each land cluster is computed. If None, no area filtering is performed. By default, None.

+
+
resolutionint, optional

Supply Curve resolution. This value defines how many pixels are in a single side of a supply curve cell. For example, a value of 64 would generate a supply curve where the side of each supply curve cell is 64x64 exclusion pixels. By default, 64.

+
+
excl_areafloat, optional

Area of a single exclusion mask pixel (in km2). If None, this value will be inferred from the profile transform attribute in excl_fpath. By default, None.

+
+
data_layersdict, optional

Dictionary of aggregation data layers of the format:

+
data_layers = {
+    "output_layer_name": {
+        "dset": "layer_name",
+        "method": "mean",
+        "fpath": "/path/to/data.h5"
+    },
+    "another_output_layer_name": {
+        "dset": "input_layer_name",
+        "method": "mode",
+        # optional "fpath" key omitted
+    },
+    ...
+}
+
+
+

The "output_layer_name" is the column name under which the aggregated data will appear in the meta DataFrame of the output file. The "output_layer_name" does not have to match the dset input value. The latter should match the layer name in the HDF5 from which the data to aggregate should be pulled. The method should be one of {"mode", "mean", "min", "max", "sum", "category"}, describing how the high-resolution data should be aggregated for each supply curve point. fpath is an optional key that can point to an HDF5 file containing the layer data. If left out, the data is assumed to exist in the file(s) specified by the excl_fpath input. If None, no data layer aggregation is performed. By default, None.

+
+
pre_extract_inclusionsbool, optional

Optional flag to pre-extract/compute the inclusion mask from the excl_dict input. It is typically faster to compute the inclusion mask on the fly with parallel workers. By default, False.

+
+
prior_runstr, optional

Optional filepath to a bespoke output HDF5 file belonging to a prior run. If specified, this module will only run the timeseries power generation step and assume that all of the wind plant layouts are fixed from the prior run. The meta data of this file must contain the following columns (automatically satisfied if the HDF5 file was generated by reV bespoke):

+
+
    +
  • capacity : Capacity of the plant, in MW.

  • +
  • turbine_x_coords: A string representation of a +python list containing the X coordinates (in m; origin +of cell at bottom left) of the turbines within the +plant (supply curve cell).

  • +
  • turbine_y_coords : A string representation of a +python list containing the Y coordinates (in m; origin +of cell at bottom left) of the turbines within the +plant (supply curve cell).

  • +
+
+

If None, no previous run data is considered. By default, None

+
+
gid_mapstr | dict, optional

Mapping of unique integer generation gids (keys) to single integer resource gids (values). This enables unique generation gids in the project points to map to non-unique resource gids, which can be useful when evaluating multiple resource datasets in reV (e.g., forecasted ECMWF resource data to complement historical WTK meteorology). This input can be a pre-extracted dictionary or a path to a JSON or CSV file. If this input points to a CSV file, the file must have the columns gid (which matches the project points) and gid_map (gids to extract from the resource input). If None, the GID values in the project points are assumed to match the resource GID values. By default, None.

+
+
bias_correctstr | pd.DataFrame, optional

Optional DataFrame or CSV filepath to a wind or solar resource bias correction table. This has columns:

+
+
    +
  • gid: GID of site (can be index name)

  • +
  • adder: Value to add to resource at each site

  • +
  • scalar: Value to scale resource at each site by

  • +
+
+

The gid field should match the true resource gid regardless of the optional gid_map input. If both adder and scalar are present, the wind or solar resource is corrected by \((res*scalar)+adder\). If either is missing, scalar defaults to 1 and adder to 0. Only windspeed or GHI + DNI are corrected, depending on the technology (wind for the former, solar for the latter). GHI and DNI are corrected with the same correction factors. If None, no corrections are applied. By default, None.

+
+
pre_load_databool, optional

Option to pre-load resource data. This step can be time-consuming up front, but it drastically reduces the number of parallel reads to the res_fpath HDF5 file(s), and can have a significant overall speedup on systems with slow parallel I/O capabilities. Pre-loaded data can use a significant amount of RAM, so be sure to split execution across many nodes (e.g. 100 nodes, 36 workers each for CONUS) or request large amounts of memory for a smaller number of nodes. By default, False.

+
+
log_directorystr

Path to log output directory.

+
+
+

Note that you may remove any keys with a null value if you do not intend to update them yourself.

+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV collect.html b/_cli/reV collect.html new file mode 100644 index 000000000..71f43aa86 --- /dev/null +++ b/_cli/reV collect.html @@ -0,0 +1,787 @@ + + + + + + + reV collect — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV collect

+

Execute the collect step from a config file.

+

Collect data generated across multiple nodes into a single HDF5 +file.

+

The general structure for calling this CLI command is given below +(add --help to print help info to the terminal).

+
reV collect [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Required Path to the collect configuration file. Below is a sample template config

+
+
{
+    "execution_control": {
+        "option": "local",
+        "allocation": "[REQUIRED IF ON HPC]",
+        "walltime": "[REQUIRED IF ON HPC]",
+        "qos": "normal",
+        "memory": null,
+        "queue": null,
+        "feature": null,
+        "conda_env": null,
+        "module": null,
+        "sh_script": null
+    },
+    "log_directory": "./logs",
+    "log_level": "INFO",
+    "project_points": null,
+    "datasets": null,
+    "purge_chunks": false,
+    "clobber": true,
+    "collect_pattern": "PIPELINE"
+}
+
+
+
+
+

Parameters

+
+
execution_controldict

Dictionary containing execution control arguments. Allowed arguments are:

+
+
option:
+

({‘local’, ‘kestrel’, ‘eagle’, ‘awspc’, ‘slurm’, ‘peregrine’}) +Hardware run option. Determines the type of job +scheduler to use as well as the base AU cost. The +“slurm” option is a catchall for HPC systems +that use the SLURM scheduler and should only be +used if desired hardware is not listed above. If +“local”, no other HPC-specific keys are +required in execution_control (they are ignored +if provided).

+
+
allocation:
+

(str) +HPC project (allocation) handle.

+
+
walltime:
+

(int) +Node walltime request in hours.

+
+
qos:
+

(str, optional) +Quality-of-service specifier. On Eagle or +Kestrel, this should be one of {‘standby’, ‘normal’, +‘high’}. Note that ‘high’ priority doubles the AU +cost. By default, "normal".

+
+
memory:
+

(int, optional) +Node memory request in GB. By default, None, which +does not specify a memory limit.

+
+
queue:
+

(str, optional; PBS ONLY) +HPC queue to submit job to. Examples include: ‘debug’, +‘short’, ‘batch’, ‘batch-h’, ‘long’, etc. +By default, None, which uses “test_queue”.

+
+
feature:
+

(str, optional) +Additional flags for SLURM job (e.g. “-p debug”). +By default, None, which does not specify any +additional flags.

+
+
conda_env:
+

(str, optional) +Name of conda environment to activate. By default, +None, which does not load any environments.

+
+
module:
+

(str, optional) +Module to load. By default, None, which does not +load any modules.

+
+
sh_script:
+

(str, optional) +Extra shell script to run before command call. +By default, None, which does not run any +scripts.

+
+
+

Only the option key is required for local execution. For execution on the HPC, the allocation and walltime keys are also required. All other options are populated with default values, as seen above.

+
+
log_directorystr

Path to directory where logs should be written. Path can be relative and does not have to exist on disk (it will be created if missing). By default, "./logs".

+
+
log_level{“DEBUG”, “INFO”, “WARNING”, “ERROR”}

String representation of desired logger verbosity. Suitable options are DEBUG (most verbose), INFO (moderately verbose), WARNING (only log warnings and errors), and ERROR (only log errors). By default, "INFO".

+
+
project_pointsstr | list, optional

This input should represent the project points that correspond to the full collection of points contained in the input HDF5 files to be collected. You may simply point to a ProjectPoints csv file that contains the GID’s that should be collected. You may also input the GID’s as a list, though this may not be suitable for collections with a large number of points. You may also set this to input to None to generate a list of GID’s automatically from the input files. By default, None.

+
+
datasetslist of str, optional

List of dataset names to collect into the output file. If collection is performed into multiple files (i.e. multiple input patterns), this list can contain all relevant datasets across all files (a warning will be thrown, but it is safe to ignore it). If None, all datasets from the input files are collected. By default, None.

+
+
purge_chunksbool, optional

Option to delete single-node input HDF5 files. Note that the input files will not be removed if any of the datasets they contain have not been collected, regardless of the value of this input. By default, False.

+
+
clobberbool, optional

Flag to purge all collection output HDF5 files prior to running the collection step if they exist on disk. This helps avoid any surprising data byproducts when re-running the collection step in a project directory. By default, True.

+
+
collect_patternstr | list | dict, optional

Unix-style /filepath/pattern*.h5 representing the files to be collected into a single output HDF5 file. If no output file path is specified (i.e. this input is a single pattern or a list of patterns), the output file path will be inferred from the pattern itself (specifically, the wildcard will be removed and the result will be the output file path). If a list of patterns is provided, each pattern will be collected into a separate output file. To specify the name of the output file(s), set this input to a dictionary where the keys are paths to the output file (including the filename itself; relative paths are allowed) and the values are patterns representing the files that should be collected into the output file. If running a collect job as part of a pipeline, this input can be set to "PIPELINE", which will parse the output of the previous step and generate the input file pattern and output file name automatically. By default, "PIPELINE".

+
+
+

Note that you may remove any keys with a null value if you do not intend to update them yourself.

+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV econ.html b/_cli/reV econ.html new file mode 100644 index 000000000..fae76622a --- /dev/null +++ b/_cli/reV econ.html @@ -0,0 +1,892 @@ + + + + + + + reV econ — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV econ

+

Execute the econ step from a config file.

+

reV econ analysis runs SAM econ calculations, typically to +compute LCOE (using PySAM.Lcoefcr.Lcoefcr), though +PySAM.Singleowner.Singleowner or +PySAM.Windbos.Windbos calculations can also be +performed simply by requesting outputs from those computation +modules. See the keys of +Econ.OPTIONS for all +available econ outputs. Econ computations rely on an input +generation (i.e. capacity factor) profile. You can request +reV to run the analysis for one or more “sites”, which +correspond to the meta indices in the generation data.

+

The general structure for calling this CLI command is given below +(add --help to print help info to the terminal).

+
reV econ [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Required Path to the econ configuration file. Below is a sample template config

+
+
{
+    "execution_control": {
+        "option": "local",
+        "allocation": "[REQUIRED IF ON HPC]",
+        "walltime": "[REQUIRED IF ON HPC]",
+        "qos": "normal",
+        "memory": null,
+        "nodes": 1,
+        "queue": null,
+        "feature": null,
+        "conda_env": null,
+        "module": null,
+        "sh_script": null,
+        "max_workers": 1,
+        "sites_per_worker": 100,
+        "memory_utilization_limit": 0.4,
+        "timeout": 1800,
+        "pool_size": null
+    },
+    "log_directory": "./logs",
+    "log_level": "INFO",
+    "project_points": "[REQUIRED]",
+    "sam_files": "[REQUIRED]",
+    "cf_file": "[REQUIRED]",
+    "site_data": null,
+    "output_request": [
+        "lcoe_fcr"
+    ],
+    "append": false,
+    "analysis_years": null
+}
+
+
+
+
+

Parameters

+
+
execution_controldict

Dictionary containing execution control arguments. Allowed arguments are:

+
+
option:
+

({‘local’, ‘kestrel’, ‘eagle’, ‘awspc’, ‘slurm’, ‘peregrine’}) +Hardware run option. Determines the type of job +scheduler to use as well as the base AU cost. The +“slurm” option is a catchall for HPC systems +that use the SLURM scheduler and should only be +used if desired hardware is not listed above. If +“local”, no other HPC-specific keys are +required in execution_control (they are ignored +if provided).

+
+
allocation:
+

(str) +HPC project (allocation) handle.

+
+
walltime:
+

(int) +Node walltime request in hours.

+
+
qos:
+

(str, optional) +Quality-of-service specifier. On Eagle or +Kestrel, this should be one of {‘standby’, ‘normal’, +‘high’}. Note that ‘high’ priority doubles the AU +cost. By default, "normal".

+
+
memory:
+

(int, optional) +Node memory request in GB. By default, None, which +does not specify a memory limit.

+
+
nodes:
+

(int, optional) +Number of nodes to split the project points across. +Note that the total number of requested nodes for +a job may be larger than this value if the command +splits across other inputs. Default is 1.

+
+
max_workers:
+

(int, optional) +Number of local workers to run on. By default, 1.

+
+
sites_per_worker:
+

(int, optional) +Number of sites to run in series on a worker. None defaults to the resource file chunk size. By default, None.

+
+
memory_utilization_limit:
+

(float, optional) +Memory utilization limit (fractional). Must be a value between 0 and 1. This input sets how many site results will be stored in-memory at any given time before flushing to disk. By default, 0.4.

+
+
timeout:
+

(int, optional) +Number of seconds to wait for parallel run iteration to complete before returning zeros. By default, 1800 seconds.

+
+
pool_size:
+

(int, optional) +Number of futures to submit to a single process pool for parallel futures. If None, the pool size is set to os.cpu_count() * 2. By default, None.

+
+
queue:
+

(str, optional; PBS ONLY) +HPC queue to submit job to. Examples include: ‘debug’, +‘short’, ‘batch’, ‘batch-h’, ‘long’, etc. +By default, None, which uses “test_queue”.

+
+
feature:
+

(str, optional) +Additional flags for SLURM job (e.g. “-p debug”). +By default, None, which does not specify any +additional flags.

+
+
conda_env:
+

(str, optional) +Name of conda environment to activate. By default, +None, which does not load any environments.

+
+
module:
+

(str, optional) +Module to load. By default, None, which does not +load any modules.

+
+
sh_script:
+

(str, optional) +Extra shell script to run before command call. +By default, None, which does not run any +scripts.

+
+
+

Only the option key is required for local execution. For execution on the HPC, the allocation and walltime keys are also required. All other options are populated with default values, as seen above.

+
+
log_directorystr

Path to directory where logs should be written. Path can be relative and does not have to exist on disk (it will be created if missing). By default, "./logs".

+
+
log_level{“DEBUG”, “INFO”, “WARNING”, “ERROR”}

String representation of desired logger verbosity. Suitable options are DEBUG (most verbose), INFO (moderately verbose), WARNING (only log warnings and errors), and ERROR (only log errors). By default, "INFO".

+
+
project_pointsint | list | tuple | str | dict | pd.DataFrame | slice

Input specifying which sites to process. A single integer representing the GID of a site may be specified to evaluate reV at a single location. A list or tuple of integers (or slice) representing the GIDs of multiple sites can be specified to evaluate reV at multiple specific locations. A string pointing to a project points CSV file may also be specified. Typically, the CSV contains two columns:

+
+
    +
  • gid: Integer specifying the GID of each site.

  • +
  • config: Key in the sam_files input dictionary +(see below) corresponding to the SAM configuration to +use for each particular site. This value can also be +None (or left out completely) if you specify only +a single SAM configuration file as the sam_files +input.

  • +
+
+

The CSV file may also contain site-specific inputs by including a column named after a config keyword (e.g. a column called capital_cost may be included to specify a site-specific capital cost value for each location). Columns that do not correspond to a config key may also be included, but they will be ignored. A DataFrame following the same guidelines as the CSV input (or a dictionary that can be used to initialize such a DataFrame) may be used for this input as well.

+
+
sam_filesdict | str

A dictionary mapping SAM input configuration ID(s) to SAM configuration(s). Keys are the SAM config ID(s) which correspond to the config column in the project points CSV. Values for each key are either a path to a corresponding SAM config file or a full dictionary of SAM config inputs. For example:

+
sam_files = {
+    "default": "/path/to/default/sam.json",
+    "onshore": "/path/to/onshore/sam_config.yaml",
+    "offshore": {
+        "sam_key_1": "sam_value_1",
+        "sam_key_2": "sam_value_2",
+        ...
+    },
+    ...
+}
+
+
+

This input can also be a string pointing to a single SAM config file. In this case, the config column of the CSV points input should be set to None or left out completely. See the documentation for the reV SAM class (e.g. reV.SAM.generation.WindPower, reV.SAM.generation.PvWattsv8, reV.SAM.generation.Geothermal, etc.) for documentation on the allowed and/or required SAM config file inputs.

+
+
cf_filestr

Path to reV output generation file containing a capacity factor output.

+
+

Note

+

If executing reV from the command line, this +path can contain brackets {} that will be filled in +by the analysis_years input. Alternatively, this input +can be set to "PIPELINE" to parse the output of the +previous step (reV generation) and use it as input to +this call. However, note that duplicate executions of +reV generation within the pipeline may invalidate this +parsing, meaning the cf_file input will have to be +specified manually.

+
+
+
site_datastr | pd.DataFrame, optional

Site-specific input data for SAM calculation. If this input is a string, it should be a path that points to a CSV file. Otherwise, this input should be a DataFrame with pre-extracted site data. Rows in this table should match the input sites via a gid column. The rest of the columns should match configuration input keys that will take site-specific values. Note that some or all site-specific inputs can be specified via the project_points input table instead. If None, no site-specific data is considered. By default, None.

+
+
output_requestlist | tuple, optional

List of output variables requested from SAM. Can be any of the parameters in the “Outputs” group of the PySAM module (e.g. PySAM.Windpower.Windpower.Outputs, PySAM.Pvwattsv8.Pvwattsv8.Outputs, PySAM.Geothermal.Geothermal.Outputs, etc.) being executed. This list can also include a select number of SAM config/resource parameters to include in the output: any key in any of the output attribute JSON files may be requested. Time-series profiles requested via this input are output in UTC. By default, ('lcoe_fcr',).

+
+
appendbool

Option to append econ datasets to source cf_file. By default, False.

+
+
log_directorystr

Path to log output directory.

+
+
analysis_yearsint | list, optional

A single year or list of years to perform analysis for. These years will be used to fill in any brackets {} in the resource_file input. If None, the resource_file input is assumed to be the full path to the single resource file to be processed. By default, None.

+
+
+

Note that you may remove any keys with a null value if you do not intend to update them yourself.

+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV generation.html b/_cli/reV generation.html new file mode 100644 index 000000000..93b3f8bac --- /dev/null +++ b/_cli/reV generation.html @@ -0,0 +1,981 @@ + + + + + + + reV generation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV generation

+

Execute the generation step from a config file.

+

reV generation analysis runs SAM simulations by piping in +renewable energy resource data (usually from the NSRDB or WTK), +loading the SAM config, and then executing the PySAM compute +module for a given technology. See the documentation for the +reV SAM class (e.g. reV.SAM.generation.WindPower, +reV.SAM.generation.PvWattsv8, +reV.SAM.generation.Geothermal, etc.) for info on the +allowed and/or required SAM config file inputs. If economic +parameters are supplied in the SAM config, then you can bundle a +“follow-on” econ calculation by just adding the desired econ +output keys to the output_request. You can request reV to +run the analysis for one or more “sites”, which correspond to +the meta indices in the resource data (also commonly called the +gid's).

+

The general structure for calling this CLI command is given below +(add --help to print help info to the terminal).

+
reV generation [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Required Path to the generation configuration file. Below is a sample template config

+
+
{
+    "execution_control": {
+        "option": "local",
+        "allocation": "[REQUIRED IF ON HPC]",
+        "walltime": "[REQUIRED IF ON HPC]",
+        "qos": "normal",
+        "memory": null,
+        "nodes": 1,
+        "queue": null,
+        "feature": null,
+        "conda_env": null,
+        "module": null,
+        "sh_script": null,
+        "max_workers": 1,
+        "sites_per_worker": null,
+        "memory_utilization_limit": 0.4,
+        "timeout": 1800,
+        "pool_size": null
+    },
+    "log_directory": "./logs",
+    "log_level": "INFO",
+    "technology": "[REQUIRED]",
+    "project_points": "[REQUIRED]",
+    "sam_files": "[REQUIRED]",
+    "resource_file": "[REQUIRED]",
+    "low_res_resource_file": null,
+    "output_request": [
+        "cf_mean"
+    ],
+    "site_data": null,
+    "curtailment": null,
+    "gid_map": null,
+    "drop_leap": false,
+    "scale_outputs": true,
+    "write_mapped_gids": false,
+    "bias_correct": null,
+    "analysis_years": null
+}
+
+
+
+
+

Parameters

+
+
execution_controldict

Dictionary containing execution control arguments. Allowed arguments are:

+
+
option:
+

({‘local’, ‘kestrel’, ‘eagle’, ‘awspc’, ‘slurm’, ‘peregrine’}) +Hardware run option. Determines the type of job +scheduler to use as well as the base AU cost. The +“slurm” option is a catchall for HPC systems +that use the SLURM scheduler and should only be +used if desired hardware is not listed above. If +“local”, no other HPC-specific keys are +required in execution_control (they are ignored +if provided).

+
+
allocation:
+

(str) +HPC project (allocation) handle.

+
+
walltime:
+

(int) +Node walltime request in hours.

+
+
qos:
+

(str, optional) +Quality-of-service specifier. On Eagle or +Kestrel, this should be one of {‘standby’, ‘normal’, +‘high’}. Note that ‘high’ priority doubles the AU +cost. By default, "normal".

+
+
memory:
+

(int, optional) +Node memory request in GB. By default, None, which +does not specify a memory limit.

+
+
nodes:
+

(int, optional) +Number of nodes to split the project points across. +Note that the total number of requested nodes for +a job may be larger than this value if the command +splits across other inputs. Default is 1.

+
+
max_workers:
+

(int, optional) +Number of local workers to run on. By default, 1.

+
+
sites_per_worker:
+

(int, optional) +Number of sites to run in series on a worker. None defaults to the resource file chunk size. By default, None.

+
+
memory_utilization_limit:
+

(float, optional) +Memory utilization limit (fractional). Must be a value between 0 and 1. This input sets how many site results will be stored in-memory at any given time before flushing to disk. By default, 0.4.

+
+
timeout:
+

(int, optional) +Number of seconds to wait for parallel run iteration to complete before returning zeros. By default, 1800 seconds.

+
+
pool_size:
+

(int, optional) +Number of futures to submit to a single process pool for parallel futures. If None, the pool size is set to os.cpu_count() * 2. By default, None.

+
+
queue:
+

(str, optional; PBS ONLY) +HPC queue to submit job to. Examples include: ‘debug’, +‘short’, ‘batch’, ‘batch-h’, ‘long’, etc. +By default, None, which uses “test_queue”.

+
+
feature:
+

(str, optional) +Additional flags for SLURM job (e.g. “-p debug”). +By default, None, which does not specify any +additional flags.

+
+
conda_env:
+

(str, optional) +Name of conda environment to activate. By default, +None, which does not load any environments.

+
+
module:
+

(str, optional) +Module to load. By default, None, which does not +load any modules.

+
+
sh_script:
+

(str, optional) +Extra shell script to run before command call. +By default, None, which does not run any +scripts.

+
+
+

Only the option key is required for local execution. For execution on the HPC, the allocation and walltime keys are also required. All other options are populated with default values, as seen above.

+
+
log_directorystr

Path to directory where logs should be written. Path can be relative and does not have to exist on disk (it will be created if missing). By default, "./logs".

+
+
log_level{“DEBUG”, “INFO”, “WARNING”, “ERROR”}

String representation of desired logger verbosity. Suitable options are DEBUG (most verbose), INFO (moderately verbose), WARNING (only log warnings and errors), and ERROR (only log errors). By default, "INFO".

+
+
technologystr

String indicating which SAM technology to analyze. Must be one of the keys of OPTIONS. The string should be lower-cased with spaces and underscores removed.

+
+
project_pointsint | list | tuple | str | dict | pd.DataFrame | slice

Input specifying which sites to process. A single integer representing the generation GID of a site may be specified to evaluate reV at a single location. A list or tuple of integers (or slice) representing the generation GIDs of multiple sites can be specified to evaluate reV at multiple specific locations. A string pointing to a project points CSV file may also be specified. Typically, the CSV contains two columns:

+
+
    +
  • gid: Integer specifying the generation GID of each +site.

  • +
  • config: Key in the sam_files input dictionary +(see below) corresponding to the SAM configuration to +use for each particular site. This value can also be +None (or left out completely) if you specify only +a single SAM configuration file as the sam_files +input.

  • +
+
+

The CSV file may also contain site-specific inputs by including a column named after a config keyword (e.g. a column called capital_cost may be included to specify a site-specific capital cost value for each location). Columns that do not correspond to a config key may also be included, but they will be ignored. A DataFrame following the same guidelines as the CSV input (or a dictionary that can be used to initialize such a DataFrame) may be used for this input as well.

+
+

Note

+

By default, the generation GID of each site is +assumed to match the resource GID to be evaluated for that +site. However, unique generation GID’s can be mapped to +non-unique resource GID’s via the gid_map input (see the +documentation for gid_map for more details).

+
+
+
sam_filesdict | str

A dictionary mapping SAM input configuration ID(s) to SAM configuration(s). Keys are the SAM config ID(s) which correspond to the config column in the project points CSV. Values for each key are either a path to a corresponding SAM config file or a full dictionary of SAM config inputs. For example:

+
sam_files = {
+    "default": "/path/to/default/sam.json",
+    "onshore": "/path/to/onshore/sam_config.yaml",
+    "offshore": {
+        "sam_key_1": "sam_value_1",
+        "sam_key_2": "sam_value_2",
+        ...
+    },
+    ...
+}
+
+
+

This input can also be a string pointing to a single SAM config file. In this case, the config column of the CSV points input should be set to None or left out completely. See the documentation for the reV SAM class (e.g. reV.SAM.generation.WindPower, reV.SAM.generation.PvWattsv8, reV.SAM.generation.Geothermal, etc.) for info on the allowed and/or required SAM config file inputs.

+
+
resource_filestr

Filepath to resource data. This input can be path to a single resource HDF5 file, a path to a directory containing data spread across multiple HDF5 files, or a path including a wildcard input like /h5_dir/prefix*suffix. In all cases, the resource data must be readable by rex.resource.Resource or rex.multi_file_resource.MultiFileResource. (i.e. the resource data conform to the rex data format). This means the data file(s) must contain a 1D time_index dataset indicating the UTC time of observation, a 1D meta dataset represented by a DataFrame with site-specific columns, and 2D resource datasets that match the dimensions of (time_index, meta). The time index must start at 00:00 of January 1st of the year under consideration, and its shape must be a multiple of 8760.

+
+

Note

+

If executing reV from the command line, this +path can contain brackets {} that will be filled in by +the analysis_years input.

+
+
+

Important

+

If you are using custom resource data (i.e. +not NSRDB/WTK/Sup3rCC, etc.), ensure the following:

+
+
    +
  • The data conforms to the +rex data format.

  • +
  • The meta DataFrame is organized such that every +row is a pixel and at least the columns +latitude, longitude, timezone, and +elevation are given for each location.

  • +
  • The time index and associated temporal data is in +UTC.

  • +
  • The latitude is between -90 and 90 and longitude is +between -180 and 180.

  • +
  • For solar data, ensure the DNI/DHI are not zero. You +can calculate one of these inputs from the +other using the relationship

    +
    +\[GHI = DNI * cos(SZA) + DHI\]
    +
  • +
+
+
+
+
low_res_resource_filestr, optional

Optional low resolution resource file that will be dynamically mapped+interpolated to the nominal-resolution resource_file. This needs to be of the same format as resource_file - both files need to be handled by the same rex Resource handler (e.g. WindResource). All of the requirements from the resource_file apply to this input as well. If None, no dynamic mapping to higher resolutions is performed. By default, None.

+
+
output_requestlist | tuple, optional

List of output variables requested from SAM. Can be any of the parameters in the “Outputs” group of the PySAM module (e.g. PySAM.Windpower.Windpower.Outputs, PySAM.Pvwattsv8.Pvwattsv8.Outputs, PySAM.Geothermal.Geothermal.Outputs, etc.) being executed. This list can also include a select number of SAM config/resource parameters to include in the output: any key in any of the output attribute JSON files may be requested. If cf_mean is not included in this list, it will automatically be added. Time-series profiles requested via this input are output in UTC.

+
+

Note

+

If you are performing reV solar runs using +PVWatts and would like reV to include AC capacity +values in your aggregation/supply curves, then you must +include the "dc_ac_ratio" time series as an output in +output_request when running reV generation. The AC +capacity outputs will automatically be added during the +aggregation/supply curve step if the "dc_ac_ratio" +dataset is detected in the generation file.

+
+

By default, ('cf_mean',).

+
+
site_datastr | pd.DataFrame, optional

Site-specific input data for SAM calculation. If this input is a string, it should be a path that points to a CSV file. Otherwise, this input should be a DataFrame with pre-extracted site data. Rows in this table should match the input sites via a gid column. The rest of the columns should match configuration input keys that will take site-specific values. Note that some or all site-specific inputs can be specified via the project_points input table instead. If None, no site-specific data is considered. By default, None.

+
+
curtailmentdict | str, optional

Inputs for curtailment parameters, which can be:

+
+
    +
  • Explicit namespace of curtailment variables (dict)

  • +
  • Pointer to curtailment config file with path (str)

  • +
+
+

The allowed key-value input pairs in the curtailment configuration are documented as properties of the reV.config.curtailment.Curtailment class. If None, no curtailment is modeled. By default, None.

+
+
gid_mapdict | str, optional

Mapping of unique integer generation gids (keys) to single integer resource gids (values). This enables unique generation gids in the project points to map to non-unique resource gids, which can be useful when evaluating multiple resource datasets in reV (e.g., forecasted ECMWF resource data to complement historical WTK meteorology). This input can be a pre-extracted dictionary or a path to a JSON or CSV file. If this input points to a CSV file, the file must have the columns gid (which matches the project points) and gid_map (gids to extract from the resource input). If None, the GID values in the project points are assumed to match the resource GID values. By default, None.

+
+
drop_leapbool, optional

Drop leap day instead of final day of year when handling leap years. By default, False.

+
+
scale_outputsbool, optional

Flag to scale outputs in-place immediately upon Gen returning data. By default, True.

+
+
write_mapped_gidsbool, optional

Option to write mapped gids to output meta instead of resource gids. By default, False.

+
+
bias_correctstr | pd.DataFrame, optional

Optional DataFrame or CSV filepath to a wind or solar resource bias correction table. This has columns:

+
+
    +
  • gid: GID of site (can be index name)

  • +
  • adder: Value to add to resource at each site

  • +
  • scalar: Value to scale resource at each site by

  • +
+
+

The gid field should match the true resource gid regardless of the optional gid_map input. If both adder and scalar are present, the wind or solar resource is corrected by \((res*scalar)+adder\). If either is missing, scalar defaults to 1 and adder to 0. Only windspeed or GHI + DNI are corrected, depending on the technology (wind for the former, solar for the latter). GHI and DNI are corrected with the same correction factors. If None, no corrections are applied. By default, None.

+
+
log_directorystr

Path to log output directory.

+
+
analysis_yearsint | list, optional

A single year or list of years to perform analysis for. These years will be used to fill in any brackets {} in the resource_file input. If None, the resource_file input is assumed to be the full path to the single resource file to be processed. By default, None.

+
+
+

Note that you may remove any keys with a null value if you do not intend to update them yourself.

+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV hybrids.html b/_cli/reV hybrids.html new file mode 100644 index 000000000..9c54a3e7e --- /dev/null +++ b/_cli/reV hybrids.html @@ -0,0 +1,810 @@ + + + + + + + reV hybrids — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV hybrids

+

Execute the hybrids step from a config file.

+

reV hybrids computes a “hybrid” wind and solar supply curve, +where each supply curve point contains some wind and some solar +capacity. Various ratio limits on wind-to-solar farm properties +(e.g. wind-to-solar capacity) can be applied during the +hybridization process. Hybrid generation profiles are also +computed during this process.

+

The general structure for calling this CLI command is given below +(add --help to print help info to the terminal).

+
reV hybrids [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Required Path to the hybrids configuration file. Below is a sample template config

+
+
{
+    "execution_control": {
+        "option": "local",
+        "allocation": "[REQUIRED IF ON HPC]",
+        "walltime": "[REQUIRED IF ON HPC]",
+        "qos": "normal",
+        "memory": null,
+        "queue": null,
+        "feature": null,
+        "conda_env": null,
+        "module": null,
+        "sh_script": null
+    },
+    "log_directory": "./logs",
+    "log_level": "INFO",
+    "solar_fpath": "[REQUIRED]",
+    "wind_fpath": "[REQUIRED]",
+    "allow_solar_only": false,
+    "allow_wind_only": false,
+    "fillna": null,
+    "limits": null,
+    "ratio_bounds": null,
+    "ratio": "solar_capacity/wind_capacity",
+    "save_hybrid_meta": true
+}
+
+
+
+
+

Parameters

+
+
execution_controldict

Dictionary containing execution control arguments. Allowed arguments are:

+
+
option:
+

({‘local’, ‘kestrel’, ‘eagle’, ‘awspc’, ‘slurm’, ‘peregrine’}) +Hardware run option. Determines the type of job +scheduler to use as well as the base AU cost. The +“slurm” option is a catchall for HPC systems +that use the SLURM scheduler and should only be +used if desired hardware is not listed above. If +“local”, no other HPC-specific keys are +required in execution_control (they are ignored +if provided).

+
+
allocation:
+

(str) +HPC project (allocation) handle.

+
+
walltime:
+

(int) +Node walltime request in hours.

+
+
qos:
+

(str, optional) +Quality-of-service specifier. On Eagle or +Kestrel, this should be one of {‘standby’, ‘normal’, +‘high’}. Note that ‘high’ priority doubles the AU +cost. By default, "normal".

+
+
memory:
+

(int, optional) +Node memory request in GB. By default, None, which +does not specify a memory limit.

+
+
queue:
+

(str, optional; PBS ONLY) +HPC queue to submit job to. Examples include: ‘debug’, +‘short’, ‘batch’, ‘batch-h’, ‘long’, etc. +By default, None, which uses “test_queue”.

+
+
feature:
+

(str, optional) +Additional flags for SLURM job (e.g. “-p debug”). +By default, None, which does not specify any +additional flags.

+
+
conda_env:
+

(str, optional) +Name of conda environment to activate. By default, +None, which does not load any environments.

+
+
module:
+

(str, optional) +Module to load. By default, None, which does not +load any modules.

+
+
sh_script:
+

(str, optional) +Extra shell script to run before command call. +By default, None, which does not run any +scripts.

+
+
+

Only the option key is required for local execution. For execution on the HPC, the allocation and walltime keys are also required. All other options are populated with default values, as seen above.

+
+
log_directorystr

Path to directory where logs should be written. Path can be relative and does not have to exist on disk (it will be created if missing). By default, "./logs".

+
+
log_level{“DEBUG”, “INFO”, “WARNING”, “ERROR”}

String representation of desired logger verbosity. Suitable options are DEBUG (most verbose), INFO (moderately verbose), WARNING (only log warnings and errors), and ERROR (only log errors). By default, "INFO".

+
+
solar_fpathstr

Filepath to rep profile output file to extract solar profiles and summaries from.

+
+
wind_fpathstr

Filepath to rep profile output file to extract wind profiles and summaries from.

+
+
allow_solar_onlybool, optional

Option to allow SC points with only solar capacity (no wind). By default, False.

+
+
allow_wind_onlybool, optional

Option to allow SC points with only wind capacity (no solar). By default, False.

+
+
fillnadict, optional

Dictionary containing column_name, fill_value pairs representing any fill values that should be applied after merging the wind and solar meta. Note that column names will likely have to be prefixed with solar or wind. By default None.

+
+
limitsdict, optional

Option to specify mapping (in the form of a dictionary) of {column_name: max_value} representing the upper limit (maximum value) for the values of a column in the merged meta. For example, limits={'solar_capacity': 100} would limit all the values of the solar capacity in the merged meta to a maximum value of 100. This limit is applied BEFORE ratio calculations. The names of the columns should match the column names in the merged meta, so they are likely prefixed with solar or wind. By default, None (no limits applied).

+
+
ratio_boundstuple, optional

Option to set ratio bounds (in two-tuple form) on the columns of the ratio input. For example, ratio_bounds=(0.5, 1.5) would adjust the values of both of the ratio columns such that their ratio is always between half and double (e.g., no value would be more than double the other). To specify a single ratio value, use the same value as the upper and lower bound. For example, ratio_bounds=(1, 1) would adjust the values of both of the ratio columns such that their ratio is always equal. By default, None (no limit on the ratio).

+
+
ratiostr, optional

Option to specify the columns used to calculate the ratio that is limited by the ratio_bounds input. This input is a string in the form “{numerator_column}/{denominator_column}”. For example, ratio='solar_capacity/wind_capacity' would limit the ratio of the solar to wind capacities as specified by the ratio_bounds input. If ratio_bounds is None, this input does nothing. The names of the columns should be prefixed with one of the prefixes defined as class variables. By default 'solar_capacity/wind_capacity'.

+
+
save_hybrid_metabool, optional

Flag to save hybrid SC table to hybrid rep profile output. By default, True.

+
+
+

Note that you may remove any keys with a null value if you do not intend to update them yourself.

+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV multiyear.html b/_cli/reV multiyear.html new file mode 100644 index 000000000..cfc2c537a --- /dev/null +++ b/_cli/reV multiyear.html @@ -0,0 +1,814 @@ + + + + + + + reV multiyear — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV multiyear

+

Execute the multi-year step from a config file.

+

reV multi-year combines reV generation data from multiple +years (typically stored in separate files) into a single multi-year +file. Each dataset in the multi-year file is labeled with the +corresponding years, and multi-year averages of the yearly datasets +are also computed.

+

The general structure for calling this CLI command is given below +(add --help to print help info to the terminal).

+
reV multiyear [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Required Path to the multi-year configuration file. Below is a sample template config

+
+
{
+    "execution_control": {
+        "option": "local",
+        "allocation": "[REQUIRED IF ON HPC]",
+        "walltime": "[REQUIRED IF ON HPC]",
+        "qos": "normal",
+        "memory": null,
+        "queue": null,
+        "feature": null,
+        "conda_env": null,
+        "module": null,
+        "sh_script": null
+    },
+    "log_directory": "./logs",
+    "log_level": "INFO",
+    "groups": "[REQUIRED]",
+    "clobber": true
+}
+
+
+
+
+

Parameters

+
+
execution_controldict

Dictionary containing execution control arguments. Allowed arguments are:

+
+
option:
+

({‘local’, ‘kestrel’, ‘eagle’, ‘awspc’, ‘slurm’, ‘peregrine’}) +Hardware run option. Determines the type of job +scheduler to use as well as the base AU cost. The +“slurm” option is a catchall for HPC systems +that use the SLURM scheduler and should only be +used if desired hardware is not listed above. If +“local”, no other HPC-specific keys are +required in execution_control (they are ignored +if provided).

+
+
allocation:
+

(str) +HPC project (allocation) handle.

+
+
walltime:
+

(int) +Node walltime request in hours.

+
+
qos:
+

(str, optional) +Quality-of-service specifier. On Eagle or +Kestrel, this should be one of {‘standby’, ‘normal’, +‘high’}. Note that ‘high’ priority doubles the AU +cost. By default, "normal".

+
+
memory:
+

(int, optional) +Node memory request in GB. By default, None, which +does not specify a memory limit.

+
+
queue:
+

(str, optional; PBS ONLY) +HPC queue to submit job to. Examples include: ‘debug’, +‘short’, ‘batch’, ‘batch-h’, ‘long’, etc. +By default, None, which uses “test_queue”.

+
+
feature:
+

(str, optional) +Additional flags for SLURM job (e.g. “-p debug”). +By default, None, which does not specify any +additional flags.

+
+
conda_env:
+

(str, optional) +Name of conda environment to activate. By default, +None, which does not load any environments.

+
+
module:
+

(str, optional) +Module to load. By default, None, which does not +load any modules.

+
+
sh_script:
+

(str, optional) +Extra shell script to run before command call. +By default, None, which does not run any +scripts.

+
+
+

Only the option key is required for local execution. For execution on the HPC, the allocation and walltime keys are also required. All other options are populated with default values, as seen above.

+
+
log_directorystr

Path to directory where logs should be written. Path can be relative and does not have to exist on disk (it will be created if missing). By default, "./logs".

+
+
log_level{“DEBUG”, “INFO”, “WARNING”, “ERROR”}

String representation of desired logger verbosity. Suitable options are DEBUG (most verbose), INFO (moderately verbose), WARNING (only log warnings and errors), and ERROR (only log errors). By default, "INFO".

+
+
groupsdict

Dictionary of collection groups and their parameters. This should be a dictionary mapping group names (keys) to a set of key word arguments (values) that can be used to initialize MultiYearGroup (excluding the required name and out_dir inputs, which are populated automatically). For example:

+
groups = {
+    "none": {
+        "dsets": [
+            "cf_profile",
+            "cf_mean",
+            "ghi_mean",
+            "lcoe_fcr",
+        ],
+        "source_dir": "./",
+        "source_prefix": "",
+        "pass_through_dsets": [
+            "capital_cost",
+            "fixed_operating_cost",
+            "system_capacity",
+            "fixed_charge_rate",
+            "variable_operating_cost",
+        ]
+    },
+    "solar_group": {
+        "source_files": "PIPELINE",
+        "dsets": [
+            "cf_profile_ac",
+            "cf_mean_ac",
+            "ac",
+            "dc",
+            "clipped_power"
+        ],
+        "pass_through_dsets": [
+            "system_capacity_ac",
+            "dc_ac_ratio"
+        ]
+    },
+    ...
+}
+
+
+

The group names will be used as the HDF5 file group name under which the collected data will be stored. You can have exactly one group with the name "none" for a “no group” collection (this is typically what you want and all you need to specify).

+
+
clobberbool, optional

Flag to purge the multi-year output file prior to running the multi-year collection step if the file already exists on disk. This ensures the data is always freshly collected from the single-year files. If False, then datasets in the existing file will not be overwritten with (potentially new/updated) data from the single-year files. By default, True.

+
+
+

Note that you may remove any keys with a null value if you do not intend to update them yourself.

+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV nrwal.html b/_cli/reV nrwal.html new file mode 100644 index 000000000..212dc3c6c --- /dev/null +++ b/_cli/reV nrwal.html @@ -0,0 +1,860 @@ + + + + + + + reV nrwal — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV nrwal

+

Execute the nrwal step from a config file.

+

reV NRWAL analysis runs reV data through the NRWAL +compute library. Everything in this module operates on the +spatiotemporal resolution of the reV generation output file +(usually the wind or solar resource resolution but could also be +the supply curve resolution after representative profiles is +run).

+

The general structure for calling this CLI command is given below +(add --help to print help info to the terminal).

+
reV nrwal [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Required Path to the nrwal configuration file. Below is a sample template config

+
+
{
+    "execution_control": {
+        "option": "local",
+        "allocation": "[REQUIRED IF ON HPC]",
+        "walltime": "[REQUIRED IF ON HPC]",
+        "qos": "normal",
+        "memory": null,
+        "queue": null,
+        "feature": null,
+        "conda_env": null,
+        "module": null,
+        "sh_script": null
+    },
+    "log_directory": "./logs",
+    "log_level": "INFO",
+    "gen_fpath": "[REQUIRED]",
+    "site_data": "[REQUIRED]",
+    "sam_files": "[REQUIRED]",
+    "nrwal_configs": "[REQUIRED]",
+    "output_request": "[REQUIRED]",
+    "save_raw": true,
+    "meta_gid_col": "gid",
+    "site_meta_cols": null,
+    "csv_output": false
+}
+
+
+
+
+

Parameters

+
+
execution_controldict

Dictionary containing execution control arguments. Allowed arguments are:

+
+
option:
+

({‘local’, ‘kestrel’, ‘eagle’, ‘awspc’, ‘slurm’, ‘peregrine’}) +Hardware run option. Determines the type of job +scheduler to use as well as the base AU cost. The +“slurm” option is a catchall for HPC systems +that use the SLURM scheduler and should only be +used if desired hardware is not listed above. If +“local”, no other HPC-specific keys are +required in execution_control (they are ignored +if provided).

+
+
allocation:
+

(str) +HPC project (allocation) handle.

+
+
walltime:
+

(int) +Node walltime request in hours.

+
+
qos:
+

(str, optional) +Quality-of-service specifier. On Eagle or +Kestrel, this should be one of {‘standby’, ‘normal’, +‘high’}. Note that ‘high’ priority doubles the AU +cost. By default, "normal".

+
+
memory:
+

(int, optional) +Node memory request in GB. By default, None, which +does not specify a memory limit.

+
+
queue:
+

(str, optional; PBS ONLY) +HPC queue to submit job to. Examples include: ‘debug’, +‘short’, ‘batch’, ‘batch-h’, ‘long’, etc. +By default, None, which uses “test_queue”.

+
+
feature:
+

(str, optional) +Additional flags for SLURM job (e.g. “-p debug”). +By default, None, which does not specify any +additional flags.

+
+
conda_env:
+

(str, optional) +Name of conda environment to activate. By default, +None, which does not load any environments.

+
+
module:
+

(str, optional) +Module to load. By default, None, which does not +load any modules.

+
+
sh_script:
+

(str, optional) +Extra shell script to run before command call. +By default, None, which does not run any +scripts.

+
+
+

Only the option key is required for local execution. For execution on the HPC, the allocation and walltime keys are also required. All other options are populated with default values, as seen above.

+
+
log_directorystr

Path to directory where logs should be written. Path can be relative and does not have to exist on disk (it will be created if missing). By default, "./logs".

+
+
log_level{“DEBUG”, “INFO”, “WARNING”, “ERROR”}

String representation of desired logger verbosity. Suitable options are DEBUG (most verbose), INFO (moderately verbose), WARNING (only log warnings and errors), and ERROR (only log errors). By default, "INFO".

+
+
gen_fpathstr

Full filepath to HDF5 file with reV generation or rep_profiles output. Anything in the output_request input is added to and/or manipulated within this file.

+
+

Note

+

If executing reV from the command line, this +input can also be "PIPELINE" to parse the output of +one of the previous step and use it as input to this call. +However, note that duplicate executions of reV +commands prior to this one within the pipeline may +invalidate this parsing, meaning the gen_fpath input +will have to be specified manually.

+
+
+
site_datastr | pd.DataFrame

Site-specific input data for NRWAL calculation. If this input is a string, it should be a path that points to a CSV file. Otherwise, this input should be a DataFrame with pre-extracted site data. Rows in this table should match the meta_gid_col in the gen_fpath meta data input sites via a gid column. A config column must also be provided that corresponds to the nrwal_configs input. Only sites with a gid in this file’s gid column will be run through NRWAL.

+
+
sam_filesdict | str

A dictionary mapping SAM input configuration ID(s) to SAM configuration(s). Keys are the SAM config ID(s) which correspond to the keys in the nrwal_configs input. Values for each key are either a path to a corresponding SAM config file or a full dictionary of SAM config inputs. For example:

+
sam_files = {
+    "default": "/path/to/default/sam.json",
+    "onshore": "/path/to/onshore/sam_config.yaml",
+    "offshore": {
+        "sam_key_1": "sam_value_1",
+        "sam_key_2": "sam_value_2",
+        ...
+    },
+    ...
+}
+
+
+

This input can also be a string pointing to a single SAM config file. In this case, the config column of the CSV points input should be set to None or left out completely. See the documentation for the reV SAM class (e.g. reV.SAM.generation.WindPower, reV.SAM.generation.PvWattsv8, reV.SAM.generation.Geothermal, etc.) for documentation on the allowed and/or required SAM config file inputs.

+
+
nrwal_configsdict

A dictionary mapping SAM input configuration ID(s) to NRWAL configuration(s). Keys are the SAM config ID(s) which correspond to the keys in the sam_files input. Values for each key are either a path to a corresponding NRWAL YAML or JSON config file or a full dictionary of NRWAL config inputs. For example:

+
nrwal_configs = {
+    "default": "/path/to/default/nrwal.json",
+    "onshore": "/path/to/onshore/nrwal_config.yaml",
+    "offshore": {
+        "nrwal_key_1": "nrwal_value_1",
+        "nrwal_key_2": "nrwal_value_2",
+        ...
+    },
+    ...
+}
+
+
+
+
output_requestlist | tuple

List of output dataset names to be written to the gen_fpath file. Any key from the NRWAL configs or any of the inputs (site_data or sam_files) is available to be exported as an output dataset. If you want to manipulate a dset like cf_mean from gen_fpath and include it in the output_request, you should set save_raw=True and then use cf_mean_raw in the NRWAL equations as the input. This allows you to define an equation in the NRWAL configs for a manipulated cf_mean output that can be included in the output_request list.

+
+
save_rawbool, optional

Flag to save an initial (“raw”) copy of input datasets from gen_fpath that are also part of the output_request. For example, if you request cf_mean in output_request but also manipulate the cf_mean dataset in the NRWAL equations, the original cf_mean will be archived under the cf_mean_raw dataset in gen_fpath. By default, True.

+
+
meta_gid_colstr, optional

Column label in the source meta data from gen_fpath that contains the unique gid identifier. This will be joined to the site_data gid column. By default, "gid".

+
+
site_meta_colslist | tuple, optional

Column labels from site_data to be added to the meta data table in gen_fpath. If None, only the columns in DEFAULT_META_COLS will be added. Any columns requested via this input will be considered in addition to the DEFAULT_META_COLS. By default, None.

+
+
csv_outputbool, optional

Option to write H5 file meta + all requested outputs to CSV file instead of storing in the HDF5 file directly. This can be useful if the same HDF5 file is used for multiple sets of NRWAL runs. Note that all requested output datasets must be 1-dimensional in order to fit within the CSV output.

+
+

Important

+

This option is not compatible with +save_raw=True. If you set csv_output=True, then +the save_raw option is forced to be False. +Therefore, make sure that you do not have any references +to “input_dataset_name_raw” in your NRWAL config. If you +need to manipulate an input dataset, save it to a +different output name in the NRWAL config or manually add +an “input_dataset_name_raw” dataset to your generation +HDF5 file before running NRWAL.

+
+

By default, False.

+
+
+

Note that you may remove any keys with a null value if you do not intend to update them yourself.

+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV pipeline.html b/_cli/reV pipeline.html new file mode 100644 index 000000000..e97d55a1b --- /dev/null +++ b/_cli/reV pipeline.html @@ -0,0 +1,811 @@ + + + + + + + reV pipeline — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV pipeline

+

Execute multiple steps in an analysis pipeline.

+

The general structure for calling this CLI command is given below (add --help to print help info to the terminal).

+
reV pipeline [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Path to the pipeline configuration file. This argument can be +left out, but one and only one file with “pipeline” in the +name should exist in the directory and contain the config +information. Below is a sample template config

+
+
{
+    "pipeline": [
+        {
+            "bespoke": "./config_bespoke.json"
+        },
+        {
+            "generation": "./config_generation.json"
+        },
+        {
+            "econ": "./config_econ.json"
+        },
+        {
+            "collect": "./config_collect.json"
+        },
+        {
+            "multi-year": "./config_multi_year.json"
+        },
+        {
+            "supply-curve-aggregation": "./config_supply_curve_aggregation.json"
+        },
+        {
+            "supply-curve": "./config_supply_curve.json"
+        },
+        {
+            "rep-profiles": "./config_rep_profiles.json"
+        },
+        {
+            "hybrids": "./config_hybrids.json"
+        },
+        {
+            "nrwal": "./config_nrwal.json"
+        },
+        {
+            "qa-qc": "./config_qa_qc.json"
+        },
+        {
+            "script": "./config_script.json"
+        }
+    ],
+    "logging": {
+        "log_file": null,
+        "log_level": "INFO"
+    }
+}
+
+
+
+
+

Parameters

+
+
pipelinelist of dicts

A list of dictionaries, where each dictionary represents one +step in the pipeline. Each dictionary should have one of two +configurations:

+
+
    +
  • A single key-value pair, where the key is the name of +the CLI command to run, and the value is the path to +a config file containing the configuration for that +command

  • +
  • Exactly two key-value pairs, where one of the keys is +"command", with a value that points to the name of +a command to execute, while the second key is a _unique_ +user-defined name of the pipeline step to execute, with +a value that points to the path to a config file +containing the configuration for the command specified +by the other key. This configuration allows users to +specify duplicate commands as part of their pipeline +execution.

  • +
+
+
+
loggingdict, optional

Dictionary containing keyword-argument pairs to pass to +init_logger. This +initializes logging for the submission portion of the +pipeline. Note, however, that each step (command) will +also record the submission step log output to a +common “project” log file, so it’s only ever necessary +to use this input if you want a different (lower) level +of verbosity than the log_level specified in the +config for the step of the pipeline being executed.

+
+
+
+
+ +
+
+--cancel
+

Flag to cancel all jobs associated with a given pipeline.

+
+ +
+
+--monitor
+

Flag to monitor pipeline jobs continuously. Default is not to monitor (kick off jobs and exit).

+
+ +
+
+-r, --recursive
+

Flag to recursively submit pipelines, starting from the current directory and checking every sub-directory therein. The -c option will be completely ignored if you use this option. Instead, the code will check every sub-directory for exactly one file with the word pipeline in it. If found, that file is assumed to be the pipeline config and is used to kick off the pipeline. In any other case, the directory is skipped.

+
+ +
+
+--background
+

Flag to monitor pipeline jobs continuously in the background. Note that the stdout/stderr will not be captured, but you can set a pipeline ‘log_file’ to capture logs.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV project-points.html b/_cli/reV project-points.html new file mode 100644 index 000000000..3ae4caa0c --- /dev/null +++ b/_cli/reV project-points.html @@ -0,0 +1,710 @@ + + + + + + + reV project-points — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV project-points

+

reV ProjectPoints generator

+
reV project-points [OPTIONS] COMMAND [ARGS]...
+
+
+

Options

+
+
+--version
+

Show the version and exit.

+
+ +
+
+-f, --fpath <fpath>
+

Required .csv file path to save project points to (required)

+
+ +
+
+-rf, --res_file <res_file>
+

Required Filepath to single resource file, multi-h5 directory, or /h5_dir/prefix*suffix (required)

+
+ +
+
+-sf, --sam_file <sam_file>
+

Required SAM config file (required)

+
+ +
+
+-v, --verbose
+

Flag to turn on debug logging. Default is not verbose.

+
+ +
+

from-lat-lons

+

Convert latitude and longitude coordinates to ProjectPoints

+
reV project-points from-lat-lons [OPTIONS]
+
+
+

Options

+
+
+-llf, --lat_lon_fpath <lat_lon_fpath>
+

File path to .csv or .json containing latitude, longitude coordinates of interest

+
+ +
+
+--lat_lon_coords, --llc <lat_lon_coords>
+

(lat, lon) coordinates of interest

+
+ +
+
+

from-regions

+

Extract ProjectPoints for given geographic regions

+
reV project-points from-regions [OPTIONS]
+
+
+

Options

+
+
+-regs, --regions <regions>
+

json string or file path to .json containing regions of interest

+
+ +
+
+-r, --region <region>
+

Region to extract

+
+ +
+
+-col, --region_col <region_col>
+

Meta column to search for region

+
+ +
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV qa-qc.html b/_cli/reV qa-qc.html new file mode 100644 index 000000000..12ed0c6f5 --- /dev/null +++ b/_cli/reV qa-qc.html @@ -0,0 +1,776 @@ + + + + + + + reV qa-qc — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV qa-qc

+

Execute the qa-qc step from a config file.

+

reV QA/QC performs quality assurance checks on reV output +data. Users can specify the type of QA/QC that should be applied +to each reV module.

+

The general structure for calling this CLI command is given below +(add --help to print help info to the terminal).

+
reV qa-qc [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Required Path to the qa-qc configuration file. Below is a sample template config

+
+
{
+    "execution_control": {
+        "option": "local",
+        "allocation": "[REQUIRED IF ON HPC]",
+        "walltime": "[REQUIRED IF ON HPC]",
+        "qos": "normal",
+        "memory": null,
+        "queue": null,
+        "feature": null,
+        "conda_env": null,
+        "module": null,
+        "sh_script": null,
+        "max_workers": null
+    },
+    "log_directory": "./logs",
+    "log_level": "INFO",
+    "modules": "[REQUIRED]"
+}
+
+
+
+
+

Parameters

+
+
execution_controldict

Dictionary containing execution control arguments. Allowed arguments are:

+
+
option:
+

({‘local’, ‘kestrel’, ‘eagle’, ‘awspc’, ‘slurm’, ‘peregrine’}) +Hardware run option. Determines the type of job +scheduler to use as well as the base AU cost. The +“slurm” option is a catchall for HPC systems +that use the SLURM scheduler and should only be +used if desired hardware is not listed above. If +“local”, no other HPC-specific keys are +required in execution_control (they are ignored +if provided).

+
+
allocation:
+

(str) +HPC project (allocation) handle.

+
+
walltime:
+

(int) +Node walltime request in hours.

+
+
qos:
+

(str, optional) +Quality-of-service specifier. On Eagle or +Kestrel, this should be one of {‘standby’, ‘normal’, +‘high’}. Note that ‘high’ priority doubles the AU +cost. By default, "normal".

+
+
memory:
+

(int, optional) +Node memory request in GB. By default, None, which +does not specify a memory limit.

+
+
max_workers:
+

(int, optional) +Max number of workers to run for QA/QC. If None, uses all CPU cores. By default, None.

+
+
queue:
+

(str, optional; PBS ONLY) +HPC queue to submit job to. Examples include: ‘debug’, +‘short’, ‘batch’, ‘batch-h’, ‘long’, etc. +By default, None, which uses “test_queue”.

+
+
feature:
+

(str, optional) +Additional flags for SLURM job (e.g. “-p debug”). +By default, None, which does not specify any +additional flags.

+
+
conda_env:
+

(str, optional) +Name of conda environment to activate. By default, +None, which does not load any environments.

+
+
module:
+

(str, optional) +Module to load. By default, None, which does not +load any modules.

+
+
sh_script:
+

(str, optional) +Extra shell script to run before command call. +By default, None, which does not run any +scripts.

+
+
+

Only the option key is required for local execution. For execution on the HPC, the allocation and walltime keys are also required. All other options are populated with default values, as seen above.

+
+
log_directorystr

Path to directory where logs should be written. Path can be relative and does not have to exist on disk (it will be created if missing). By default, "./logs".

+
+
log_level{“DEBUG”, “INFO”, “WARNING”, “ERROR”}

String representation of desired logger verbosity. Suitable options are DEBUG (most verbose), INFO (moderately verbose), WARNING (only log warnings and errors), and ERROR (only log errors). By default, "INFO".

+
+
modulesdict

Dictionary of modules to QA/QC. Keys should be the names of the modules to QA/QC. The values are dictionaries that represent the config for the respective QA/QC step. Allowed config keys for QA/QC are the “property” attributes of QaQcModule.

+
+
+

Note that you may remove any keys with a null value if you do not intend to update them yourself.

+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV rep-profiles.html b/_cli/reV rep-profiles.html new file mode 100644 index 000000000..a2e238a1b --- /dev/null +++ b/_cli/reV rep-profiles.html @@ -0,0 +1,890 @@ + + + + + + + reV rep-profiles — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV rep-profiles

+

Execute the rep-profiles step from a config file.

+

reV rep profiles compute representative generation profiles +for each supply curve point output by reV supply curve +aggregation. Representative profiles can either be a spatial +aggregation of generation profiles or actual generation profiles +that most closely resemble an aggregated profile (selected based +on an error metric).

+

The general structure for calling this CLI command is given below +(add --help to print help info to the terminal).

+
reV rep-profiles [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Required Path to the rep-profiles configuration file. Below is a sample template config

+
+
{
+    "execution_control": {
+        "option": "local",
+        "allocation": "[REQUIRED IF ON HPC]",
+        "walltime": "[REQUIRED IF ON HPC]",
+        "qos": "normal",
+        "memory": null,
+        "queue": null,
+        "feature": null,
+        "conda_env": null,
+        "module": null,
+        "sh_script": null,
+        "max_workers": null
+    },
+    "log_directory": "./logs",
+    "log_level": "INFO",
+    "gen_fpath": "[REQUIRED]",
+    "rev_summary": "[REQUIRED]",
+    "reg_cols": "[REQUIRED]",
+    "cf_dset": "cf_profile",
+    "rep_method": "meanoid",
+    "err_method": "rmse",
+    "weight": "gid_counts",
+    "n_profiles": 1,
+    "aggregate_profiles": false,
+    "save_rev_summary": true,
+    "scaled_precision": false,
+    "analysis_years": null
+}
+
+
+
+
+

Parameters

+
+
execution_controldict

Dictionary containing execution control arguments. Allowed arguments are:

+
+
option:
+

({‘local’, ‘kestrel’, ‘eagle’, ‘awspc’, ‘slurm’, ‘peregrine’}) +Hardware run option. Determines the type of job +scheduler to use as well as the base AU cost. The +“slurm” option is a catchall for HPC systems +that use the SLURM scheduler and should only be +used if desired hardware is not listed above. If +“local”, no other HPC-specific keys are +required in execution_control (they are ignored +if provided).

+
+
allocation:
+

(str) +HPC project (allocation) handle.

+
+
walltime:
+

(int) +Node walltime request in hours.

+
+
qos:
+

(str, optional) +Quality-of-service specifier. On Eagle or +Kestrel, this should be one of {‘standby’, ‘normal’, +‘high’}. Note that ‘high’ priority doubles the AU +cost. By default, "normal".

+
+
memory:
+

(int, optional) +Node memory request in GB. By default, None, which +does not specify a memory limit.

+
+
max_workers:
+

(int, optional) +Number of parallel rep profile workers. 1 will run serial, while None will use all available. By default, None.

+
+
queue:
+

(str, optional; PBS ONLY) +HPC queue to submit job to. Examples include: ‘debug’, +‘short’, ‘batch’, ‘batch-h’, ‘long’, etc. +By default, None, which uses “test_queue”.

+
+
feature:
+

(str, optional) +Additional flags for SLURM job (e.g. “-p debug”). +By default, None, which does not specify any +additional flags.

+
+
conda_env:
+

(str, optional) +Name of conda environment to activate. By default, +None, which does not load any environments.

+
+
module:
+

(str, optional) +Module to load. By default, None, which does not +load any modules.

+
+
sh_script:
+

(str, optional) +Extra shell script to run before command call. +By default, None, which does not run any +scripts.

+
+
+

Only the option key is required for local execution. For execution on the HPC, the allocation and walltime keys are also required. All other options are populated with default values, as seen above.

+
+
log_directorystr

Path to directory where logs should be written. Path can be relative and does not have to exist on disk (it will be created if missing). By default, "./logs".

+
+
log_level{“DEBUG”, “INFO”, “WARNING”, “ERROR”}

String representation of desired logger verbosity. Suitable options are DEBUG (most verbose), INFO (moderately verbose), WARNING (only log warnings and errors), and ERROR (only log errors). By default, "INFO".

+
+
gen_fpathstr

Filepath to reV generation output HDF5 file to extract cf_dset dataset from.

+
+

Note

+

If executing reV from the command line, this +path can contain brackets {} that will be filled in by +the analysis_years input. Alternatively, this input can +be set to "PIPELINE", which will parse this input from +one of these preceding pipeline steps: multi-year, +collect, generation, or +supply-curve-aggregation. However, note that duplicate +executions of any of these commands within the pipeline +may invalidate this parsing, meaning the gen_fpath input +will have to be specified manually.

+
+
+
rev_summarystr | pd.DataFrame

Aggregated reV supply curve summary file. Must include the following columns:

+
+
    +
  • res_gids : string representation of python list +containing the resource GID values corresponding to +each supply curve point.

  • +
  • gen_gids : string representation of python list +containing the reV generation GID values +corresponding to each supply curve point.

  • +
  • weight column (name based on weight input) : string +representation of python list containing the resource +GID weights for each supply curve point.

  • +
+
+
+

Note

+

If executing reV from the command line, this +input can be set to "PIPELINE", which will parse this +input from one of these preceding pipeline steps: +supply-curve-aggregation or supply-curve. +However, note that duplicate executions of any of these +commands within the pipeline may invalidate this parsing, +meaning the rev_summary input will have to be specified +manually.

+
+
+
reg_colsstr | list

Label(s) for a categorical region column(s) to extract profiles for. For example, "state" will extract a rep profile for each unique entry in the "state" column in rev_summary. To get a profile for each supply curve point, try setting reg_cols to a primary key such as "sc_gid".

+
+
cf_dsetstr, optional

Dataset name to pull generation profiles from. This dataset must be present in the gen_fpath HDF5 file. By default, "cf_profile"

+
+

Note

+

If executing reV from the command line, this +name can contain brackets {} that will be filled in by +the analysis_years input (e.g. "cf_profile-{}").

+
+
+
rep_method{‘mean’, ‘meanoid’, ‘median’, ‘medianoid’}, optional

Method identifier for calculation of the representative profile. By default, 'meanoid'

+
+
err_method{‘mbe’, ‘mae’, ‘rmse’}, optional

Method identifier for calculation of error from the representative profile. If this input is None, the representative meanoid / medianoid profile will be returned directly. By default, 'rmse'.

+
+
weightstr, optional

Column in rev_summary used to apply weights when computing mean profiles. The supply curve table data in the weight column should have weight values corresponding to the res_gids in the same row (i.e. string representation of python list containing weight values).

+
+

Important

+

You’ll often want to set this value to +something other than None (typically "gid_counts" +if running on standard reV outputs). Otherwise, the +unique generation profiles within each supply curve point +are weighted equally. For example, if you have a 64x64 +supply curve point, and one generation profile takes up +4095 (99.98%) 90m cells while a second generation profile +takes up only one 90m cell (0.02%), they will contribute +equally to the meanoid profile unless these weights are +specified.

+
+

By default, 'gid_counts'.

+
+
n_profilesint, optional

Number of representative profiles to save to the output file. By default, 1.

+
+
aggregate_profilesbool, optional

Flag to calculate the aggregate (weighted meanoid) profile for each supply curve point. This behavior is in lieu of finding the single profile per region closest to the meanoid. If you set this flag to True, the rep_method, err_method, and n_profiles inputs will be forcibly set to the default values. By default, False.

+
+
save_rev_summarybool, optional

Flag to save full reV supply curve table to rep profile output. By default, True.

+
+
scaled_precisionbool, optional

Flag to scale cf_profiles by 1000 and save as uint16. By default, False.

+
+
analysis_yearsint | list, optional

A single year or list of years to perform analysis for. These years will be used to fill in any brackets {} in the cf_dset or gen_fpath inputs. If None, the cf_dset and gen_fpath inputs are assumed to be the full dataset name and the full path to the single resource file to be processed, respectively. Note that only one of cf_dset or gen_fpath are allowed to contain brackets ({}) to be filled in by the analysis years. By default, None.

+
+
+

Note that you may remove any keys with a null value if you do not intend to update them yourself.

+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV reset-status.html b/_cli/reV reset-status.html new file mode 100644 index 000000000..780ca8f86 --- /dev/null +++ b/_cli/reV reset-status.html @@ -0,0 +1,652 @@ + + + + + + + reV reset-status — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV reset-status

+

Reset the pipeline/job status (progress) for a given directory (defaults to ./). Multiple directories can be supplied to reset the status of each.

+

The general structure for calling this CLI command is given below (add --help to print help info to the terminal).

+
reV reset-status [DIRECTORY]...
+
+
+

Options

+
+
+-f, --force
+

Force pipeline status reset even if jobs are queued/running

+
+ +
+
+-a, --after-step <after_step>
+

Reset pipeline starting after the given pipeline step. The status of this step will remain unaffected, but the status of steps following it will be reset completely.

+
+ +

Arguments

+
+
+DIRECTORY
+

Optional argument(s)

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV script.html b/_cli/reV script.html new file mode 100644 index 000000000..99782634a --- /dev/null +++ b/_cli/reV script.html @@ -0,0 +1,789 @@ + + + + + + + reV script — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV script

+

Execute the script step from a config file.

+

This command runs one or more terminal commands/scripts as part of a +pipeline step.

+

The general structure for calling this CLI command is given below +(add --help to print help info to the terminal).

+
reV script [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Required Path to the script configuration file. Below is a sample template config

+
+
{
+    "execution_control": {
+        "option": "local",
+        "allocation": "[REQUIRED IF ON HPC]",
+        "walltime": "[REQUIRED IF ON HPC]",
+        "qos": "normal",
+        "memory": null,
+        "queue": null,
+        "feature": null,
+        "conda_env": null,
+        "module": null,
+        "sh_script": null
+    },
+    "log_directory": "./logs",
+    "log_level": "INFO",
+    "cmd": "[REQUIRED]"
+}
+
+
+
+
+

Parameters

+
+
execution_controldict

Dictionary containing execution control arguments. Allowed arguments are:

+
+
option:
+

({‘local’, ‘kestrel’, ‘eagle’, ‘awspc’, ‘slurm’, ‘peregrine’}) +Hardware run option. Determines the type of job +scheduler to use as well as the base AU cost. The +“slurm” option is a catchall for HPC systems +that use the SLURM scheduler and should only be +used if desired hardware is not listed above. If +“local”, no other HPC-specific keys in are +required in execution_control (they are ignored +if provided).

+
+
allocation:
+

(str) +HPC project (allocation) handle.

+
+
walltime:
+

(int) +Node walltime request in hours.

+
+
qos:
+

(str, optional) +Quality-of-service specifier. On Eagle or +Kestrel, this should be one of {‘standby’, ‘normal’, +‘high’}. Note that ‘high’ priority doubles the AU +cost. By default, "normal".

+
+
memory:
+

(int, optional) +Node memory request in GB. By default, None, which +does not specify a memory limit.

+
+
queue:
+

(str, optional; PBS ONLY) +HPC queue to submit job to. Examples include: ‘debug’, +‘short’, ‘batch’, ‘batch-h’, ‘long’, etc. +By default, None, which uses “test_queue”.

+
+
feature:
+

(str, optional) +Additional flags for SLURM job (e.g. “-p debug”). +By default, None, which does not specify any +additional flags.

+
+
conda_env:
+

(str, optional) +Name of conda environment to activate. By default, +None, which does not load any environments.

+
+
module:
+

(str, optional) +Module to load. By default, None, which does not +load any modules.

+
+
sh_script:
+

(str, optional) +Extra shell script to run before command call. +By default, None, which does not run any +scripts.

+
+
+

Only the option key is required for local execution. For execution on the HPC, the allocation and walltime keys are also required. All other options are populated with default values, as seen above.

+
+
log_directorystr

Path to directory where logs should be written. Path can be relative and does not have to exist on disk (it will be created if missing). By default, "./logs".

+
+
log_level{“DEBUG”, “INFO”, “WARNING”, “ERROR”}

String representation of desired logger verbosity. Suitable options are DEBUG (most verbose), INFO (moderately verbose), WARNING (only log warnings and errors), and ERROR (only log errors). By default, "INFO".

+
+
cmdstr | list

A single command represented as a string or a list of command strings to execute on a node. If the input is a list, each command string in the list will be executed on a separate node. For example, to run a python script, simply specify

+
"cmd": "python my_script.py"
+
+
+

This will run the python file “my_script.py” (in the project directory) on a single node.

+
+

Important

+

It is inefficient to run scripts that only use a +single processor on HPC nodes for extended periods of time. +Always make sure your long-running scripts use Python’s +multiprocessing library wherever possible to make the most +use of shared HPC resources.

+
+

To run multiple commands in parallel, supply them as a list:

+
"cmd": [
+    "python /path/to/my_script/py -a -out out_file.txt",
+    "wget https://website.org/latest.zip"
+]
+
+
+

This input will run two commands (a python script with the specified arguments and a wget command to download a file from the web), each on their own node and in parallel as part of this pipeline step. Note that commands are always executed from the project directory.

+
+
+

Note that you may remove any keys with a null value if you do not intend to update them yourself.

+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV status.html b/_cli/reV status.html new file mode 100644 index 000000000..76c03b586 --- /dev/null +++ b/_cli/reV status.html @@ -0,0 +1,681 @@ + + + + + + + reV status — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV status

+

Display the status of a project FOLDER.

+

By default, the status of the current working directory is displayed.

+

The general structure for calling this CLI command is given below +(add --help to print help info to the terminal).

+
reV status [OPTIONS] [FOLDER]
+
+
+

Options

+
+
+-ps, --pipe_steps <pipe_steps>
+

Filter status for the given pipeline step(s). Multiple steps can be specified by repeating this option (e.g. -ps step1 -ps step2 ...) By default, the status of all pipeline steps is displayed.

+
+ +
+
+-s, --status <status>
+

Filter jobs for the requested status(es). Allowed options (case-insensitive) +include:

+
+
    +
  • Failed: failure fail failed f

  • +
  • Running: running run r

  • +
  • Submitted: submitted submit sb pending pend p

  • +
  • Success: successful success s

  • +
  • Not submitted: unsubmitted unsubmit u not_submitted ns

  • +
+
+

Multiple status keys can be specified by repeating this option +(e.g. -s status1 -s status2 ...). By default, all status values are +displayed.

+
+ +
+
+-i, --include <include>
+

Extra status keys to include in the print output for each job. Multiple status keys can be specified by repeating this option (e.g. -i key1 -i key2 ...) By default, no extra keys are displayed.

+
+ +
+
+-r, --recursive
+

Option to perform a recursive search of directories (starting with the input directory). The status of every nested directory is reported.

+
+ +

Arguments

+
+
+FOLDER
+

Optional argument

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV supply-curve-aggregation.html b/_cli/reV supply-curve-aggregation.html new file mode 100644 index 000000000..74d29073f --- /dev/null +++ b/_cli/reV supply-curve-aggregation.html @@ -0,0 +1,1014 @@ + + + + + + + reV supply-curve-aggregation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV supply-curve-aggregation

+

Execute the supply-curve-aggregation step from a config file.

+

reV supply curve aggregation combines a high-resolution +(e.g. 90m) exclusion dataset with a (typically) lower resolution +(e.g. 2km) generation dataset by mapping all data onto the high- +resolution grid and aggregating it by a large factor (e.g. 64 or +128). The result is coarsely-gridded data that summarizes +capacity and generation potential as well as associated +economics under a particular land access scenario. This module +can also summarize extra data layers during the aggregation +process, allowing for complementary land characterization +analysis.

+

The general structure for calling this CLI command is given below +(add --help to print help info to the terminal).

+
reV supply-curve-aggregation [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Required Path to the supply-curve-aggregation configuration file. Below is a sample template config

+
+
{
+    "execution_control": {
+        "option": "local",
+        "allocation": "[REQUIRED IF ON HPC]",
+        "walltime": "[REQUIRED IF ON HPC]",
+        "qos": "normal",
+        "memory": null,
+        "queue": null,
+        "feature": null,
+        "conda_env": null,
+        "module": null,
+        "sh_script": null,
+        "max_workers": null,
+        "sites_per_worker": 100
+    },
+    "log_directory": "./logs",
+    "log_level": "INFO",
+    "excl_fpath": "[REQUIRED]",
+    "tm_dset": "[REQUIRED]",
+    "econ_fpath": null,
+    "excl_dict": null,
+    "area_filter_kernel": "queen",
+    "min_area": null,
+    "resolution": 64,
+    "excl_area": null,
+    "gids": null,
+    "pre_extract_inclusions": false,
+    "res_class_dset": null,
+    "res_class_bins": null,
+    "cf_dset": "cf_mean-means",
+    "lcoe_dset": "lcoe_fcr-means",
+    "h5_dsets": null,
+    "data_layers": null,
+    "power_density": null,
+    "friction_fpath": null,
+    "friction_dset": null,
+    "cap_cost_scale": null,
+    "recalc_lcoe": true,
+    "gen_fpath": null,
+    "res_fpath": null,
+    "args": null
+}
+
+
+
+
+

Parameters

+
+
execution_controldict

Dictionary containing execution control arguments. Allowed arguments are:

+
+
option:
+

({‘local’, ‘kestrel’, ‘eagle’, ‘awspc’, ‘slurm’, ‘peregrine’}) +Hardware run option. Determines the type of job +scheduler to use as well as the base AU cost. The +“slurm” option is a catchall for HPC systems +that use the SLURM scheduler and should only be +used if desired hardware is not listed above. If +“local”, no other HPC-specific keys in are +required in execution_control (they are ignored +if provided).

+
+
allocation:
+

(str) +HPC project (allocation) handle.

+
+
walltime:
+

(int) +Node walltime request in hours.

+
+
qos:
+

(str, optional) +Quality-of-service specifier. On Eagle or +Kestrel, this should be one of {‘standby’, ‘normal’, +‘high’}. Note that ‘high’ priority doubles the AU +cost. By default, "normal".

+
+
memory:
+

(int, optional) +Node memory request in GB. By default, None, which +does not specify a memory limit.

+
+
max_workers:
+

(int, optional) +Number of cores to run summary on. None is all available CPUs. By default, None.

+
+
sites_per_worker:
+

(int, optional) +Number of sc_points to summarize on each worker. By default, 100.

+
+
queue:
+

(str, optional; PBS ONLY) +HPC queue to submit job to. Examples include: ‘debug’, +‘short’, ‘batch’, ‘batch-h’, ‘long’, etc. +By default, None, which uses “test_queue”.

+
+
feature:
+

(str, optional) +Additional flags for SLURM job (e.g. “-p debug”). +By default, None, which does not specify any +additional flags.

+
+
conda_env:
+

(str, optional) +Name of conda environment to activate. By default, +None, which does not load any environments.

+
+
module:
+

(str, optional) +Module to load. By default, None, which does not +load any modules.

+
+
sh_script:
+

(str, optional) +Extra shell script to run before command call. +By default, None, which does not run any +scripts.

+
+
+

Only the option key is required for local execution. For execution on the HPC, the allocation and walltime keys are also required. All other options are populated with default values, as seen above.

+
+
log_directorystr

Path to directory where logs should be written. Path can be relative and does not have to exist on disk (it will be created if missing). By default, "./logs".

+
+
log_level{“DEBUG”, “INFO”, “WARNING”, “ERROR”}

String representation of desired logger verbosity. Suitable options are DEBUG (most verbose), INFO (moderately verbose), WARNING (only log warnings and errors), and ERROR (only log errors). By default, "INFO".

+
+
excl_fpathstr | list | tuple

Filepath to exclusions data HDF5 file. The exclusions HDF5 file should contain the layers specified in excl_dict and data_layers. These layers may also be spread out across multiple HDF5 files, in which case this input should be a list or tuple of filepaths pointing to the files containing the layers. Note that each data layer must be uniquely defined (i.e.only appear once and in a single input file).

+
+
tm_dsetstr

Dataset name in the excl_fpath file containing the techmap (exclusions-to-resource mapping data). This data layer links the supply curve GID’s to the generation GID’s that are used to evaluate performance metrics such as mean_cf.

+
+

Important

+

This dataset uniquely couples the (typically +high-resolution) exclusion layers to the (typically +lower-resolution) resource data. Therefore, a separate +techmap must be used for every unique combination of +resource and exclusion coordinates.

+
+
+

Note

+

If executing reV from the command line, you +can specify a name that is not in the exclusions HDF5 +file, and reV will calculate the techmap for you. Note +however that computing the techmap and writing it to the +exclusion HDF5 file is a blocking operation, so you may +only run a single reV aggregation step at a time this +way.

+
+
+
econ_fpathstr, optional

Filepath to HDF5 file with reV econ output results containing an lcoe_dset dataset. If None, lcoe_dset should be a dataset in the gen_fpath HDF5 file that aggregation is executed on.

+
+

Note

+

If executing reV from the command line, this +input can be set to "PIPELINE" to parse the output +from one of these preceding pipeline steps: +multi-year, collect, or generation. However, +note that duplicate executions of any of these commands +within the pipeline may invalidate this parsing, meaning +the econ_fpath input will have to be specified manually.

+
+

By default, None.

+
+
excl_dictdict | None

Dictionary of exclusion keyword arguments of the format {layer_dset_name: {kwarg: value}}, where layer_dset_name is a dataset in the exclusion h5 file and the kwarg: value pair is a keyword argument to the reV.supply_curve.exclusions.LayerMask class. For example:

+
excl_dict = {
+    "typical_exclusion": {
+        "exclude_values": 255,
+    },
+    "another_exclusion": {
+        "exclude_values": [2, 3],
+        "weight": 0.5
+    },
+    "exclusion_with_nodata": {
+        "exclude_range": [10, 100],
+        "exclude_nodata": True,
+        "nodata_value": -1
+    },
+    "partial_setback": {
+        "use_as_weights": True
+    },
+    "height_limit": {
+        "exclude_range": [0, 200]
+    },
+    "slope": {
+        "include_range": [0, 20]
+    },
+    "developable_land": {
+        "force_include_values": 42
+    },
+    "more_developable_land": {
+        "force_include_range": [5, 10]
+    },
+    ...
+}
+
+
+

Note that all the keys given in this dictionary should be datasets of the excl_fpath file. If None or empty dictionary, no exclusions are applied. By default, None.

+
+
area_filter_kernel{“queen”, “rook”}, optional

Contiguous area filter method to use on final exclusions mask. The filters are defined as:

+
# Queen:     # Rook:
+[[1,1,1],    [[0,1,0],
+ [1,1,1],     [1,1,1],
+ [1,1,1]]     [0,1,0]]
+
+
+

These filters define how neighboring pixels are “connected”. Once pixels in the final exclusion layer are connected, the area of each resulting cluster is computed and compared against the min_area input. Any cluster with an area less than min_area is excluded from the final mask. This argument has no effect if min_area is None. By default, "queen".

+
+
min_areafloat, optional

Minimum area (in km2) required to keep an isolated cluster of (included) land within the resulting exclusions mask. Any clusters of land with areas less than this value will be marked as exclusions. See the documentation for area_filter_kernel for an explanation of how the area of each land cluster is computed. If None, no area filtering is performed. By default, None.

+
+
resolutionint, optional

Supply Curve resolution. This value defines how many pixels are in a single side of a supply curve cell. For example, a value of 64 would generate a supply curve where the side of each supply curve cell is 64x64 exclusion pixels. By default, 64.

+
+
excl_areafloat, optional

Area of a single exclusion mask pixel (in km2). If None, this value will be inferred from the profile transform attribute in excl_fpath. By default, None.

+
+
gidslist, optional

List of supply curve point gids to get summary for. If you would like to obtain all available reV supply curve points to run, you can use the reV.supply_curve.extent.SupplyCurveExtent class like so:

+
import pandas as pd
+from reV.supply_curve.extent import SupplyCurveExtent
+
+excl_fpath = "..."
+resolution = ...
+with SupplyCurveExtent(excl_fpath, resolution) as sc:
+    gids = sc.valid_sc_points(tm_dset).tolist()
+...
+
+
+

If None, supply curve aggregation is computed for all gids in the supply curve extent. By default, None.

+
+
pre_extract_inclusionsbool, optional

Optional flag to pre-extract/compute the inclusion mask from the excl_dict input. It is typically faster to compute the inclusion mask on the fly with parallel workers. By default, False.

+
+
res_class_dsetstr, optional

Name of dataset in the reV generation HDF5 output file containing resource data. If None, no aggregated resource classification is performed (i.e. no mean_res output), and the res_class_bins is ignored. By default, None.

+
+
res_class_binslist, optional

Optional input to perform separate aggregations for various resource data ranges. If None, only a single aggregation per supply curve point is performed. Otherwise, this input should be a list of floats or ints representing the resource bin boundaries. One aggregation per resource value range is computed, and only pixels within the given resource range are aggregated. By default, None.

+
+
cf_dsetstr, optional

Dataset name from the reV generation HDF5 output file containing capacity factor mean values. By default, "cf_mean-means".

+
+
lcoe_dsetstr, optional

Dataset name from the reV generation HDF5 output file containing LCOE mean values. By default, "lcoe_fcr-means".

+
+
h5_dsetslist, optional

Optional list of additional datasets from the reV generation/econ HDF5 output file to aggregate. If None, no extra datasets are aggregated. By default, None.

+
+
data_layersdict, optional

Dictionary of aggregation data layers of the format:

+
data_layers = {
+    "output_layer_name": {
+        "dset": "layer_name",
+        "method": "mean",
+        "fpath": "/path/to/data.h5"
+    },
+    "another_output_layer_name": {
+        "dset": "input_layer_name",
+        "method": "mode",
+        # optional "fpath" key omitted
+    },
+    ...
+}
+
+
+

The "output_layer_name" is the column name under which the aggregated data will appear in the output CSV file. The "output_layer_name" does not have to match the dset input value. The latter should match the layer name in the HDF5 from which the data to aggregate should be pulled. The method should be one of {"mode", "mean", "min", "max", "sum", "category"}, describing how the high-resolution data should be aggregated for each supply curve point. fpath is an optional key that can point to an HDF5 file containing the layer data. If left out, the data is assumed to exist in the file(s) specified by the excl_fpath input. If None, no data layer aggregation is performed. By default, None

+
+
power_densityfloat | str, optional

Power density value (in MW/km2) or filepath to variable power density CSV file containing the following columns:

+
+
    +
  • gid : resource gid (typically wtk or nsrdb gid)

  • +
  • power_density : power density value (in +MW/km2)

  • +
+
+

If None, a constant power density is inferred from the generation meta data technology. By default, None.

+
+
friction_fpathstr, optional

Filepath to friction surface data (cost based exclusions). Must be paired with the friction_dset input below. The friction data must be the same shape as the exclusions. Friction input creates a new output column "mean_lcoe_friction" which is the nominal LCOE multiplied by the friction data. If None, no friction data is aggregated. By default, None.

+
+
friction_dsetstr, optional

Dataset name in friction_fpath for the friction surface data. Must be paired with the friction_fpath above. If None, no friction data is aggregated. By default, None.

+
+
cap_cost_scalestr, optional

Optional LCOE scaling equation to implement “economies of scale”. Equations must be in python string format and must return a scalar value to multiply the capital cost by. Independent variables in the equation should match the names of the columns in the reV supply curve aggregation output table (see the documentation of SupplyCurveAggregation for details on available outputs). If None, no economies of scale are applied. By default, None.

+
+
recalc_lcoebool, optional

Flag to re-calculate the LCOE from the multi-year mean capacity factor and annual energy production data. This requires several datasets to be aggregated in the h5_dsets input:

+
+
    +
  • system_capacity

  • +
  • fixed_charge_rate

  • +
  • capital_cost

  • +
  • fixed_operating_cost

  • +
  • variable_operating_cost

  • +
+
+

By default, True.

+
+
gen_fpathstr, optional

Filepath to HDF5 file with reV generation output results. If None, a simple aggregation without any generation, resource, or cost data is performed.

+
+

Note

+

If executing reV from the command line, this +input can be set to "PIPELINE" to parse the output +from one of these preceding pipeline steps: +multi-year, collect, or econ. However, note +that duplicate executions of any of these commands within +the pipeline may invalidate this parsing, meaning the +gen_fpath input will have to be specified manually.

+
+

By default, None.

+
+
res_fpathstr, optional

Filepath to HDF5 resource file (e.g. WTK or NSRDB). This input is required if techmap dset is to be created or if gen_fpath is None. By default, None.

+
+
argstuple | list, optional

List of columns to include in summary output table. None defaults to all available args defined in the SupplyCurveAggregation documentation. By default, None.

+
+
+

Note that you may remove any keys with a null value if you do not intend to update them yourself.

+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV supply-curve.html b/_cli/reV supply-curve.html new file mode 100644 index 000000000..dbb8cc7bc --- /dev/null +++ b/_cli/reV supply-curve.html @@ -0,0 +1,893 @@ + + + + + + + reV supply-curve — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV supply-curve

+

Execute the supply-curve step from a config file.

+

reV supply curve computes the transmission costs associated +with each supply curve point output by reV supply curve +aggregation. Transmission costs can either be computed +competitively (where total capacity remaining on the +transmission grid is tracked and updated after each new +connection) or non-competitively (where the cheapest connections +for each supply curve point are allowed regardless of the +remaining transmission grid capacity). In both cases, the +permutation of transmission costs between supply curve points +and transmission grid features should be computed using the +reVX Least Cost Transmission Paths +utility.

+

The general structure for calling this CLI command is given below +(add --help to print help info to the terminal).

+
reV supply-curve [OPTIONS]
+
+
+

Options

+
+
+-c, --config_file <config_file>
+

Required Path to the supply-curve configuration file. Below is a sample template config

+
+
{
+    "execution_control": {
+        "option": "local",
+        "allocation": "[REQUIRED IF ON HPC]",
+        "walltime": "[REQUIRED IF ON HPC]",
+        "qos": "normal",
+        "memory": null,
+        "queue": null,
+        "feature": null,
+        "conda_env": null,
+        "module": null,
+        "sh_script": null,
+        "max_workers": null
+    },
+    "log_directory": "./logs",
+    "log_level": "INFO",
+    "sc_points": "[REQUIRED]",
+    "trans_table": "[REQUIRED]",
+    "sc_features": null,
+    "sc_capacity_col": "capacity",
+    "fixed_charge_rate": "[REQUIRED]",
+    "simple": true,
+    "avail_cap_frac": 1,
+    "line_limited": false,
+    "transmission_costs": null,
+    "consider_friction": true,
+    "sort_on": null,
+    "columns": [
+        "trans_gid",
+        "trans_type",
+        "trans_cap_cost_per_mw",
+        "dist_km",
+        "lcot",
+        "total_lcoe"
+    ],
+    "competition": null
+}
+
+
+
+
+

Parameters

+
+
execution_controldict

Dictionary containing execution control arguments. Allowed arguments are:

+
+
option:
+

({‘local’, ‘kestrel’, ‘eagle’, ‘awspc’, ‘slurm’, ‘peregrine’}) +Hardware run option. Determines the type of job +scheduler to use as well as the base AU cost. The +“slurm” option is a catchall for HPC systems +that use the SLURM scheduler and should only be +used if desired hardware is not listed above. If +“local”, no other HPC-specific keys in are +required in execution_control (they are ignored +if provided).

+
+
allocation:
+

(str) +HPC project (allocation) handle.

+
+
walltime:
+

(int) +Node walltime request in hours.

+
+
qos:
+

(str, optional) +Quality-of-service specifier. On Eagle or +Kestrel, this should be one of {‘standby’, ‘normal’, +‘high’}. Note that ‘high’ priority doubles the AU +cost. By default, "normal".

+
+
memory:
+

(int, optional) +Node memory request in GB. By default, None, which +does not specify a memory limit.

+
+
max_workers:
+

(int, optional) +Number of workers to use to compute LCOT. If > 1, computation is run in parallel. If None, computation uses all available CPU’s. By default, None.

+
+
queue:
+

(str, optional; PBS ONLY) +HPC queue to submit job to. Examples include: ‘debug’, +‘short’, ‘batch’, ‘batch-h’, ‘long’, etc. +By default, None, which uses “test_queue”.

+
+
feature:
+

(str, optional) +Additional flags for SLURM job (e.g. “-p debug”). +By default, None, which does not specify any +additional flags.

+
+
conda_env:
+

(str, optional) +Name of conda environment to activate. By default, +None, which does not load any environments.

+
+
module:
+

(str, optional) +Module to load. By default, None, which does not +load any modules.

+
+
sh_script:
+

(str, optional) +Extra shell script to run before command call. +By default, None, which does not run any +scripts.

+
+
+

Only the option key is required for local execution. For execution on the HPC, the allocation and walltime keys are also required. All other options are populated with default values, as seen above.

+
+
log_directorystr

Path to directory where logs should be written. Path can be relative and does not have to exist on disk (it will be created if missing). By default, "./logs".

+
+
log_level{“DEBUG”, “INFO”, “WARNING”, “ERROR”}

String representation of desired logger verbosity. Suitable options are DEBUG (most verbose), INFO (moderately verbose), WARNING (only log warnings and errors), and ERROR (only log errors). By default, "INFO".

+
+
sc_pointsstr | pandas.DataFrame

Path to CSV or JSON or DataFrame containing supply curve point summary. Can also be a filepath to a reV bespoke HDF5 output file where the meta dataset has the same format as the supply curve aggregation output.

+
+

Note

+

If executing reV from the command line, this +input can also be "PIPELINE" to parse the output of +the previous pipeline step and use it as input to this +call. However, note that duplicate executions of any +preceding commands within the pipeline may invalidate this +parsing, meaning the sc_points input will have to be +specified manually.

+
+
+
trans_tablestr | pandas.DataFrame | list

Path to CSV or JSON or DataFrame containing supply curve transmission mapping. This can also be a list of transmission tables with different line voltage (capacity) ratings. See the reVX Least Cost Transmission Paths utility to generate these input tables.

+
+
sc_featuresstr | pandas.DataFrame, optional

Path to CSV or JSON or DataFrame containing additional supply curve features (e.g. transmission multipliers, regions, etc.). These features will be merged to the sc_points input table on ALL columns that both have in common. If None, no extra supply curve features are added. By default, None.

+
+
sc_capacity_colstr, optional

Name of capacity column in trans_sc_table. The values in this column determine the size of transmission lines built. The transmission capital costs per MW and the reinforcement costs per MW will be returned in terms of these capacity values. Note that if this column != “capacity”, then “capacity” must also be included in trans_sc_table since those values match the “mean_cf” data (which is used to calculate LCOT and Total LCOE). This input can be used to, e.g., size transmission lines based on solar AC capacity ( sc_capacity_col="capacity_ac"). By default, "capacity".

+
+
fixed_charge_ratefloat

Fixed charge rate, (in decimal form: 5% = 0.05). This value is used to compute LCOT.

+
+
simplebool, optional

Option to run the simple sort (does not keep track of capacity available on the existing transmission grid). If False, a full transmission sort (where connections are limited based on available transmission capacity) is run. Note that the full transmission sort requires the avail_cap_frac and line_limited inputs. By default, True.

+
+
avail_cap_fracint, optional

This input has no effect if simple=True. Fraction of transmissions features capacity ac_cap to make available for connection to supply curve points. By default, 1.

+
+
line_limitedbool, optional

This input has no effect if simple=True. Flag to have substation connection limited by maximum capacity of the attached lines. This is a legacy method. By default, False.

+
+
transmission_costsstr | dict, optional

Dictionary of transmission feature costs or path to JSON file containing a dictionary of transmission feature costs. These costs are used to compute transmission capital cost if the input transmission tables do not have a "trans_cap_cost" column (this input is ignored otherwise). The dictionary must include:

+
+
    +
  • line_tie_in_cost

  • +
  • line_cost

  • +
  • station_tie_in_cost

  • +
  • center_tie_in_cost

  • +
  • sink_tie_in_cost

  • +
+
+

By default, None.

+
+
consider_frictionbool, optional

Flag to add a new "total_lcoe_friction" column to the supply curve output that contains the sum of the computed "total_lcoe" value and the input "mean_lcoe_friction" values. If "mean_lcoe_friction" is not in the sc_points input, this option is ignored. By default, True.

+
+
sort_onstr, optional

Column label to sort the supply curve table on. This affects the build priority when doing a “full” sort - connections with the lowest value in this column will be built first. For a “simple” sort, only connections with the lowest value in this column will be considered. If None, the sort is performed on the total LCOE without any reinforcement costs added (this is typically what you want - it avoids unrealistically long spur-line connections). By default None.

+
+
columnslist | tuple, optional

Columns to preserve in output supply curve dataframe. By default, ('trans_gid', 'trans_type', 'trans_cap_cost_per_mw', 'dist_km', 'lcot', 'total_lcoe').

+
+
competitiondict, optional

Optional dictionary of arguments for competitive wind farm exclusions, which removes supply curve points upwind (and optionally downwind) of the lowest LCOE supply curves. If None, no competition is applied. Otherwise, this dictionary can have up to four keys:

+
+
    +
  • wind_dirs (required) : A path to a CSV file or +reVX ProminentWindDirections +output with the neighboring supply curve point gids +and power-rose values at each cardinal direction.

  • +
  • n_dirs (optional) : An integer representing the +number of prominent directions to use during wind farm +competition. By default, 2.

  • +
  • downwind (optional) : A flag indicating that +downwind neighbors should be removed in addition to +upwind neighbors during wind farm competition. +By default, False.

  • +
  • offshore_compete (optional) : A flag indicating +that offshore farms should be included during wind +farm competition. By default, False.

  • +
+
+

By default None.

+
+
+

Note that you may remove any keys with a null value if you do not intend to update them yourself.

+
+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV template-configs.html b/_cli/reV template-configs.html new file mode 100644 index 000000000..49f75b912 --- /dev/null +++ b/_cli/reV template-configs.html @@ -0,0 +1,653 @@ + + + + + + + reV template-configs — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV template-configs

+

Generate template config files for requested COMMANDS. If no COMMANDS are given, config files for the entire pipeline are generated.

+

The general structure for calling this CLI command is given below (add --help to print help info to the terminal).

+
reV template-configs [COMMANDS]...
+
+
+

Options

+
+
+-t, --type <type>
+

Configuration file type to generate. Allowed options (case-insensitive): json5 json toml yaml yml.

+
+
Default:
+

json

+
+
+
+ +

Arguments

+
+
+COMMANDS
+

Optional argument(s)

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_cli/reV.html b/_cli/reV.html new file mode 100644 index 000000000..e68e4c129 --- /dev/null +++ b/_cli/reV.html @@ -0,0 +1,718 @@ + + + + + + + reV — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV

+

reV Command Line Interface.

+

Typically, a good place to start is to set up a reV job with a pipeline +config that points to several reV modules that you want to run in serial.

+

To begin, you can generate some template configuration files using:

+
$ reV template-configs
+
+
+

By default, this generates template JSON configuration files, though you +can request JSON5, YAML, or TOML configuration files instead. You can run +$ reV template-configs --help on the command line to see all available +options for the template-configs command. Once the template configuration +files have been generated, you can fill them out by referring to the +module CLI documentation (if available) or the help pages of the module CLIs +for more details on the config options for each CLI command:

+
$ reV --help
+
+$ reV bespoke --help
+
+$ reV generation --help
+
+$ reV econ --help
+
+$ reV collect --help
+
+$ reV multi-year --help
+
+$ reV supply-curve-aggregation --help
+
+$ reV supply-curve --help
+
+$ reV rep-profiles --help
+
+$ reV hybrids --help
+
+$ reV nrwal --help
+
+$ reV qa-qc --help
+
+$ reV script --help
+
+
+

After appropriately filling our the configuration files for each module you +want to run, you can call the reV pipeline CLI using:

+
$ reV pipeline -c config_pipeline.json
+
+
+

This command will run each pipeline step in sequence.

+
+

Note

+

You will need to re-submit the pipeline command above after +each completed pipeline step.

+
+

To check the status of the pipeline, you can run:

+
$ reV status
+
+
+

This will print a report to the command line detailing the progress of the +current pipeline. See $ reV status --help for all status command +options.

+

If you need to parameterize the pipeline execution, you can use the batch +command. For details on setting up a batch config file, see the documentation +or run:

+
$ reV batch --help
+
+
+

on the command line. Once you set up a batch config file, you can execute +it using:

+
$ reV batch -c config_batch.json
+
+
+

For more information on getting started, see the +How to Run a Model Powered by GAPs guide.

+

The general structure of the reV CLI is given below.

+
reV [OPTIONS] COMMAND [ARGS]...
+
+
+

Options

+
+
+-v, --verbose
+

Flag to turn on debug logging. Default is not verbose.

+
+ +
+
+--version
+

Show the version and exit.

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_images/exponential_stretching.png b/_images/exponential_stretching.png new file mode 100644 index 000000000..2a28a6362 Binary files /dev/null and b/_images/exponential_stretching.png differ diff --git a/_images/horizontal_translation.png b/_images/horizontal_translation.png new file mode 100644 index 000000000..1d3fc20a6 Binary files /dev/null and b/_images/horizontal_translation.png differ diff --git a/_images/lcoe_fcr_atlantic_rm5.png b/_images/lcoe_fcr_atlantic_rm5.png new file mode 100644 index 000000000..bee18cc4b Binary files /dev/null and b/_images/lcoe_fcr_atlantic_rm5.png differ diff --git a/_images/lcoe_fcr_pacific_rm5.png b/_images/lcoe_fcr_pacific_rm5.png new file mode 100644 index 000000000..9841389a5 Binary files /dev/null and b/_images/lcoe_fcr_pacific_rm5.png differ diff --git a/_images/linear_stretching.png b/_images/linear_stretching.png new file mode 100644 index 000000000..0d05292de Binary files /dev/null and b/_images/linear_stretching.png differ diff --git a/_images/mean_cf.png b/_images/mean_cf.png new file mode 100644 index 000000000..1bc17fb53 Binary files /dev/null and b/_images/mean_cf.png differ diff --git a/_images/mean_depth.png b/_images/mean_depth.png new file mode 100644 index 000000000..69537dc3b Binary files /dev/null and b/_images/mean_depth.png differ diff --git a/_images/mean_lcoe.png b/_images/mean_lcoe.png new file mode 100644 index 000000000..d3a7e24f6 Binary files /dev/null and b/_images/mean_lcoe.png differ diff --git a/_images/mean_ws_mean-means.png b/_images/mean_ws_mean-means.png new file mode 100644 index 000000000..30d72e293 Binary files /dev/null and b/_images/mean_ws_mean-means.png differ diff --git a/_images/rev_flow_chart.png b/_images/rev_flow_chart.png new file mode 100644 index 000000000..d6ff65585 Binary files /dev/null and b/_images/rev_flow_chart.png differ diff --git a/_modules/index.html b/_modules/index.html new file mode 100644 index 000000000..5109aa27c 
--- /dev/null +++ b/_modules/index.html @@ -0,0 +1,672 @@ + + + + + + Overview: module code — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/SAM/SAM.html b/_modules/reV/SAM/SAM.html new file mode 100644 index 000000000..55c59675e --- /dev/null +++ b/_modules/reV/SAM/SAM.html @@ -0,0 +1,1482 @@ + + + + + + reV.SAM.SAM — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.SAM.SAM

+# -*- coding: utf-8 -*-
+"""reV-to-SAM interface module.
+
+Wraps the NREL-PySAM library with additional reV features.
+"""
+import copy
+import json
+import logging
+import numpy as np
+import os
+import pandas as pd
+from warnings import warn
+import PySAM.GenericSystem as generic
+
+from reV.utilities.exceptions import (SAMInputWarning, SAMInputError,
+                                      SAMExecutionError, ResourceError)
+
+from rex.multi_file_resource import (MultiFileResource, MultiFileNSRDB,
+                                     MultiFileWTK)
+from rex.renewable_resource import (WindResource, SolarResource, NSRDB,
+                                    WaveResource, GeothermalResource)
+from rex.multi_res_resource import MultiResolutionResource
+from rex.utilities.utilities import check_res_file
+
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class SamResourceRetriever: + """Factory utility to get the SAM resource handler.""" + + # Mapping for reV technology and SAM module to h5 resource handler type + # SolarResource is swapped for NSRDB if the res_file contains "nsrdb" + RESOURCE_TYPES = {'geothermal': GeothermalResource, + 'pvwattsv5': SolarResource, + 'pvwattsv7': SolarResource, + 'pvwattsv8': SolarResource, + 'pvsamv1': SolarResource, + 'tcsmoltensalt': SolarResource, + 'solarwaterheat': SolarResource, + 'troughphysicalheat': SolarResource, + 'lineardirectsteam': SolarResource, + 'windpower': WindResource, + 'mhkwave': WaveResource + } + + @staticmethod + def _get_base_handler(res_file, module): + """Get the base SAM resource handler, raise error if module not found. + + Parameters + ---------- + res_file : str + Single resource file (with full path) to retrieve. + module : str + SAM module name or reV technology to force interpretation + of the resource file type. + Example: module set to 'pvwatts' or 'tcsmolten' means that this + expects a SolarResource file. If 'nsrdb' is in the res_file name, + the NSRDB handler will be used. + + Returns + ------- + res_handler : SolarResource | WindResource | NSRDB + Solar or Wind resource handler based on input. + """ + + try: + res_handler = SamResourceRetriever.RESOURCE_TYPES[module.lower()] + + except KeyError as e: + msg = ('Cannot interpret what kind of resource handler the SAM ' + 'module or reV technology "{}" requires. Expecting one of ' + 'the following SAM modules or reV technologies: {}' + .format(module, + list(SamResourceRetriever.RESOURCE_TYPES.keys()))) + logger.exception(msg) + raise SAMExecutionError(msg) from e + + if res_handler == SolarResource and 'nsrdb' in res_file.lower(): + # Use NSRDB handler if definitely an NSRDB file + res_handler = NSRDB + + return res_handler + + @staticmethod + def _parse_gid_map_sites(gen_gids, gid_map=None): + """Parse resource gids based on the generation gids used by + project_points and a gid_map. 
If gid_map is None, the input gen_gids + are just passed through as the res_gids. + + Parameters + ---------- + gen_gids : list + List of project_points "sites" that are the generation gids. + gid_map : None | dict + Mapping of unique integer generation gids (keys) to single integer + resource gids (values). This enables the user to input unique + generation gids in the project points that map to non-unique + resource gids. This can be None or a pre-extracted dict. + + Returns + ------- + res_gids : list + List of resource gids corresponding to the generation gids used by + project points. If gid_map is None, then this is the same as the + input gen_gids. + """ + if gid_map is None: + res_gids = gen_gids + else: + res_gids = [gid_map[i] for i in gen_gids] + return res_gids + + @classmethod + def _make_res_kwargs(cls, res_handler, project_points, output_request, + gid_map): + """ + Make Resource.preloadSam args and kwargs + + Parameters + ---------- + res_handler : Resource handler + Wind resource handler. + project_points : reV.config.ProjectPoints + reV Project Points instance used to retrieve resource data at a + specific set of sites. + output_request : list + Outputs to retrieve from SAM. + gid_map : None | dict + Mapping of unique integer generation gids (keys) to single integer + resource gids (values). This enables the user to input unique + generation gids in the project points that map to non-unique + resource gids. This can be None or a pre-extracted dict. + + Returns + ------- + kwargs : dict + Extra input args to preload sam resource. 
+ args : tuple + Args for res_handler.preload_SAM class method + """ + sites = cls._parse_gid_map_sites(project_points.sites, gid_map=gid_map) + args = (sites,) + + kwargs = {} + if res_handler in (SolarResource, NSRDB): + # check for clearsky irradiation analysis for NSRDB + kwargs['clearsky'] = project_points.sam_config_obj.clearsky + kwargs['bifacial'] = project_points.sam_config_obj.bifacial + kwargs['tech'] = project_points.tech + + downscale = project_points.sam_config_obj.downscale + # check for downscaling request + if downscale is not None: + # make sure that downscaling is only requested for NSRDB + # resource + if res_handler != NSRDB: + msg = ('Downscaling was requested for a non-NSRDB ' + 'resource file. reV does not have this capability ' + 'at the current time. Please contact a developer ' + 'for more information on this feature.') + logger.warning(msg) + warn(msg, SAMInputWarning) + else: + # pass through the downscaling request + kwargs['downscale'] = downscale + + elif res_handler == WindResource: + args += (project_points.h, ) + kwargs['icing'] = project_points.sam_config_obj.icing + if project_points.curtailment is not None: + if project_points.curtailment.precipitation: + # make precip rate available for curtailment analysis + kwargs['precip_rate'] = True + + elif res_handler == GeothermalResource: + args += (project_points.d, ) + + # Check for resource means + if any(req.endswith('_mean') for req in output_request): + kwargs['means'] = True + + return kwargs, args + + @staticmethod + def _multi_file_mods(res_handler, kwargs, res_file): + """ + Check if res_file is a multi-file resource dir and update handler + + Parameters + ---------- + res_handler : Resource + Resource handler. + kwargs : dict + Key word arguments for resource init. + res_file : str + Single resource file (with full path) or multi h5 dir. 
+ + Returns + ------- + res_handler : Resource | MultiFileResource + Resource handler, replaced by the multi file resource handler if + necessary. + kwargs : dict + Key word arguments for resource init with h5_dir, prefix, + and suffix. + res_file : str + Single resource file (with full path) or multi h5 dir. + """ + if res_handler == WindResource: + res_handler = MultiFileWTK + elif res_handler in (NSRDB, SolarResource): + res_handler = MultiFileNSRDB + else: + res_handler = MultiFileResource + + return res_handler, kwargs, res_file + +
[docs] @classmethod + def get(cls, res_file, project_points, module, + output_request=('cf_mean', ), gid_map=None, + lr_res_file=None, nn_map=None, bias_correct=None): + """Get the SAM resource iterator object (single year, single file). + + Parameters + ---------- + res_file : str + Single resource file (with full path) to retrieve. + project_points : reV.config.ProjectPoints + reV Project Points instance used to retrieve resource data at a + specific set of sites. + module : str + SAM module name or reV technology to force interpretation + of the resource file type. + Example: module set to 'pvwatts' or 'tcsmolten' means that this + expects a SolarResource file. If 'nsrdb' is in the res_file name, + the NSRDB handler will be used. + output_request : list | tuple, optional + Outputs to retrieve from SAM, by default ('cf_mean', ) + gid_map : None | dict + Mapping of unique integer generation gids (keys) to single integer + resource gids (values). This enables the user to input unique + generation gids in the project points that map to non-unique + resource gids. This can be None or a pre-extracted dict. + lr_res_file : str | None + Optional low resolution resource file that will be dynamically + mapped+interpolated to the nominal-resolution res_file. This + needs to be of the same format as resource_file, e.g. they both + need to be handled by the same rex Resource handler such as + WindResource + nn_map : np.ndarray + Optional 1D array of nearest neighbor mappings associated with the + res_file to lr_res_file spatial mapping. For details on this + argument, see the rex.MultiResolutionResource docstring. + bias_correct : None | pd.DataFrame + None if not provided or extracted DataFrame with wind or solar + resource bias correction table. This has columns: gid (can be index + name), adder, scalar. The gid field should match the true resource + gid regardless of the optional gid_map input. 
If both adder and + scalar are present, the wind or solar resource is corrected by + (res*scalar)+adder. If either adder or scalar is not present, + scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are + corrected depending on the technology. GHI and DNI are corrected + with the same correction factors. + + + Returns + ------- + res : reV.resource.SAMResource + Resource iterator object to pass to SAM. + """ + + res_handler = cls._get_base_handler(res_file, module) + kwargs, args = cls._make_res_kwargs(res_handler, project_points, + output_request, gid_map) + + multi_h5_res, hsds = check_res_file(res_file) + if multi_h5_res: + res_handler, kwargs, res_file = cls._multi_file_mods(res_handler, + kwargs, + res_file) + else: + kwargs['hsds'] = hsds + + kwargs['time_index_step'] = \ + project_points.sam_config_obj.time_index_step + + if lr_res_file is None: + res = res_handler.preload_SAM(res_file, *args, **kwargs) + else: + kwargs['handler_class'] = res_handler + kwargs['nn_map'] = nn_map + res = MultiResolutionResource.preload_SAM(res_file, lr_res_file, + *args, **kwargs) + + if bias_correct is not None: + res.bias_correct(bias_correct) + + return res
+ + +
[docs]class Sam: + """reV wrapper on the PySAM framework.""" + + # PySAM object wrapped by this class + PYSAM = generic + + # callable attributes to be ignored in the get/set logic + IGNORE_ATTRS = ['assign', 'execute', 'export'] + + def __init__(self): + self._pysam = self.PYSAM.new() + self._attr_dict = None + self._inputs = [] + self.sam_sys_inputs = {} + if 'constant' in self.input_list: + self['constant'] = 0.0 + + def __getitem__(self, key): + """Get the value of a PySAM attribute (either input or output). + + Parameters + ---------- + key : str + Lowest level attribute name. + + Returns + ------- + out : object + PySAM data. + """ + + group = self._get_group(key) + try: + out = getattr(getattr(self.pysam, group), key) + except Exception: + out = None + + return out + + def __setitem__(self, key, value): + """Set a PySAM input data attribute. + + Parameters + ---------- + key : str + Lowest level attribute name. + value : object + Data to set to the key. + """ + + if key not in self.input_list: + msg = ('Could not set input key "{}". Attribute not ' + 'found in PySAM object: "{}"' + .format(key, self.pysam)) + logger.exception(msg) + raise SAMInputError(msg) + else: + self.sam_sys_inputs[key] = value + group = self._get_group(key, outputs=False) + try: + setattr(getattr(self.pysam, group), key, value) + except Exception as e: + msg = ('Could not set input key "{}" to ' + 'group "{}" in "{}".\n' + 'Data is: {} ({})\n' + 'Received the following error: "{}"' + .format(key, group, self.pysam, value, type(value), e)) + logger.exception(msg) + raise SAMInputError(msg) from e + + @property + def pysam(self): + """Get the pysam object.""" + return self._pysam + +
[docs] @classmethod + def default(cls): + """Get the executed default pysam object. + + Returns + ------- + PySAM.GenericSystem + """ + obj = cls.PYSAM.default('GenericSystemNone') + obj.execute() + + return obj
+ + @property + def attr_dict(self): + """Get the heirarchical PySAM object attribute dictionary. + + Returns + ------- + _attr_dict : dict + Dictionary with: + keys: variable groups + values: lowest level attribute/variable names + """ + if self._attr_dict is None: + keys = self._get_pysam_attrs(self.pysam) + self._attr_dict = {k: self._get_pysam_attrs(getattr(self.pysam, k)) + for k in keys} + + return self._attr_dict + + @property + def input_list(self): + """Get the list of lowest level input attribute/variable names. + + Returns + ------- + _inputs : list + List of lowest level input attributes. + """ + if not any(self._inputs): + for k, v in self.attr_dict.items(): + if k.lower() != 'outputs': + self._inputs += v + + return self._inputs + + def _get_group(self, key, outputs=True): + """Get the group that the input key belongs to. + + Parameters + ---------- + key : str + Lowest level PySAM attribute/variable name. + outputs : bool + Flag if this key might be in outputs group. False ignores the + outputs group (looks for inputs only). + + Returns + ------- + group : str | None + PySAM attribute group that key belongs to. None if not found. + """ + group = None + + temp = self.attr_dict + if not outputs: + temp = {k: v for (k, v) in temp.items() + if k.lower() != 'outputs'} + + for k, v in temp.items(): + if key in v: + group = k + break + + return group + + def _get_pysam_attrs(self, obj): + """Get a list of attributes from obj with ignore logic. + + Parameters + ---------- + obj : PySAM object + PySAM object to get attribute list from. + + Returns + ------- + attrs : list + List of attrs belonging to obj with dunder attrs and IGNORE_ATTRS + not included. + """ + attrs = [a for a in dir(obj) if not a.startswith('__') + and a not in self.IGNORE_ATTRS] + return attrs + +
[docs] def execute(self): + """Call the PySAM execute method. Raise SAMExecutionError if error.""" + try: + self.pysam.execute() + except Exception as e: + msg = 'PySAM raised an error while executing: "{}"'.format(e) + logger.exception(msg) + raise SAMExecutionError(msg) from e
+ + @staticmethod + def _filter_inputs(key, value): + """Perform any necessary filtering of input keys and values for PySAM. + + Parameters + ---------- + key : str + SAM input key. + value : str | int | float | list | np.ndarray + Input value associated with key. + + Returns + ------- + key : str + Filtered SAM input key. + value : str | int | float | list | np.ndarray + Filtered Input value associated with key. + """ + + if '.' in key: + key = key.replace('.', '_') + + if ':constant' in key and 'adjust:' in key: + key = key.replace('adjust:', '') + + if isinstance(value, str) and '[' in value and ']' in value: + try: + value = json.loads(value) + except json.JSONDecodeError: + msg = ('Found a weird SAM config input for "{}" that looks ' + 'like a stringified-list but could not run through ' + 'json.loads() so skipping: {}'.format(key, value)) + logger.warning(msg) + warn(msg) + + return key, value + +
[docs] def assign_inputs(self, inputs, raise_warning=False): + """Assign a flat dictionary of inputs to the PySAM object. + + Parameters + ---------- + inputs : dict + Flat (single-level) dictionary of PySAM inputs. + raise_warning : bool + Flag to raise a warning for inputs that are not set because they + are not found in the PySAM object. + """ + + for k, v in inputs.items(): + k, v = self._filter_inputs(k, v) + if k in self.input_list and v is not None: + self[k] = v + elif raise_warning: + wmsg = ('Not setting input "{}" to: {}.' + .format(k, v)) + warn(wmsg, SAMInputWarning) + logger.warning(wmsg)
+ + +
[docs]class RevPySam(Sam): + """Base class for reV-SAM simulations (generation and econ).""" + + DIR = os.path.dirname(os.path.realpath(__file__)) + MODULE = None + + def __init__(self, meta, sam_sys_inputs, output_request, + site_sys_inputs=None): + """Initialize a SAM object. + + Parameters + ---------- + meta : pd.DataFrame | pd.Series | None + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, elevation, + and timezone. Can be None for econ runs. + sam_sys_inputs : dict + Site-agnostic SAM system model inputs arguments. + output_request : list + Requested SAM outputs (e.g., 'cf_mean', 'annual_energy', + 'cf_profile', 'gen_profile', 'energy_yield', 'ppa_price', + 'lcoe_fcr'). + site_sys_inputs : dict + Optional set of site-specific SAM system inputs to complement the + site-agnostic inputs. + """ + + super().__init__() + self._site = None + self.time_interval = 1 + self.outputs = {} + self.sam_sys_inputs = sam_sys_inputs + self.site_sys_inputs = site_sys_inputs + self.output_request = output_request + if self.output_request is None: + self.output_request = [] + + self._meta = self._parse_meta(meta) + self._parse_site_sys_inputs(site_sys_inputs) + + @property + def meta(self): + """Get meta data property.""" + return self._meta + + @property + def module(self): + """Get module property.""" + return self.MODULE + + @property + def site(self): + """Get the site number for this SAM simulation.""" + return self._site + +
[docs] @staticmethod + def get_sam_res(*args, **kwargs): + """Get the SAM resource iterator object (single year, single file).""" + return SamResourceRetriever.get(*args, **kwargs)
+ +
[docs] @staticmethod + def drop_leap(resource): + """Drop Feb 29th from resource df with time index. + + Parameters + ---------- + resource : pd.DataFrame + Resource dataframe with an index containing a pandas + time index object with month and day attributes. + + Returns + ------- + resource : pd.DataFrame + Resource dataframe with all February 29th timesteps removed. + """ + + if hasattr(resource, 'index'): + if (hasattr(resource.index, 'month') + and hasattr(resource.index, 'day')): + leap_day = ((resource.index.month == 2) + & (resource.index.day == 29)) + resource = resource.drop(resource.index[leap_day]) + + return resource
+ +
[docs] @staticmethod + def ensure_res_len(arr, time_index): + """ + Ensure time_index has a constant time-step and only covers 365 days + (no leap days). If not remove last day + + Parameters + ---------- + arr : ndarray + Array to truncate if time_index has a leap day + time_index : pandas.DatatimeIndex + Time index associated with arr, used to check time-series + frequency and number of days + + Returns + ------- + arr : ndarray + Truncated array of data such that there are 365 days + """ + msg = ('A valid time_index must be supplied to ensure the proper ' + 'resource length! Instead {} was supplied' + .format(type(time_index))) + assert isinstance(time_index, pd.DatetimeIndex) + + msg = ('arr length {} does not match time_index length {}!' + .format(len(arr), len(time_index))) + assert len(arr) == len(time_index) + + if time_index.is_leap_year.all(): + mask = time_index.month == 2 + mask &= time_index.day == 29 + if not mask.any(): + mask = time_index.month == 2 + mask &= time_index.day == 28 + s = np.where(mask)[0][-1] + + freq = pd.infer_freq(time_index[:s]) + msg = 'frequencies do not match before and after 2/29' + assert freq == pd.infer_freq(time_index[s + 1:]), msg + else: + freq = pd.infer_freq(time_index) + else: + freq = pd.infer_freq(time_index) + + if freq is None: + msg = ('Resource time_index does not have a consistent time-step ' + '(frequency)!') + logger.error(msg) + raise ResourceError(msg) + + doy = time_index.dayofyear + n_doy = len(doy.unique()) + + if n_doy > 365: + # Drop last day of year + doy_max = doy.max() + mask = doy != doy_max + arr = arr[mask] + + return arr
+ +
[docs] @staticmethod + def make_datetime(series): + """Ensure that pd series is a datetime series with dt accessor""" + if not hasattr(series, 'dt'): + series = pd.to_datetime(pd.Series(series)) + + return series
+ +
[docs] @classmethod + def get_time_interval(cls, time_index): + """Get the time interval. + + Parameters + ---------- + time_index : pd.series + Datetime series. Must have a dt attribute to access datetime + properties (added using make_datetime method). + + Returns + ------- + time_interval : int: + This value is the number of indices over which an hour is counted. + So if the timestep is 0.5 hours, time_interval is 2. + """ + + time_index = cls.make_datetime(time_index) + x = time_index.dt.hour.diff() + time_interval = 0 + + # iterate through the hourly time diffs and count indices between flips + for t in x[1:]: + if t == 1.0: + time_interval += 1 + break + elif t == 0.0: + time_interval += 1 + + return int(time_interval)
+ + @staticmethod + def _parse_meta(meta): + """Make sure the meta data corresponds to a single location and convert + to pd.Series. + + Parameters + ---------- + meta : pd.DataFrame | pd.Series | None + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, elevation, + and timezone. Can be None for econ runs. + + Parameters + ---------- + meta : pd.Series | None + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, elevation, + and timezone. Can be None for econ runs. + """ + + if isinstance(meta, pd.DataFrame): + msg = ('Meta data must only be for a single site but received: {}' + .format(meta)) + assert len(meta) == 1, msg + meta = meta.iloc[0] + + if meta is not None: + assert isinstance(meta, pd.Series) + + return meta + + def _parse_site_sys_inputs(self, site_sys_inputs): + """Parse site-specific parameters and add to parameter dict. + + Parameters + ---------- + site_sys_inputs : dict + Optional set of site-specific SAM system inputs to complement the + site-agnostic inputs. + """ + + if site_sys_inputs is not None: + for k, v in site_sys_inputs.items(): + if isinstance(v, float) and np.isnan(v): + pass + else: + self.sam_sys_inputs[k] = v + + @staticmethod + def _is_arr_like(val): + """Returns true if SAM data is array-like. False if scalar.""" + if isinstance(val, (int, float, str)): + return False + else: + try: + len(val) + except TypeError: + return False + else: + return True + + @classmethod + def _is_hourly(cls, val): + """Returns true if SAM data is hourly or sub-hourly. False otherise.""" + if not cls._is_arr_like(val): + return False + else: + L = len(val) + return L >= 8760 + +
[docs] def outputs_to_utc_arr(self): + """Convert array-like SAM outputs to UTC np.ndarrays""" + if self.outputs is not None: + for key, output in self.outputs.items(): + if self._is_arr_like(output): + output = np.asarray(output) + + if output.dtype == np.float64: + output = output.astype(np.float32) + elif output.dtype == np.int64: + output = output.astype(np.int32) + + if self._is_hourly(output): + n_roll = int(-1 * self.meta['timezone'] + * self.time_interval) + output = np.roll(output, n_roll) + + self.outputs[key] = output
+ +
[docs] def collect_outputs(self, output_lookup): + """Collect SAM output_request, convert timeseries outputs to UTC, and + save outputs to self.outputs property. + + Parameters + ---------- + output_lookup : dict + Lookup dictionary mapping output keys to special output methods. + """ + bad_requests = [] + for req in self.output_request: + if req in output_lookup: + self.outputs[req] = output_lookup[req]() + elif req in self.sam_sys_inputs: + self.outputs[req] = self.sam_sys_inputs[req] + else: + try: + self.outputs[req] = getattr(self.pysam.Outputs, req) + except AttributeError: + bad_requests.append(req) + + if any(bad_requests): + msg = ('Could not retrieve outputs "{}" from PySAM object "{}".' + .format(bad_requests, self.pysam)) + logger.error(msg) + raise SAMExecutionError(msg) + + self.outputs_to_utc_arr()
+ +
[docs] def assign_inputs(self): + """Assign the self.sam_sys_inputs attribute to the PySAM object.""" + super().assign_inputs(copy.deepcopy(self.sam_sys_inputs))
+ +
[docs] def execute(self): + """Call the PySAM execute method. Raise SAMExecutionError if error. + Include the site index if available. + """ + try: + self.pysam.execute() + except Exception as e: + msg = ('PySAM raised an error while executing: "{}"' + .format(self.module)) + if self.site is not None: + msg += ' for site {}'.format(self.site) + logger.exception(msg) + raise SAMExecutionError(msg) from e
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/SAM/defaults.html b/_modules/reV/SAM/defaults.html new file mode 100644 index 000000000..3d6984db7 --- /dev/null +++ b/_modules/reV/SAM/defaults.html @@ -0,0 +1,869 @@ + + + + + + reV.SAM.defaults — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.SAM.defaults

+# -*- coding: utf-8 -*-
+"""PySAM default implementations."""
+from abc import abstractmethod
+import json
+import os
+import pandas as pd
+import PySAM.Geothermal as PySamGeothermal
+import PySAM.Pvwattsv5 as PySamPV5
+import PySAM.Pvwattsv8 as PySamPV8
+import PySAM.Pvsamv1 as PySamDetailedPV
+import PySAM.Windpower as PySamWindPower
+import PySAM.TcsmoltenSalt as PySamCSP
+import PySAM.Swh as PySamSWH
+import PySAM.TroughPhysicalProcessHeat as PySamTPPH
+import PySAM.LinearFresnelDsgIph as PySamLDS
+import PySAM.Lcoefcr as PySamLCOE
+import PySAM.Singleowner as PySamSingleOwner
+import PySAM.MhkWave as PySamMhkWave
+
+
+DEFAULTSDIR = os.path.dirname(os.path.realpath(__file__))
+DEFAULTSDIR = os.path.join(DEFAULTSDIR, 'defaults')
+
+
+
[docs]class AbstractDefaultFromConfigFile: + """Class for default PySAM object from a config file.""" + + @property + @abstractmethod + def CONFIG_FILE_NAME(self): + """Name of JSON config file containing default PySAM inputs.""" + raise NotImplementedError + + @property + @abstractmethod + def PYSAM_MODULE(self): + """PySAM module to initialize (e.g. Pvwattsv5, Geothermal, etc.). """ + raise NotImplementedError + +
[docs] @classmethod + def init_default_pysam_obj(cls): + """Initialize a defualt PySM object from a config file.""" + config_file = os.path.join(DEFAULTSDIR, cls.CONFIG_FILE_NAME) + + # pylint: disable=no-member + obj = cls.PYSAM_MODULE.new() + with open(config_file, 'r') as f: + config = json.load(f) + + for k, v in config.items(): + if 'adjust:' in k or'file' in k : + continue + if 'geotherm.cost' in k: + k = k.replace(".", "_") + obj.value(k, v) + + obj.AdjustmentFactors.constant = 0.0 + return obj
+ + +
[docs]class DefaultGeothermal(AbstractDefaultFromConfigFile): + """Class for default Geothermal""" + CONFIG_FILE_NAME = 'geothermal.json' + PYSAM_MODULE = PySamGeothermal + +
[docs] @staticmethod + def default(): + """Get the default PySAM Geothermal object""" + obj = DefaultGeothermal.init_default_pysam_obj() + res_file = os.path.join(DEFAULTSDIR, + 'USA AZ Phoenix Sky Harbor Intl Ap (TMY3).csv') + obj.GeoHourly.file_name = res_file + obj.execute() + + return obj
+ + +
[docs]class DefaultPvWattsv5(AbstractDefaultFromConfigFile): + """Class for default PVWattsv5""" + CONFIG_FILE_NAME = 'i_pvwattsv5.json' + PYSAM_MODULE = PySamPV5 + +
[docs] @staticmethod + def default(): + """Get the default PySAM pvwattsv5 object""" + obj = DefaultPvWattsv5.init_default_pysam_obj() + res_file = os.path.join(DEFAULTSDIR, + 'USA AZ Phoenix Sky Harbor Intl Ap (TMY3).csv') + obj.SolarResource.solar_resource_file = res_file + obj.execute() + + return obj
+ + +
[docs]class DefaultPvWattsv8: + """class for default PVWattsv8""" + +
[docs] @staticmethod + def default(): + """Get the default PySAM pvwattsv8 object""" + res_file = os.path.join(DEFAULTSDIR, + 'USA AZ Phoenix Sky Harbor Intl Ap (TMY3).csv') + obj = PySamPV8.default('PVWattsNone') + obj.SolarResource.solar_resource_file = res_file + obj.execute() + + return obj
+ + +
[docs]class DefaultPvSamv1: + """class for default detailed PV""" + +
[docs] @staticmethod + def default(): + """Get the default PySAM Pvsamv1 object""" + res_file = os.path.join(DEFAULTSDIR, + 'USA AZ Phoenix Sky Harbor Intl Ap (TMY3).csv') + obj = PySamDetailedPV.default('FlatPlatePVNone') + obj.SolarResource.solar_resource_file = res_file + obj.execute() + + return obj
+ + +
[docs]class DefaultTcsMoltenSalt: + """Class for default CSP""" + +
[docs] @staticmethod + def default(): + """Get the default PySAM object""" + res_file = os.path.join(DEFAULTSDIR, + 'USA AZ Phoenix Sky Harbor Intl Ap (TMY3).csv') + obj = PySamCSP.default('MSPTSingleOwner') + obj.SolarResource.solar_resource_file = res_file + obj.execute() + + return obj
+ + +
[docs]class DefaultWindPower: + """Class for default windpower""" + +
[docs] @staticmethod + def default(): + """Get the default PySAM object""" + res_file = os.path.join(DEFAULTSDIR, 'WY Southern-Flat Lands.srw') + obj = PySamWindPower.default('WindPowerNone') + obj.Resource.wind_resource_filename = res_file + obj.execute() + + return obj
+ + +
[docs]class DefaultSwh: + """Class for default solar water heating""" + +
[docs] @staticmethod + def default(): + """Get the default PySAM object""" + res_file = os.path.join(DEFAULTSDIR, + 'USA AZ Phoenix Sky Harbor Intl Ap (TMY3).csv') + obj = PySamSWH.default('SolarWaterHeatingNone') + obj.Weather.solar_resource_file = res_file + obj.execute() + + return obj
+ + +
[docs]class DefaultTroughPhysicalProcessHeat: + """Class for default parabolic trough process heat""" + +
[docs] @staticmethod + def default(): + """Get the default PySAM object""" + res_file = os.path.join(DEFAULTSDIR, + 'USA AZ Phoenix Sky Harbor Intl Ap (TMY3).csv') + obj = PySamTPPH.default('PhysicalTroughIPHNone') + obj.Weather.file_name = res_file + obj.execute() + + return obj
+ + +
[docs]class DefaultLinearFresnelDsgIph: + """Class for default linear direct steam heat""" + +
[docs] @staticmethod + def default(): + """Get the default PySAM object""" + res_file = os.path.join(DEFAULTSDIR, 'USA CA Daggett (TMY2).csv') + obj = PySamLDS.default('DSGLIPHNone') + obj.Weather.file_name = res_file + obj.execute() + + return obj
+ + +
[docs]class DefaultMhkWave: + """Class for default mhkwave""" + +
[docs] @staticmethod + def default(): + """Get the default PySAM object""" + data_dict = {} + data_dict['lat'] = 40.8418 + data_dict['lon'] = 124.2477 + data_dict['tz'] = -7 + res_file = os.path.join(DEFAULTSDIR, 'US_Wave.csv') + df = pd.read_csv(res_file) + for col in df.columns: + data_dict[col] = df[col].values.flatten().tolist() + + obj = PySamMhkWave.default('MEwaveLCOECalculator') + obj.MHKWave.wave_resource_model_choice = 1 + obj.unassign('significant_wave_height') + obj.unassign('energy_period') + obj.MHKWave.wave_resource_data = data_dict + obj.execute() + + return obj
+ + +
[docs]class DefaultLCOE: + """Class for default LCOE calculator""" + +
[docs] @staticmethod + def default(): + """Get the default PySAM object""" + pv = DefaultPvWattsv5.default() + obj = PySamLCOE.default('PVWattsLCOECalculator') + obj.SimpleLCOE.annual_energy = pv.Outputs.annual_energy + obj.execute() + + return obj
+ + +
[docs]class DefaultSingleOwner: + """class for default Single Owner (PPA) calculator""" + +
[docs] @staticmethod + def default(): + """Get the default PySAM object""" + pv = DefaultPvWattsv5.default() + obj = PySamSingleOwner.default('PVWattsSingleOwner') + obj.SystemOutput.gen = pv.Outputs.ac + obj.execute() + + return obj
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/SAM/econ.html b/_modules/reV/SAM/econ.html new file mode 100644 index 000000000..05457a4cc --- /dev/null +++ b/_modules/reV/SAM/econ.html @@ -0,0 +1,1208 @@ + + + + + + reV.SAM.econ — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.SAM.econ

+# -*- coding: utf-8 -*-
+"""reV-to-SAM econ interface module.
+
+Wraps the NREL-PySAM lcoefcr and singleowner modules with
+additional reV features.
+"""
+from copy import deepcopy
+import logging
+import numpy as np
+from warnings import warn
+import PySAM.Lcoefcr as PySamLCOE
+import PySAM.Singleowner as PySamSingleOwner
+
+from reV.SAM.defaults import DefaultSingleOwner, DefaultLCOE
+from reV.handlers.outputs import Outputs
+from reV.SAM.windbos import WindBos
+from reV.SAM.SAM import RevPySam
+from reV.utilities.exceptions import SAMExecutionError
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class Economic(RevPySam): + """Base class for SAM economic models.""" + MODULE = None + + def __init__(self, sam_sys_inputs, site_sys_inputs=None, + output_request='lcoe_fcr'): + """Initialize a SAM economic model object. + + Parameters + ---------- + sam_sys_inputs : dict + Site-agnostic SAM system model inputs arguments. + site_sys_inputs : dict + Optional set of site-specific SAM system inputs to complement the + site-agnostic inputs. + output_request : list | tuple | str + Requested SAM output(s) (e.g., 'ppa_price', 'lcoe_fcr'). + """ + + self._site = None + + if isinstance(output_request, (list, tuple)): + self.output_request = output_request + else: + self.output_request = (output_request,) + + super().__init__(meta=None, sam_sys_inputs=sam_sys_inputs, + site_sys_inputs=site_sys_inputs, + output_request=output_request) + + @staticmethod + def _parse_sys_cap(site, inputs, site_df): + """Find the system capacity variable in either inputs or df. + + Parameters + ---------- + site : int + Site gid. + inputs : dict + Generic system inputs (not site-specific). + site_df : pd.DataFrame + Site-specific inputs table with index = site gid's + + Returns + ------- + sys_cap : int | float + System nameplate capacity in native units (SAM is kW). + """ + + if ('system_capacity' not in inputs + and 'turbine_capacity' not in inputs + and 'system_capacity' not in site_df + and 'turbine_capacity' not in site_df): + raise SAMExecutionError('Input parameter "system_capacity" ' + 'or "turbine_capacity" ' + 'must be included in the SAM config ' + 'inputs or site-specific inputs in ' + 'order to calculate annual energy ' + 'yield for LCOE. 
Received the following ' + 'inputs, site_df:\n{}\n{}' + .format(inputs, site_df.head())) + + if 'system_capacity' in inputs: + sys_cap = inputs['system_capacity'] + elif 'turbine_capacity' in inputs: + sys_cap = inputs['turbine_capacity'] + elif 'system_capacity' in site_df: + sys_cap = site_df.loc[site, 'system_capacity'] + elif 'turbine_capacity' in site_df: + sys_cap = site_df.loc[site, 'turbine_capacity'] + + return sys_cap + + @classmethod + def _get_annual_energy(cls, site, site_df, site_gids, cf_arr, inputs, + calc_aey): + """Get the single-site cf and annual energy and add to site_df. + + Parameters + ---------- + site : int + Site gid. + site_df : pd.DataFrame + Dataframe of site-specific input variables. Row index corresponds + to site number/gid (via df.loc not df.iloc), column labels are the + variable keys that will be passed forward as SAM parameters. + site_gids : list + List of all site gid values from the cf_file. + cf_arr : np.ndarray + Array of cf_mean values for all sites in the cf_file for the + given year. + inputs : dict + Dictionary of SAM input parameters. + calc_aey : bool + Flag to add annual_energy to df. + + Returns + ------- + site_df : pd.DataFrame + Same as input but with added labels "capacity_factor" and + "annual_energy" (latter is dependent on calc_aey flag). + """ + + # get the index location of the site in question + isite = site_gids.index(site) + + # calculate the capacity factor + cf = cf_arr[isite] + if cf > 1: + warn('Capacity factor > 1. 
Dividing by 100.') + cf /= 100 + site_df.loc[site, 'capacity_factor'] = cf + + # calculate the annual energy yield if not input; + if calc_aey: + # get the system capacity + sys_cap = cls._parse_sys_cap(site, inputs, site_df) + + # Calc annual energy, mult by 8760 to convert kW to kWh + aey = sys_cap * cf * 8760 + + # add aey to site-specific inputs + site_df.loc[site, 'annual_energy'] = aey + + return site_df + + @staticmethod + def _get_cf_profiles(sites, cf_file, year): + """Get the multi-site capacity factor time series profiles. + + Parameters + ---------- + sites : list + List of all site GID's to get gen profiles for. + cf_file : str + reV generation capacity factor output file with path. + year : int | str | None + reV generation year to calculate econ for. Looks for cf_mean_{year} + or cf_profile_{year}. None will default to a non-year-specific cf + dataset (cf_mean, cf_profile). + + Returns + ------- + profiles : np.ndarray + 2D array (time, n_sites) of all capacity factor profiles for all + the requested sites. + """ + + # Retrieve the generation profile for single owner input + with Outputs(cf_file) as cfh: + + # get the index location of the site in question + site_gids = list(cfh.get_meta_arr('gid')) + isites = [site_gids.index(s) for s in sites] + + # look for the cf_profile dataset + if 'cf_profile' in cfh.datasets: + dset = 'cf_profile' + elif 'cf_profile-{}'.format(year) in cfh.datasets: + dset = 'cf_profile-{}'.format(year) + elif 'cf_profile_{}'.format(year) in cfh.datasets: + dset = 'cf_profile_{}'.format(year) + else: + msg = ('Could not find cf_profile values for ' + 'input to SingleOwner. Available datasets: {}' + .format(cfh.datasets)) + logger.error(msg) + raise KeyError(msg) + + profiles = cfh[dset, :, isites] + + return profiles + + @classmethod + def _make_gen_profile(cls, isite, site, profiles, site_df, inputs): + """Get the single-site generation time series and add to inputs dict. 
+ + Parameters + ---------- + isite : int + Site index in the profiles array. + site : int + Site resource GID. + profiles : np.ndarray + 2D array (time, n_sites) of all capacity factor profiles for all + the requested sites. + site_df : pd.DataFrame + Dataframe of site-specific input variables. Row index corresponds + to site number/gid (via df.loc not df.iloc), column labels are the + variable keys that will be passed forward as SAM parameters. + inputs : dict + Dictionary of SAM input parameters. + + Returns + ------- + inputs : dict + Dictionary of SAM input parameters with the generation profile + added. + """ + + sys_cap = cls._parse_sys_cap(site, inputs, site_df) + inputs['gen'] = profiles[:, isite] * sys_cap + + return inputs + +
[docs] def ppa_price(self): + """Get PPA price ($/MWh). + + Native units are cents/kWh, mult by 10 for $/MWh. + """ + return self['ppa'] * 10
+ +
[docs] def npv(self): + """Get net present value (NPV) ($). + + Native units are dollars. + """ + return self['project_return_aftertax_npv']
+ +
[docs] def lcoe_fcr(self): + """Get LCOE ($/MWh). + + Native units are $/kWh, mult by 1000 for $/MWh. + """ + if 'lcoe_fcr' in self.outputs: + lcoe = self.outputs['lcoe_fcr'] + else: + lcoe = self['lcoe_fcr'] * 1000 + return lcoe
+ +
[docs] def lcoe_nom(self): + """Get nominal LCOE ($/MWh) (from PPA/SingleOwner model). + + Native units are cents/kWh, mult by 10 for $/MWh. + """ + return self['lcoe_nom'] * 10
+ +
[docs] def lcoe_real(self): + """Get real LCOE ($/MWh) (from PPA/SingleOwner model). + + Native units are cents/kWh, mult by 10 for $/MWh. + """ + return self['lcoe_real'] * 10
+ +
[docs] def flip_actual_irr(self): + """Get actual IRR (from PPA/SingleOwner model). + + Native units are %. + """ + return self['flip_actual_irr']
+ +
[docs] def gross_revenue(self): + """Get cash flow total revenue (from PPA/SingleOwner model). + + Native units are $. + """ + cf_tr = np.array(self['cf_total_revenue'], dtype=np.float32) + cf_tr = np.sum(cf_tr, axis=0) + return cf_tr
+ +
[docs] def collect_outputs(self): + """Collect SAM output_request, convert timeseries outputs to UTC, and + save outputs to self.outputs property. + """ + + output_lookup = {'ppa_price': self.ppa_price, + 'project_return_aftertax_npv': self.npv, + 'lcoe_fcr': self.lcoe_fcr, + 'lcoe_nom': self.lcoe_nom, + 'lcoe_real': self.lcoe_real, + 'flip_actual_irr': self.flip_actual_irr, + 'gross_revenue': self.gross_revenue, + } + + super().collect_outputs(output_lookup)
+ +
[docs] @classmethod + def reV_run(cls, site, site_df, inputs, output_request): + """Run the SAM econ model for a single site. + + Parameters + ---------- + site : int + Site gid. + site_df : pd.DataFrame + Dataframe of site-specific input variables. Row index corresponds + to site number/gid (via df.loc not df.iloc), column labels are the + variable keys that will be passed forward as SAM parameters. + inputs : dict + Dictionary of SAM system input parameters. + output_request : list | tuple | str + Requested SAM output(s) (e.g., 'ppa_price', 'lcoe_fcr'). + + Returns + ------- + sim.outputs : dict + Dictionary keyed by SAM variable names with SAM numerical results. + """ + + # Create SAM econ instance and calculate requested output. + sim = cls(sam_sys_inputs=inputs, + site_sys_inputs=dict(site_df.loc[site, :]), + output_request=output_request) + sim._site = site + + sim.assign_inputs() + sim.execute() + sim.collect_outputs() + + return sim.outputs
+ + +
[docs]class LCOE(Economic): + """SAM LCOE model. + """ + MODULE = 'lcoefcr' + PYSAM = PySamLCOE + + def __init__(self, sam_sys_inputs, site_sys_inputs=None, + output_request=('lcoe_fcr',)): + """Initialize a SAM LCOE economic model object.""" + super().__init__(sam_sys_inputs, site_sys_inputs=site_sys_inputs, + output_request=output_request) + + @staticmethod + def _parse_lcoe_inputs(site_df, cf_file, year): + """Parse for non-site-specific LCOE inputs. + + Parameters + ---------- + site_df : pd.DataFrame + Dataframe of site-specific input variables. Row index corresponds + to site number/gid (via df.loc not df.iloc), column labels are the + variable keys that will be passed forward as SAM parameters. + cf_file : str + reV generation capacity factor output file with path. + year : int | str | None + reV generation year to calculate econ for. Looks for cf_mean_{year} + or cf_profile_{year}. None will default to a non-year-specific cf + dataset (cf_mean, cf_profile). + + Returns + ------- + site_gids : list + List of all site gid values from the cf_file. + calc_aey : bool + Flag to require calculation of the annual energy yield before + running LCOE. + cf_arr : np.ndarray + Array of cf_mean values for all sites in the cf_file for the + given year. 
+ """ + + # get the cf_file meta data gid's to use as indexing tools + with Outputs(cf_file) as cfh: + site_gids = list(cfh.meta['gid']) + + calc_aey = False + if 'annual_energy' not in site_df: + # annual energy yield has not been input, flag to calculate + site_df.loc[:, 'annual_energy'] = np.nan + calc_aey = True + + # make sure capacity factor is present in site-specific data + if 'capacity_factor' not in site_df: + site_df.loc[:, 'capacity_factor'] = np.nan + + # pull all cf mean values for LCOE calc + with Outputs(cf_file) as cfh: + if 'cf_mean' in cfh.datasets: + cf_arr = cfh['cf_mean'] + elif 'cf_mean-{}'.format(year) in cfh.datasets: + cf_arr = cfh['cf_mean-{}'.format(year)] + elif 'cf_mean_{}'.format(year) in cfh.datasets: + cf_arr = cfh['cf_mean_{}'.format(year)] + elif 'cf' in cfh.datasets: + cf_arr = cfh['cf'] + else: + raise KeyError('Could not find cf_mean values for LCOE. ' + 'Available datasets: {}'.format(cfh.datasets)) + return site_gids, calc_aey, cf_arr + +
[docs] @staticmethod + def default(): + """Get the executed default pysam LCOE FCR object. + + Returns + ------- + PySAM.Lcoefcr + """ + return DefaultLCOE.default()
+ +
[docs] @classmethod + def reV_run(cls, points_control, site_df, cf_file, year, + output_request=('lcoe_fcr',)): + """Execute SAM LCOE simulations based on a reV points control instance. + + Parameters + ---------- + points_control : config.PointsControl + PointsControl instance containing project points site and SAM + config info. + site_df : pd.DataFrame + Dataframe of site-specific input variables. Row index corresponds + to site number/gid (via df.loc not df.iloc), column labels are the + variable keys that will be passed forward as SAM parameters. + cf_file : str + reV generation capacity factor output file with path. + year : int | str | None + reV generation year to calculate econ for. Looks for cf_mean_{year} + or cf_profile_{year}. None will default to a non-year-specific cf + dataset (cf_mean, cf_profile). + output_request : list | tuple | str + Output(s) to retrieve from SAM. + + Returns + ------- + out : dict + Nested dictionaries where the top level key is the site index, + the second level key is the variable name, second level value is + the output variable value. + """ + + out = {} + + site_gids, calc_aey, cf_arr = cls._parse_lcoe_inputs(site_df, cf_file, + year) + + for site in points_control.sites: + # get SAM inputs from project_points based on the current site + _, inputs = points_control.project_points[site] + + site_df = cls._get_annual_energy(site, site_df, site_gids, cf_arr, + inputs, calc_aey) + + out[site] = super().reV_run(site, site_df, inputs, output_request) + + return out
+ + +
[docs]class SingleOwner(Economic): + """SAM single owner economic model. + """ + MODULE = 'singleowner' + PYSAM = PySamSingleOwner + + def __init__(self, sam_sys_inputs, site_sys_inputs=None, + output_request=('ppa_price',)): + """Initialize a SAM single owner economic model object. + """ + super().__init__(sam_sys_inputs, site_sys_inputs=site_sys_inputs, + output_request=output_request) + + # run balance of system cost model if required + self.sam_sys_inputs, self.windbos_outputs = \ + self._windbos(self.sam_sys_inputs) + + @staticmethod + def _windbos(inputs): + """Run SAM Wind Balance of System cost model if requested. + + Parameters + ---------- + inputs : dict + Dictionary of SAM key-value pair inputs. + "total_installed_cost": "windbos" will trigger the windbos method. + + Returns + ------- + inputs : dict + Dictionary of SAM key-value pair inputs with the total installed + cost replaced with WindBOS values if requested. + output : dict + Dictionary of windbos cost breakdowns. + """ + + outputs = {} + if inputs is not None: + if 'total_installed_cost' in inputs: + if isinstance(inputs['total_installed_cost'], str): + if inputs['total_installed_cost'].lower() == 'windbos': + wb = WindBos(inputs) + inputs['total_installed_cost'] = \ + wb.total_installed_cost + outputs = wb.output + return inputs, outputs + +
[docs] @staticmethod + def default(): + """Get the executed default pysam Single Owner object. + + Returns + ------- + PySAM.Singleowner + """ + return DefaultSingleOwner.default()
+ +
[docs] def collect_outputs(self): + """Collect SAM output_request, convert timeseries outputs to UTC, and + save outputs to self.outputs property. This includes windbos outputs. + """ + + windbos_out_vars = [v for v in self.output_request + if v in self.windbos_outputs] + self.output_request = [v for v in self.output_request + if v not in windbos_out_vars] + + super().collect_outputs() + + windbos_results = {} + for request in windbos_out_vars: + windbos_results[request] = self.windbos_outputs[request] + + self.outputs.update(windbos_results)
+ +
[docs] @classmethod + def reV_run(cls, points_control, site_df, cf_file, year, + output_request=('ppa_price',)): + """Execute SAM SingleOwner simulations based on reV points control. + + Parameters + ---------- + points_control : config.PointsControl + PointsControl instance containing project points site and SAM + config info. + site_df : pd.DataFrame + Dataframe of site-specific input variables. Row index corresponds + to site number/gid (via df.loc not df.iloc), column labels are the + variable keys that will be passed forward as SAM parameters. + cf_file : str + reV generation capacity factor output file with path. + year : int | str | None + reV generation year to calculate econ for. Looks for cf_mean_{year} + or cf_profile_{year}. None will default to a non-year-specific cf + dataset (cf_mean, cf_profile). + output_request : list | tuple | str + Output(s) to retrieve from SAM. + + Returns + ------- + out : dict + Nested dictionaries where the top level key is the site index, + the second level key is the variable name, second level value is + the output variable value. + """ + + out = {} + + profiles = cls._get_cf_profiles(points_control.sites, cf_file, year) + + for i, site in enumerate(points_control.sites): + # get SAM inputs from project_points based on the current site + _, inputs = points_control.project_points[site] + + # ensure that site-specific data is not persisted to other sites + site_inputs = deepcopy(inputs) + + # set the generation profile as an input. + site_inputs = cls._make_gen_profile(i, site, profiles, site_df, + site_inputs) + + out[site] = super().reV_run(site, site_df, site_inputs, + output_request) + + return out
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/SAM/generation.html b/_modules/reV/SAM/generation.html new file mode 100644 index 000000000..9b82db137 --- /dev/null +++ b/_modules/reV/SAM/generation.html @@ -0,0 +1,2707 @@ + + + + + + reV.SAM.generation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.SAM.generation

+# -*- coding: utf-8 -*-
+"""reV-to-SAM generation interface module.
+
+Wraps the NREL-PySAM pvwattsv5, windpower, and tcsmolensalt modules with
+additional reV features.
+"""
+from abc import ABC, abstractmethod
+import copy
+import os
+import logging
+import numpy as np
+import pandas as pd
+from tempfile import TemporaryDirectory
+from warnings import warn
+import PySAM.Geothermal as PySamGeothermal
+import PySAM.Pvwattsv5 as PySamPv5
+import PySAM.Pvwattsv7 as PySamPv7
+import PySAM.Pvwattsv8 as PySamPv8
+import PySAM.Pvsamv1 as PySamDetailedPv
+import PySAM.Windpower as PySamWindPower
+import PySAM.TcsmoltenSalt as PySamCSP
+import PySAM.Swh as PySamSwh
+import PySAM.TroughPhysicalProcessHeat as PySamTpph
+import PySAM.LinearFresnelDsgIph as PySamLds
+import PySAM.MhkWave as PySamMhkWave
+
+from reV.SAM.defaults import (DefaultGeothermal,
+                              DefaultPvWattsv5,
+                              DefaultPvWattsv8,
+                              DefaultPvSamv1,
+                              DefaultWindPower,
+                              DefaultTcsMoltenSalt,
+                              DefaultSwh,
+                              DefaultTroughPhysicalProcessHeat,
+                              DefaultLinearFresnelDsgIph,
+                              DefaultMhkWave)
+from reV.utilities.exceptions import (SAMInputWarning, SAMExecutionError,
+                                      InputError)
+from reV.utilities.curtailment import curtail
+from reV.SAM.SAM import RevPySam
+from reV.SAM.econ import LCOE, SingleOwner
+from reV.losses import ScheduledLossesMixin, PowerCurveLossesMixin
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class AbstractSamGeneration(RevPySam, ScheduledLossesMixin, ABC): + """Base class for SAM generation simulations.""" + + def __init__(self, resource, meta, sam_sys_inputs, site_sys_inputs=None, + output_request=None, drop_leap=False): + """Initialize a SAM generation object. + + Parameters + ---------- + resource : pd.DataFrame + Timeseries solar or wind resource data for a single location with a + pandas DatetimeIndex. There must be columns for all the required + variables to run the respective SAM simulation. Remapping will be + done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> + dn and wind_speed -> windspeed) + meta : pd.DataFrame | pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, elevation, + and timezone. + sam_sys_inputs : dict + Site-agnostic SAM system model inputs arguments. + site_sys_inputs : dict + Optional set of site-specific SAM system inputs to complement the + site-agnostic inputs. + output_request : list + Requested SAM outputs (e.g., 'cf_mean', 'annual_energy', + 'cf_profile', 'gen_profile', 'energy_yield', 'ppa_price', + 'lcoe_fcr'). + drop_leap : bool + Drops February 29th from the resource data. If False, December + 31st is dropped from leap years. 
+ """ + + # drop the leap day + if drop_leap: + resource = self.drop_leap(resource) + + # make sure timezone and elevation are in the meta data + meta = self.tz_elev_check(sam_sys_inputs, site_sys_inputs, meta) + + # don't pass resource to base class, + # set in concrete generation classes instead + super().__init__(meta, sam_sys_inputs, output_request, + site_sys_inputs=site_sys_inputs) + + # Set the site number using resource + if hasattr(resource, 'name'): + self._site = resource.name + else: + self._site = None + + # let children pass in None resource + if resource is not None: + self.check_resource_data(resource) + self.set_resource_data(resource, meta) + + self.add_scheduled_losses(resource) + + @classmethod + def _get_res(cls, res_df, output_request): + """Get the resource arrays and pass through for output (single site). + + Parameters + ---------- + res_df : pd.DataFrame + 2D table with resource data. + output_request : list + Outputs to retrieve from SAM. + + Returns + ------- + res_mean : dict | None + Dictionary object with variables for resource arrays. + out_req_cleaned : list + Output request list with the resource request entries removed. + """ + + out_req_cleaned = copy.deepcopy(output_request) + res_out = None + + res_reqs = [] + ti = res_df.index + for req in out_req_cleaned: + if req in res_df: + res_reqs.append(req) + if res_out is None: + res_out = {} + res_out[req] = cls.ensure_res_len(res_df[req].values, ti) + + for req in res_reqs: + out_req_cleaned.remove(req) + + return res_out, out_req_cleaned + + @staticmethod + def _get_res_mean(resource, res_gid, output_request): + """Get the resource annual means (single site). + + Parameters + ---------- + resource : rex.sam_resource.SAMResource + SAM resource object for WIND resource + res_gid : int + Site to extract means for + output_request : list + Outputs to retrieve from SAM. + + Returns + ------- + res_mean : dict | None + Dictionary object with variables for resource means. 
+ out_req_nomeans : list + Output request list with the resource mean entries removed. + """ + + out_req_nomeans = copy.deepcopy(output_request) + res_mean = None + idx = resource.sites.index(res_gid) + irrad_means = ('dni_mean', 'dhi_mean', 'ghi_mean', + 'clearsky_dni_mean', 'clearsky_dhi_mean', + 'clearsky_ghi_mean') + + if 'ws_mean' in out_req_nomeans: + out_req_nomeans.remove('ws_mean') + res_mean = {} + res_mean['ws_mean'] = resource['mean_windspeed', idx] + + else: + for var in resource.var_list: + label_1 = '{}_mean'.format(var) + label_2 = 'mean_{}'.format(var) + if label_1 in out_req_nomeans: + out_req_nomeans.remove(label_1) + if res_mean is None: + res_mean = {} + res_mean[label_1] = resource[label_2, idx] + + if label_1 in irrad_means: + # convert to kWh/m2/day + res_mean[label_1] /= 1000 + res_mean[label_1] *= 24 + + return res_mean, out_req_nomeans + +
[docs] def check_resource_data(self, resource): + """Check resource dataframe for NaN values + + Parameters + ---------- + resource : pd.DataFrame + Timeseries solar or wind resource data for a single location with a + pandas DatetimeIndex. There must be columns for all the required + variables to run the respective SAM simulation. Remapping will be + done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> + dn and wind_speed -> windspeed) + """ + if pd.isna(resource).any().any(): + bad_vars = pd.isna(resource).any(axis=0) + bad_vars = resource.columns[bad_vars].values.tolist() + msg = ('Found NaN values for site {} in variables {}' + .format(self.site, bad_vars)) + logger.error(msg) + raise InputError(msg)
+ +
[docs] @abstractmethod + def set_resource_data(self, resource, meta): + """Placeholder for resource data setting (nsrdb or wtk)"""
+ +
[docs] @staticmethod + def tz_elev_check(sam_sys_inputs, site_sys_inputs, meta): + """Check timezone+elevation input and use json config + timezone+elevation if not in resource meta. + + Parameters + ---------- + sam_sys_inputs : dict + Site-agnostic SAM system model inputs arguments. + site_sys_inputs : dict + Optional set of site-specific SAM system inputs to complement the + site-agnostic inputs. + meta : pd.DataFrame | pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, elevation, + and timezone. + + Returns + ------- + meta : pd.DataFrame | pd.Series + Datafram or series for a single site. Will include "timezone" + and "elevation" from the sam and site system inputs if found. + """ + + if meta is not None: + if sam_sys_inputs is not None: + if 'elevation' in sam_sys_inputs: + meta['elevation'] = sam_sys_inputs['elevation'] + if 'timezone' in sam_sys_inputs: + meta['timezone'] = int(sam_sys_inputs['timezone']) + + # site-specific inputs take priority over generic system inputs + if site_sys_inputs is not None: + if 'elevation' in site_sys_inputs: + meta['elevation'] = site_sys_inputs['elevation'] + if 'timezone' in site_sys_inputs: + meta['timezone'] = int(site_sys_inputs['timezone']) + + if 'timezone' not in meta: + msg = ('Need timezone input to run SAM gen. Not found in ' + 'resource meta or technology json input config.') + raise SAMExecutionError(msg) + + return meta
+ + @property + def has_timezone(self): + """ Returns true if instance has a timezone set """ + if self._meta is not None: + if 'timezone' in self.meta: + return True + + return False + +
[docs] def cf_mean(self): + """Get mean capacity factor (fractional) from SAM. + + Returns + ------- + output : float + Mean capacity factor (fractional). + """ + return self['capacity_factor'] / 100
+ +
[docs] def cf_profile(self): + """Get hourly capacity factor (frac) profile in local timezone. + See self.outputs attribute for collected output data in UTC. + + Returns + ------- + cf_profile : np.ndarray + 1D numpy array of capacity factor profile. + Datatype is float32 and array length is 8760*time_interval. + """ + return self.gen_profile() / self.sam_sys_inputs['system_capacity']
+ +
[docs] def annual_energy(self): + """Get annual energy generation value in kWh from SAM. + + Returns + ------- + output : float + Annual energy generation (kWh). + """ + return self['annual_energy']
+ +
[docs] def energy_yield(self): + """Get annual energy yield value in kwh/kw from SAM. + + Returns + ------- + output : float + Annual energy yield (kwh/kw). + """ + return self['kwh_per_kw']
+ +
[docs] def gen_profile(self): + """Get power generation profile (local timezone) in kW. + See self.outputs attribute for collected output data in UTC. + + Returns + ------- + output : np.ndarray + 1D array of hourly power generation in kW. + Datatype is float32 and array length is 8760*time_interval. + """ + return np.array(self['gen'], dtype=np.float32)
+ +
[docs] def collect_outputs(self, output_lookup=None): + """Collect SAM output_request, convert timeseries outputs to UTC, and + save outputs to self.outputs property. + + + Parameters + ---------- + output_lookup : dict | None + Lookup dictionary mapping output keys to special output methods. + None defaults to generation default outputs. + """ + + if output_lookup is None: + output_lookup = {'cf_mean': self.cf_mean, + 'cf_profile': self.cf_profile, + 'annual_energy': self.annual_energy, + 'energy_yield': self.energy_yield, + 'gen_profile': self.gen_profile, + } + + super().collect_outputs(output_lookup=output_lookup)
+ +
[docs] def run_gen_and_econ(self): + """Run SAM generation with possibility for follow on econ analysis.""" + + lcoe_out_reqs = None + so_out_reqs = None + lcoe_vars = ('lcoe_fcr', 'fixed_charge_rate', 'capital_cost', + 'fixed_operating_cost', 'variable_operating_cost') + so_vars = ('ppa_price', 'lcoe_real', 'lcoe_nom', + 'project_return_aftertax_npv', 'flip_actual_irr', + 'gross_revenue') + if 'lcoe_fcr' in self.output_request: + lcoe_out_reqs = [r for r in self.output_request if r in lcoe_vars] + self.output_request = [r for r in self.output_request + if r not in lcoe_out_reqs] + elif any(x in self.output_request for x in so_vars): + so_out_reqs = [r for r in self.output_request if r in so_vars] + self.output_request = [r for r in self.output_request + if r not in so_out_reqs] + + # Execute the SAM generation compute module (pvwattsv7, windpower, etc) + self.run() + + # Execute a follow-on SAM econ compute module + # (lcoe_fcr, singleowner, etc) + if lcoe_out_reqs is not None: + self.sam_sys_inputs['annual_energy'] = self.annual_energy() + lcoe = LCOE(self.sam_sys_inputs, output_request=lcoe_out_reqs) + lcoe.assign_inputs() + lcoe.execute() + lcoe.collect_outputs() + self.outputs.update(lcoe.outputs) + + elif so_out_reqs is not None: + self.sam_sys_inputs['gen'] = self.gen_profile() + so = SingleOwner(self.sam_sys_inputs, output_request=so_out_reqs) + so.assign_inputs() + so.execute() + so.collect_outputs() + self.outputs.update(so.outputs)
+ +
[docs] def run(self): + """Run a reV-SAM generation object by assigning inputs, executing the + SAM simulation, collecting outputs, and converting all arrays to UTC. + """ + self.assign_inputs() + self.execute() + self.collect_outputs()
+ +
[docs] @classmethod + def reV_run(cls, points_control, res_file, site_df, + lr_res_file=None, output_request=('cf_mean',), + drop_leap=False, gid_map=None, nn_map=None, + bias_correct=None): + """Execute SAM generation based on a reV points control instance. + + Parameters + ---------- + points_control : config.PointsControl + PointsControl instance containing project points site and SAM + config info. + res_file : str + Resource file with full path. + site_df : pd.DataFrame + Dataframe of site-specific input variables. Row index corresponds + to site number/gid (via df.loc not df.iloc), column labels are the + variable keys that will be passed forward as SAM parameters. + lr_res_file : str | None + Optional low resolution resource file that will be dynamically + mapped+interpolated to the nominal-resolution res_file. This + needs to be of the same format as resource_file, e.g. they both + need to be handled by the same rex Resource handler such as + WindResource + output_request : list | tuple + Outputs to retrieve from SAM. + drop_leap : bool + Drops February 29th from the resource data. If False, December + 31st is dropped from leap years. + gid_map : None | dict + Mapping of unique integer generation gids (keys) to single integer + resource gids (values). This enables the user to input unique + generation gids in the project points that map to non-unique + resource gids. This can be None or a pre-extracted dict. + nn_map : np.ndarray + Optional 1D array of nearest neighbor mappings associated with the + res_file to lr_res_file spatial mapping. For details on this + argument, see the rex.MultiResolutionResource docstring. + bias_correct : None | pd.DataFrame + None if not provided or extracted DataFrame with wind or solar + resource bias correction table. This has columns: gid (can be index + name), adder, scalar. The gid field should match the true resource + gid regardless of the optional gid_map input. 
If both adder and + scalar are present, the wind or solar resource is corrected by + (res*scalar)+adder. If either adder or scalar is not present, + scalar defaults to 1 and adder to 0. Only windspeed or GHI+DNI are + corrected depending on the technology. GHI and DNI are corrected + with the same correction factors. + + Returns + ------- + out : dict + Nested dictionaries where the top level key is the site index, + the second level key is the variable name, second level value is + the output variable value. + """ + # initialize output dictionary + out = {} + + # Get the RevPySam resource object + resources = RevPySam.get_sam_res(res_file, + points_control.project_points, + points_control.project_points.tech, + output_request=output_request, + gid_map=gid_map, + lr_res_file=lr_res_file, + nn_map=nn_map, + bias_correct=bias_correct) + + # run resource through curtailment filter if applicable + curtailment = points_control.project_points.curtailment + if curtailment is not None: + resources = curtail(resources, curtailment, + random_seed=curtailment.random_seed) + + # iterate through project_points gen_gid values + for gen_gid in points_control.project_points.sites: + + # Lookup the resource gid if there's a mapping and get the resource + # data from the SAMResource object using the res_gid. + res_gid = gen_gid if gid_map is None else gid_map[gen_gid] + site_res_df, site_meta = resources._get_res_df(res_gid) + + # drop the leap day + if drop_leap: + site_res_df = cls.drop_leap(site_res_df) + + _, inputs = points_control.project_points[gen_gid] + + # get resource data pass-throughs and resource means + res_outs, out_req_cleaned = cls._get_res(site_res_df, + output_request) + res_mean, out_req_cleaned = cls._get_res_mean(resources, res_gid, + out_req_cleaned) + + # iterate through requested sites. 
+ sim = cls(resource=site_res_df, meta=site_meta, + sam_sys_inputs=inputs, output_request=out_req_cleaned, + site_sys_inputs=dict(site_df.loc[gen_gid, :])) + sim.run_gen_and_econ() + + # collect outputs to dictout + out[gen_gid] = sim.outputs + + if res_outs is not None: + out[gen_gid].update(res_outs) + + if res_mean is not None: + out[gen_gid].update(res_mean) + + return out
+ + +
[docs]class AbstractSamGenerationFromWeatherFile(AbstractSamGeneration, ABC): + """Base class for running sam generation with a weather file on disk. """ + WF_META_DROP_COLS = {'elevation', 'timezone', 'country', 'state', 'county', + 'urban', 'population', 'landcover', 'latitude', + 'longitude'} + + @property + @abstractmethod + def PYSAM_WEATHER_TAG(self): + """Name of the weather file input used by SAM generation module.""" + raise NotImplementedError + +
[docs] def set_resource_data(self, resource, meta): + """Generate the weather file and set the path as an input. + + Some PySAM models require a data file, not raw data. This method + generates the weather data, writes it to a file on disk, and + then sets the file as an input to the generation module. The + function + :meth:`~AbstractSamGenerationFromWeatherFile.run_gen_and_econ` + deletes the file on disk after a run is complete. + + Parameters + ---------- + resource : pd.DataFrame + Time series resource data for a single location with a + pandas DatetimeIndex. There must be columns for all the + required variables to run the respective SAM simulation. + Remapping will be done to convert typical NSRDB/WTK names + into SAM names (e.g. DNI -> dn and wind_speed -> windspeed). + meta : pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, + elevation, and timezone. + """ + self.time_interval = self.get_time_interval(resource.index.values) + pysam_w_fname = self._create_pysam_wfile(resource, meta) + self[self.PYSAM_WEATHER_TAG] = pysam_w_fname
+ + def _create_pysam_wfile(self, resource, meta): + """Create PySAM weather input file. + + Parameters + ---------- + resource : pd.DataFrame + Time series resource data for a single location with a + pandas DatetimeIndex. There must be columns for all the + required variables to run the respective SAM simulation. + Remapping will be done to convert typical NSRDB/WTK names + into SAM names (e.g. DNI -> dn and wind_speed -> windspeed). + meta : pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, + elevation, and timezone. + + Returns + ------- + fname : str + Name of weather csv file. + + Notes + ----- + PySAM will not accept data on Feb 29th. For leap years, + December 31st is dropped and time steps are shifted to relabel + Feb 29th as March 1st, March 1st as March 2nd, etc. + """ + # pylint: disable=attribute-defined-outside-init,consider-using-with + self._temp_dir = TemporaryDirectory() + fname = os.path.join(self._temp_dir.name, 'weather.csv') + logger.debug('Creating PySAM weather data file: {}'.format(fname)) + + # ------- Process metadata + m = pd.DataFrame(meta).T + timezone = m['timezone'] + m['Source'] = 'NSRDB' + m['Location ID'] = meta.name + m['City'] = '-' + m['State'] = m['state'].apply(lambda x: '-' if x == 'None' else x) + m['Country'] = m['country'].apply(lambda x: '-' if x == 'None' else x) + m['Latitude'] = m['latitude'] + m['Longitude'] = m['longitude'] + m['Time Zone'] = timezone + m['Elevation'] = m['elevation'] + m['Local Time Zone'] = timezone + m['Dew Point Units'] = 'c' + m['DHI Units'] = 'w/m2' + m['DNI Units'] = 'w/m2' + m['Temperature Units'] = 'c' + m['Pressure Units'] = 'mbar' + m['Wind Speed'] = 'm/s' + keep_cols = [c for c in m.columns if c not in self.WF_META_DROP_COLS] + m[keep_cols].to_csv(fname, index=False, mode='w') + + # --------- Process data + var_map = {'dni': 'DNI', + 'dhi': 'DHI', + 'wind_speed': 'Wind Speed', + 'air_temperature': 
'Temperature', + 'dew_point': 'Dew Point', + 'surface_pressure': 'Pressure', + } + resource = resource.rename(mapper=var_map, axis='columns') + + time_index = resource.index + # Adjust from UTC to local time + local = np.roll(resource.values, int(timezone * self.time_interval), + axis=0) + resource = pd.DataFrame(local, columns=resource.columns, + index=time_index) + mask = (time_index.month == 2) & (time_index.day == 29) + time_index = time_index[~mask] + + df = pd.DataFrame(index=time_index) + df['Year'] = time_index.year + df['Month'] = time_index.month + df['Day'] = time_index.day + df['Hour'] = time_index.hour + df['Minute'] = time_index.minute + df = df.join(resource.loc[~mask]) + + df.to_csv(fname, index=False, mode='a') + + return fname + +
[docs] def run_gen_and_econ(self): + """Run SAM generation and possibility follow-on econ analysis.""" + try: + super().run_gen_and_econ() + finally: + temp_dir = getattr(self, "_temp_dir", None) + if temp_dir is not None: + temp_dir.cleanup()
+ + +
[docs]class AbstractSamSolar(AbstractSamGeneration, ABC): + """Base Class for Solar generation from SAM""" + +
[docs] @staticmethod + def agg_albedo(time_index, albedo): + """Aggregate a timeseries of albedo data to monthly values w len 12 as + required by pysam Pvsamv1 + + Tech spec from pysam docs: + https://nrel-pysam.readthedocs.io/en/master/modules/Pvsamv1.html + #PySAM.Pvsamv1.Pvsamv1.SolarResource.albedo + + Parameters + ---------- + time_index : pd.DatetimeIndex + Timeseries solar resource datetimeindex + albedo : list + Timeseries Albedo data to be aggregated. Should be 0-1 and likely + hourly or less. + + Returns + ------- + monthly_albedo : list + 1D list of monthly albedo values with length 12 + """ + monthly_albedo = np.zeros(12).tolist() + albedo = np.array(albedo) + for month in range(1, 13): + m = np.where(time_index.month == month)[0] + monthly_albedo[int(month - 1)] = albedo[m].mean() + + return monthly_albedo
+ +
[docs] def set_resource_data(self, resource, meta): + """Set NSRDB resource data arrays. + + Parameters + ---------- + resource : pd.DataFrame + Timeseries solar or wind resource data for a single location with a + pandas DatetimeIndex. There must be columns for all the required + variables to run the respective SAM simulation. Remapping will be + done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> + dn and wind_speed -> windspeed) + meta : pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, elevation, + and timezone. + """ + + time_index = resource.index + self.time_interval = self.get_time_interval(resource.index.values) + + # map resource data names to SAM required data names + var_map = {'dni': 'dn', + 'dhi': 'df', + 'ghi': 'gh', + 'clearskydni': 'dn', + 'clearskydhi': 'df', + 'clearskyghi': 'gh', + 'windspeed': 'wspd', + 'airtemperature': 'tdry', + 'temperature': 'tdry', + 'temp': 'tdry', + 'dewpoint': 'tdew', + 'surfacepressure': 'pres', + 'pressure': 'pres', + 'surfacealbedo': 'albedo', + } + lower_case = {k: k.lower().replace(' ', '').replace('_', '') + for k in resource.columns} + irrad_vars = ['dn', 'df', 'gh'] + + resource = resource.rename(mapper=lower_case, axis='columns') + resource = resource.rename(mapper=var_map, axis='columns') + time_index = resource.index + resource = {k: np.array(v) for (k, v) in + resource.to_dict(orient='list').items()} + + # set resource variables + for var, arr in resource.items(): + if var != 'time_index': + + # ensure that resource array length is multiple of 8760 + arr = self.ensure_res_len(arr, time_index) + n_roll = int(self._meta['timezone'] * self.time_interval) + arr = np.roll(arr, n_roll) + + if var in irrad_vars: + if np.min(arr) < 0: + warn('Solar irradiance variable "{}" has a minimum ' + 'value of {}. Truncating to zero.' 
+ .format(var, np.min(arr)), SAMInputWarning) + arr = np.where(arr < 0, 0, arr) + + resource[var] = arr.tolist() + + resource['lat'] = meta['latitude'] + resource['lon'] = meta['longitude'] + resource['tz'] = meta['timezone'] + + if 'elevation' in meta: + resource['elev'] = meta['elevation'] + else: + resource['elev'] = 0.0 + + time_index = self.ensure_res_len(time_index, time_index) + resource['minute'] = time_index.minute + resource['hour'] = time_index.hour + resource['month'] = time_index.month + resource['year'] = time_index.year + resource['day'] = time_index.day + + if 'albedo' in resource: + self['albedo'] = self.agg_albedo( + time_index, resource.pop('albedo')) + + self['solar_resource_data'] = resource
+ + +
[docs]class AbstractSamPv(AbstractSamSolar, ABC): + """Photovoltaic (PV) generation with either pvwatts of detailed pv. + """ + + # set these class attrs in concrete subclasses + MODULE = None + PYSAM = None + + def __init__(self, resource, meta, sam_sys_inputs, site_sys_inputs=None, + output_request=None, drop_leap=False): + """Initialize a SAM solar object. + + See the PySAM :py:class:`~PySAM.Pvwattsv8.Pvwattsv8` (or older + version model) documentation for the configuration keys required + in the `sam_sys_inputs` config. You may also include the + following ``reV``-specific keys: + + - ``reV_outages`` : Specification for ``reV``-scheduled + stochastic outage losses. For example:: + + outage_info = [ + { + 'count': 6, + 'duration': 24, + 'percentage_of_capacity_lost': 100, + 'allowed_months': ['January', 'March'], + 'allow_outage_overlap': True + }, + { + 'count': 10, + 'duration': 1, + 'percentage_of_capacity_lost': 10, + 'allowed_months': ['January'], + 'allow_outage_overlap': False + }, + ... + ] + + See the description of + :meth:`~reV.losses.scheduled.ScheduledLossesMixin.add_scheduled_losses` + or the + `reV losses demo notebook <https://tinyurl.com/4d7uutt3/>`_ + for detailed instructions on how to specify this input. + - ``reV_outages_seed`` : Integer value used to seed the RNG + used to compute stochastic outage losses. + - ``time_index_step`` : Integer representing the step size + used to sample the ``time_index`` in the resource data. + This can be used to reduce temporal resolution (i.e. for + 30 minute NSRDB input data, ``time_index_step=1`` yields + the full 30 minute time series as output, while + ``time_index_step=2`` yields hourly output, and so forth). + + .. Note:: The reduced data shape (i.e. after applying a + step size of `time_index_step`) must still be an + integer multiple of 8760, or the execution will + fail. 
+ + - ``clearsky`` : Boolean flag value indicating wether + computation should use clearsky resource data to compute + generation data. + + Parameters + ---------- + resource : pd.DataFrame + Timeseries solar or wind resource data for a single location with a + pandas DatetimeIndex. There must be columns for all the required + variables to run the respective SAM simulation. Remapping will be + done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> + dn and wind_speed -> windspeed) + meta : pd.DataFrame | pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, elevation, + and timezone. + sam_sys_inputs : dict + Site-agnostic SAM system model inputs arguments. + site_sys_inputs : dict + Optional set of site-specific SAM system inputs to complement the + site-agnostic inputs. + output_request : list + Requested SAM outputs (e.g., 'cf_mean', 'annual_energy', + 'cf_profile', 'gen_profile', 'energy_yield', 'ppa_price', + 'lcoe_fcr'). + drop_leap : bool + Drops February 29th from the resource data. If False, December + 31st is dropped from leap years. + """ + + # need to check tilt=lat and azimuth for pv systems + meta = self._parse_meta(meta) + sam_sys_inputs = self.set_latitude_tilt_az(sam_sys_inputs, meta) + + super().__init__(resource, meta, sam_sys_inputs, + site_sys_inputs=site_sys_inputs, + output_request=output_request, + drop_leap=drop_leap) + +
[docs] def set_resource_data(self, resource, meta): + """Set NSRDB resource data arrays. + + Parameters + ---------- + resource : pd.DataFrame + Timeseries solar or wind resource data for a single location with a + pandas DatetimeIndex. There must be columns for all the required + variables to run the respective SAM simulation. Remapping will be + done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> + dn and wind_speed -> windspeed) + meta : pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, elevation, + and timezone. + + Raises + ------ + ValueError : If lat/lon outside of -90 to 90 and -180 to 180, + respectively. + + """ + bad_location_input = ((meta['latitude'] < -90) + | (meta['latitude'] > 90) + | (meta['longitude'] < -180) + | (meta['longitude'] > 180)) + if bad_location_input.any(): + raise ValueError("Detected latitude/longitude values outside of " + "the range -90 to 90 and -180 to 180, " + "respectively. Please ensure input resource data" + "locations conform to these ranges. ") + return super().set_resource_data(resource, meta)
+ +
[docs] @staticmethod + def set_latitude_tilt_az(sam_sys_inputs, meta): + """Check if tilt is specified as latitude and set tilt=lat, az=180 or 0 + + Parameters + ---------- + sam_sys_inputs : dict + Site-agnostic SAM system model inputs arguments. + meta : pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, elevation, + and timezone. + + Returns + ------- + sam_sys_inputs : dict + Site-agnostic SAM system model inputs arguments. + If for a pv simulation the "tilt" parameter was originally not + present or set to 'lat' or 'latitude', the tilt will be set to + the absolute value of the latitude found in meta and the azimuth + will be 180 if lat>0, 0 if lat<0. + """ + + set_tilt = False + if sam_sys_inputs is not None and meta is not None: + if 'tilt' not in sam_sys_inputs: + warn('No tilt specified, setting at latitude.', + SAMInputWarning) + set_tilt = True + else: + if (sam_sys_inputs['tilt'] == 'lat' + or sam_sys_inputs['tilt'] == 'latitude'): + set_tilt = True + + if set_tilt: + # set tilt to abs(latitude) + sam_sys_inputs['tilt'] = np.abs(meta['latitude']) + if meta['latitude'] > 0: + # above the equator, az = 180 + sam_sys_inputs['azimuth'] = 180 + else: + # below the equator, az = 0 + sam_sys_inputs['azimuth'] = 0 + + logger.debug('Tilt specified at "latitude", setting tilt to: {}, ' + 'azimuth to: {}' + .format(sam_sys_inputs['tilt'], + sam_sys_inputs['azimuth'])) + return sam_sys_inputs
+ +
[docs] def system_capacity_ac(self): + """Get AC system capacity from SAM inputs. + + NOTE: AC nameplate = DC nameplate / ILR + + Returns + ------- + cf_profile : float + AC nameplate = DC nameplate / ILR + """ + return (self.sam_sys_inputs['system_capacity'] + / self.sam_sys_inputs['dc_ac_ratio'])
+ +
[docs] def cf_mean(self): + """Get mean capacity factor (fractional) from SAM. + + NOTE: PV capacity factor is the AC power production / the DC nameplate + + Returns + ------- + output : float + Mean capacity factor (fractional). + PV CF is calculated as AC power / DC nameplate. + """ + return self['capacity_factor'] / 100
+ +
[docs] def cf_mean_ac(self): + """Get mean AC capacity factor (fractional) from SAM. + + NOTE: This value only available in PVWattsV8 and up. + + Returns + ------- + output : float + Mean AC capacity factor (fractional). + PV AC CF is calculated as AC power / AC nameplate. + """ + return self['capacity_factor_ac'] / 100
+ +
[docs] def cf_profile(self): + """Get hourly capacity factor (frac) profile in local timezone. + See self.outputs attribute for collected output data in UTC. + + NOTE: PV capacity factor is the AC power production / the DC nameplate + + Returns + ------- + cf_profile : np.ndarray + 1D numpy array of capacity factor profile. + Datatype is float32 and array length is 8760*time_interval. + PV CF is calculated as AC power / DC nameplate. + """ + return self.gen_profile() / self.sam_sys_inputs['system_capacity']
+ +
[docs] def cf_profile_ac(self): + """Get hourly AC capacity factor (frac) profile in local timezone. + See self.outputs attribute for collected output data in UTC. + + NOTE: PV AC capacity factor is the AC power production / the AC + nameplate. AC nameplate = DC nameplate / ILR + + Returns + ------- + cf_profile : np.ndarray + 1D numpy array of capacity factor profile. + Datatype is float32 and array length is 8760*time_interval. + PV AC CF is calculated as AC power / AC nameplate. + """ + return self.gen_profile() / self.system_capacity_ac()
+ +
[docs] def gen_profile(self): + """Get AC inverter power generation profile (local timezone) in kW. + This is an alias of the "ac" SAM output variable if PySAM version>=3. + See self.outputs attribute for collected output data in UTC. + + Returns + ------- + output : np.ndarray + 1D array of AC inverter power generation in kW. + Datatype is float32 and array length is 8760*time_interval. + """ + return np.array(self['gen'], dtype=np.float32)
+ +
[docs] def ac(self): + """Get AC inverter power generation profile (local timezone) in kW. + See self.outputs attribute for collected output data in UTC. + + Returns + ------- + output : np.ndarray + 1D array of AC inverter power generation in kW. + Datatype is float32 and array length is 8760*time_interval. + """ + return np.array(self['ac'], dtype=np.float32) / 1000
+ +
[docs] def dc(self): + """ + Get DC array power generation profile (local timezone) in kW. + See self.outputs attribute for collected output data in UTC. + + Returns + ------- + output : np.ndarray + 1D array of DC array power generation in kW. + Datatype is float32 and array length is 8760*time_interval. + """ + return np.array(self['dc'], dtype=np.float32) / 1000
+ +
[docs] def clipped_power(self): + """ + Get the clipped DC power generated behind the inverter + (local timezone) in kW. + See self.outputs attribute for collected output data in UTC. + + Returns + ------- + clipped : np.ndarray + 1D array of clipped DC power in kW. + Datatype is float32 and array length is 8760*time_interval. + """ + ac = self.ac() + dc = self.dc() + + return np.where(ac < ac.max(), 0, dc - ac)
+ +
[docs] @staticmethod + @abstractmethod + def default(): + """Get the executed default pysam object."""
+ +
[docs] def collect_outputs(self, output_lookup=None): + """Collect SAM output_request, convert timeseries outputs to UTC, and + save outputs to self.outputs property. + + Parameters + ---------- + output_lookup : dict | None + Lookup dictionary mapping output keys to special output methods. + None defaults to generation default outputs. + """ + + if output_lookup is None: + output_lookup = {'cf_mean': self.cf_mean, + 'cf_mean_ac': self.cf_mean_ac, + 'cf_profile': self.cf_profile, + 'cf_profile_ac': self.cf_profile_ac, + 'annual_energy': self.annual_energy, + 'energy_yield': self.energy_yield, + 'gen_profile': self.gen_profile, + 'ac': self.ac, + 'dc': self.dc, + 'clipped_power': self.clipped_power, + 'system_capacity_ac': self.system_capacity_ac, + } + + super().collect_outputs(output_lookup=output_lookup)
+ + +
[docs]class PvWattsv5(AbstractSamPv): + """Photovoltaic (PV) generation with pvwattsv5. + """ + MODULE = 'pvwattsv5' + PYSAM = PySamPv5 + +
[docs] @staticmethod + def default(): + """Get the executed default pysam PVWATTSV5 object. + + Returns + ------- + PySAM.Pvwattsv5 + """ + return DefaultPvWattsv5.default()
+ + +
[docs]class PvWattsv7(AbstractSamPv): + """Photovoltaic (PV) generation with pvwattsv7. + """ + MODULE = 'pvwattsv7' + PYSAM = PySamPv7 + +
[docs] @staticmethod + def default(): + """Get the executed default pysam PVWATTSV7 object. + + Returns + ------- + PySAM.Pvwattsv7 + """ + raise NotImplementedError("Pvwattsv7 default file no longer exists!")
+ + +
[docs]class PvWattsv8(AbstractSamPv): + """Photovoltaic (PV) generation with pvwattsv8. + """ + MODULE = 'pvwattsv8' + PYSAM = PySamPv8 + +
[docs] @staticmethod + def default(): + """Get the executed default pysam PVWATTSV8 object. + + Returns + ------- + PySAM.Pvwattsv8 + """ + return DefaultPvWattsv8.default()
+ + +
[docs]class PvSamv1(AbstractSamPv): + """Detailed PV model""" + + MODULE = 'Pvsamv1' + PYSAM = PySamDetailedPv + +
[docs] def ac(self): + """Get AC inverter power generation profile (local timezone) in kW. + See self.outputs attribute for collected output data in UTC. + + Returns + ------- + output : np.ndarray + 1D array of AC inverter power generation in kW. + Datatype is float32 and array length is 8760*time_interval. + """ + return np.array(self['gen'], dtype=np.float32)
+ +
[docs] def dc(self): + """ + Get DC array power generation profile (local timezone) in kW. + See self.outputs attribute for collected output data in UTC. + + Returns + ------- + output : np.ndarray + 1D array of DC array power generation in kW. + Datatype is float32 and array length is 8760*time_interval. + """ + return np.array(self['dc_net'], dtype=np.float32)
+ +
[docs] @staticmethod + def default(): + """Get the executed default pysam Pvsamv1 object. + + Returns + ------- + PySAM.Pvsamv1 + """ + return DefaultPvSamv1.default()
+ + +
[docs]class TcsMoltenSalt(AbstractSamSolar): + """Concentrated Solar Power (CSP) generation with tower molten salt + """ + MODULE = 'tcsmolten_salt' + PYSAM = PySamCSP + +
[docs] def cf_profile(self): + """Get absolute value hourly capacity factor (frac) profile in + local timezone. + See self.outputs attribute for collected output data in UTC. + + Returns + ------- + cf_profile : np.ndarray + 1D numpy array of capacity factor profile. + Datatype is float32 and array length is 8760*time_interval. + """ + x = np.abs(self.gen_profile() / self.sam_sys_inputs['system_capacity']) + return x
+ +
[docs] @staticmethod + def default(): + """Get the executed default pysam CSP object. + + Returns + ------- + PySAM.TcsmoltenSalt + """ + return DefaultTcsMoltenSalt.default()
+ + +
[docs]class SolarWaterHeat(AbstractSamGenerationFromWeatherFile): + """ + Solar Water Heater generation + """ + MODULE = 'solarwaterheat' + PYSAM = PySamSwh + PYSAM_WEATHER_TAG = 'solar_resource_file' + +
[docs] @staticmethod + def default(): + """Get the executed default pysam swh object. + + Returns + ------- + PySAM.Swh + """ + return DefaultSwh.default()
+ + +
[docs]class LinearDirectSteam(AbstractSamGenerationFromWeatherFile): + """ + Process heat linear Fresnel direct steam generation + """ + MODULE = 'lineardirectsteam' + PYSAM = PySamLds + PYSAM_WEATHER_TAG = 'file_name' + +
[docs] def cf_mean(self): + """Calculate mean capacity factor (fractional) from SAM. + + Returns + ------- + output : float + Mean capacity factor (fractional). + """ + net_power = self['annual_field_energy'] \ + - self['annual_thermal_consumption'] # kW-hr + # q_pb_des is in MW, convert to kW-hr + name_plate = self['q_pb_des'] * 8760 * 1000 + + return net_power / name_plate
+ +
[docs] @staticmethod + def default(): + """Get the executed default pysam linear Fresnel object. + + Returns + ------- + PySAM.LinearFresnelDsgIph + """ + return DefaultLinearFresnelDsgIph.default()
+ + +
[docs]class TroughPhysicalHeat(AbstractSamGenerationFromWeatherFile): + """ + Trough Physical Process Heat generation + """ + MODULE = 'troughphysicalheat' + PYSAM = PySamTpph + PYSAM_WEATHER_TAG = 'file_name' + +
[docs] def cf_mean(self): + """Calculate mean capacity factor (fractional) from SAM. + + Returns + ------- + output : float + Mean capacity factor (fractional). + """ + net_power = self['annual_gross_energy'] \ + - self['annual_thermal_consumption'] # kW-hr + # q_pb_des is in MW, convert to kW-hr + name_plate = self['q_pb_design'] * 8760 * 1000 + + return net_power / name_plate
+ +
[docs] @staticmethod + def default(): + """Get the executed default pysam trough object. + + Returns + ------- + PySAM.TroughPhysicalProcessHeat + """ + return DefaultTroughPhysicalProcessHeat.default()
+ + +
[docs]class Geothermal(AbstractSamGenerationFromWeatherFile): + """reV-SAM geothermal generation. + + As of 12/20/2022, the resource potential input in SAM is only used + to calculate the number of well replacements during the lifetime of + a geothermal plant. It was decided that reV would not model well + replacements. Therefore, reV sets the resource potential to match + (or be just above) the gross potential so that SAM does not throw + any errors. + + Also as of 12/20/2022, the SAM GETEM module requires a weather file, + but does not actually require any weather data to run. Therefore, + reV currently generates an empty weather file to pass to SAM. This + behavior can be easily updated in the future should the SAM GETEM + module start using weather data. + + See the PySAM :py:class:`~PySAM.Geothermal.Geothermal` documentation + for the configuration keys required in the `sam_sys_inputs` config. + Some notable keys include (non-exhaustive): + + - ``resource_type`` : Integer flag representing either + Hydrothermal (0) or EGS (1) resource. Only values of 0 or 1 + allowed. + - ``resource_potential`` : Total resource potential at location + (in MW). + + .. Important:: ``reV`` automatically sets the resource + potential to match the gross potential (see documentation + above), so this key should be left out of the config (it + will be overridden in any case). + + - ``resource_temp`` : Temperature of resource (in C). + + .. Important:: This value is set by ``reV`` based on the + user's geothermal resource data input. To override this + behavior, users *may* specify their own ``resource_temp`` + value (either a single value for all sites in the SAM + geothermal config or a site-dependent value in the project + points CSV). In this case, the resource temperature from + the input data will be ignored completely, and the + temperature at each location will be determined solely from + this input. + + - ``resource_depth`` : Depth to geothermal resource (in m). 
+ - ``analysis_type`` : Integer flag representing the plant + configuration. If the ``nameplate`` input is to be used to + specify the plant capacity, then this flag should be set to 0 + (this is the default ``reV`` assumption). Otherwise, if the + ``num_wells`` input is to be used to specify the plant site, + then this flag should be set to 1. Only values of 0 or 1 + allowed. + + - ``nameplate`` : Geothermal plant size (in kW). Only affects + the output if ``analysis_type=0``. + + .. Important:: Unlike wind or solar, ``reV`` geothermal + dynamically sets the size of a geothermal plant. In + particular, the plant capacity is set to match the resource + potential (obtained from the input data) for each site. For + this to work, users **must** leave out the ``nameplate`` + key from the SAM config. + + Alternatively, users *may* specify their own ``nameplate`` + capacity value (either a single value for all sites in the + SAM geothermal config or a site-dependent value in the + project points CSV). In this case, the resource potential + from the input data will be ignored completely, and the + capacity at each location will be determined solely from + this input. + + - ``num_wells`` : Number of wells at each plant. This value is + used to determined plant capacity if ``analysis_type=1``. + Otherwise this input has no effect. + - ``num_wells_getem`` : Number of wells assumed at each plant + for power block calculations. Only affects power block outputs + if ``analysis_type=0`` (otherwise the ``num_wells`` input is + used in power block calculations). + + .. Note:: ``reV`` does not currently adjust this value based + on the resource input (as it probably should). If any + power block outputs are required in the future, there may + need to be extra development to set this value based on + the dynamically calculated plant size. + + - ``conversion_type`` : Integer flag representing the conversion + plant type. Either Binary (0) or Flash (1). 
Only values of 0 + or 1 allowed. + - ``design_temp`` : EGS plant design temperature (in C). Only + affects EGS runs. If this value is set lower than the + resource temperature input, ``reV`` will adjust it to match + the latter in order to avoid SAM errors. + - ``geotherm.cost.inj_prod_well_ratio`` : Fraction representing + the injection to production well ratio (0-1). SAM GUI defaults + to 0.5 for this value, but it is recommended to set this to + the GETEM default of 0.75. + + + You may also include the following ``reV``-specific keys: + + - ``num_confirmation_wells`` : Number of confirmation wells that + can also be used as production wells. This number is used to + determined to total number of wells required at each plant, + and therefore the total drilling costs. This value defaults to + 2 (to match the SAM GUI as of 8/1/2023). However, the default + value can lead to negative costs if the plant size is small + (e.g. only 1 production well is needed, so the costs equal + -1 * ``drill_cost_per_well``). This is a limitation of the + SAM calculations (as of 8/1/2023), and it is therefore useful + to set ``num_confirmation_wells=0`` when performing ``reV`` + runs for small plant sizes. + - ``capital_cost_per_kw`` : Capital cost values in $/kW. If + this value is specified in the config, reV calculates and + overrides the total ``capital_cost`` value based on the + geothermal plant size (capacity) at each location. + - ``fixed_operating_cost`` : Fixed operating cost values in + $/kW. If this value is specified in the config, reV calculates + and overrides the total ``fixed_operating_cost`` value based + on the geothermal plant size (capacity) at each location. + - ``drill_cost_per_well`` : Drilling cost per well, in $. If + this value is specified in the config, reV calculates the + total drilling costs based on the number of wells that need to + be drilled at each location. The drilling costs are added to + the total ``capital_cost`` at each location. 
+ - ``reV_outages`` : Specification for ``reV``-scheduled + stochastic outage losses. For example:: + + outage_info = [ + { + 'count': 6, + 'duration': 24, + 'percentage_of_capacity_lost': 100, + 'allowed_months': ['January', 'March'], + 'allow_outage_overlap': True + }, + { + 'count': 10, + 'duration': 1, + 'percentage_of_capacity_lost': 10, + 'allowed_months': ['January'], + 'allow_outage_overlap': False + }, + ... + ] + + See the description of + :meth:`~reV.losses.scheduled.ScheduledLossesMixin.add_scheduled_losses` + or the + `reV losses demo notebook <https://tinyurl.com/4d7uutt3/>`_ + for detailed instructions on how to specify this input. + - ``reV_outages_seed`` : Integer value used to seed the RNG + used to compute stochastic outage losses. + - ``time_index_step`` : Integer representing the step size + used to sample the ``time_index`` in the resource data. + This can be used to reduce temporal resolution (i.e. for + 30 minute NSRDB input data, ``time_index_step=1`` yields + the full 30 minute time series as output, while + ``time_index_step=2`` yields hourly output, and so forth). + + """ + + MODULE = 'geothermal' + PYSAM = PySamGeothermal + PYSAM_WEATHER_TAG = "file_name" + _RESOURCE_POTENTIAL_MULT = 1.001 + _DEFAULT_NUM_CONFIRMATION_WELLS = 2 # SAM GUI default as of 5/26/23 + +
[docs] @staticmethod + def default(): + """Get the executed default PySAM Geothermal object. + + Returns + ------- + PySAM.Geothermal + """ + return DefaultGeothermal.default()
+ +
[docs] def cf_profile(self): + """Get hourly capacity factor (frac) profile in local timezone. + See self.outputs attribute for collected output data in UTC. + + Returns + ------- + cf_profile : np.ndarray + 1D numpy array of capacity factor profile. + Datatype is float32 and array length is 8760*time_interval. + """ + return self.gen_profile() / self.sam_sys_inputs['nameplate']
+ +
[docs] def assign_inputs(self): + """Assign the self.sam_sys_inputs attribute to the PySAM object.""" + if self.sam_sys_inputs.get("ui_calculations_only"): + msg = ('reV requires model run - cannot set ' + '"ui_calculations_only" to `True` (1). Automatically ' + 'setting to `False` (0)!') + logger.warning(msg) + warn(msg) + self.sam_sys_inputs["ui_calculations_only"] = 0 + super().assign_inputs()
+ +
[docs] def set_resource_data(self, resource, meta): + """Generate the weather file and set the path as an input. + + The Geothermal PySAM model requires a data file, not raw data. + This method generates the weather data, writes it to a file on + disk, and then sets the file as an input to the Geothermal + generation module. The function + :meth:`~AbstractSamGenerationFromWeatherFile.run_gen_and_econ` + deletes the file on disk after a run is complete. + + Parameters + ---------- + resource : pd.DataFrame + Time series resource data for a single location with a + pandas DatetimeIndex. There must be columns for all the + required variables to run the respective SAM simulation. + meta : pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, + elevation, and timezone. + """ + super().set_resource_data(resource, meta) + self._set_resource_temperature(resource) + self._set_egs_plant_design_temperature() + self._set_nameplate_to_match_resource_potential(resource) + self._set_resource_potential_to_match_gross_output() + self._set_costs()
+ + def _set_resource_temperature(self, resource): + """Set resource temp from data if user did not specify it. """ + + if "resource_temp" in self.sam_sys_inputs: + logger.debug("Found 'resource_temp' value in SAM config: {:.2f}" + .format(self.sam_sys_inputs["resource_temp"])) + return + + val = set(resource["temperature"].unique()) + logger.debug("Found {} value(s) for 'temperature' in resource data" + .format(len(val))) + if len(val) > 1: + msg = ("Found multiple values for 'temperature' for site {}: {}" + .format(self.site, val)) + logger.error(msg) + raise InputError(msg) + + val = val.pop() + logger.debug("Input 'resource_temp' not found in SAM config - setting " + "to {:.2f} based on input resource data." + .format(val)) + self.sam_sys_inputs["resource_temp"] = val + + def _set_egs_plant_design_temperature(self): + """Set the EGS plant temp to match resource, if necessary""" + if self.sam_sys_inputs.get("resource_type") != 1: + return # Not EGS run + + egs_plant_design_temp = self.sam_sys_inputs.get("design_temp", 0) + resource_temp = self.sam_sys_inputs["resource_temp"] + if egs_plant_design_temp > resource_temp: + msg = ('EGS plant design temperature ({}C) exceeds resource ' + 'temperature ({}C). Lowering EGS plant design temperature ' + 'to match resource temperature' + .format(egs_plant_design_temp, resource_temp)) + logger.warning(msg) + warn(msg) + self.sam_sys_inputs["design_temp"] = resource_temp + + def _set_nameplate_to_match_resource_potential(self, resource): + """Set the nameplate capacity to match the resource potential. """ + + if "nameplate" in self.sam_sys_inputs: + msg = ('Found "nameplate" input in config! Resource potential ' + 'from input data will be ignored. 
Nameplate capacity is {}' + .format(self.sam_sys_inputs["nameplate"])) + logger.info(msg) + return + + val = set(resource["potential_MW"].unique()) + if len(val) > 1: + msg = ('Found multiple values for "potential_MW" for site {}: {}' + .format(self.site, val)) + logger.error(msg) + raise InputError(msg) + + val = val.pop() * 1000 + + logger.debug("Setting the nameplate to {}".format(val)) + self.sam_sys_inputs["nameplate"] = val + + def _set_resource_potential_to_match_gross_output(self): + """Set the resource potential input to match the gross generation. + + If SAM throws an error during the UI calculation of the gross + output, the resource_potential is simply set to -1 since + SAM will error out for this point regardless of the + resource_potential input. + """ + + super().assign_inputs() + self["ui_calculations_only"] = 1 + try: + self.execute() + except SAMExecutionError: + self["ui_calculations_only"] = 0 + self.sam_sys_inputs["resource_potential"] = -1 + return + + gross_gen = (getattr(self.pysam.Outputs, "gross_output") + * self._RESOURCE_POTENTIAL_MULT) + if "resource_potential" in self.sam_sys_inputs: + msg = ('Setting "resource_potential" is not allowed! 
Updating ' + 'user input of {} to match the gross generation: {}' + .format(self.sam_sys_inputs["resource_potential"], + gross_gen)) + logger.warning(msg) + warn(msg) + + logger.debug("Setting the resource potential to {} MW" + .format(gross_gen)) + self.sam_sys_inputs["resource_potential"] = gross_gen + + ncw = self.sam_sys_inputs.pop("num_confirmation_wells", + self._DEFAULT_NUM_CONFIRMATION_WELLS) + self.sam_sys_inputs["prod_and_inj_wells_to_drill"] = ( + getattr(self.pysam.Outputs, "num_wells_getem_output") + - ncw + + getattr(self.pysam.Outputs, "num_wells_getem_inj")) + self["ui_calculations_only"] = 0 + + def _set_costs(self): + """Set the costs based on gross plant generation.""" + plant_size_kw = (self.sam_sys_inputs["resource_potential"] + / self._RESOURCE_POTENTIAL_MULT) * 1000 + + cc_per_kw = self.sam_sys_inputs.pop("capital_cost_per_kw", None) + if cc_per_kw is not None: + capital_cost = cc_per_kw * plant_size_kw + logger.debug("Setting the capital_cost to ${:,.2f}" + .format(capital_cost)) + self.sam_sys_inputs["capital_cost"] = capital_cost + + dc_per_well = self.sam_sys_inputs.pop("drill_cost_per_well", None) + num_wells = self.sam_sys_inputs.pop("prod_and_inj_wells_to_drill", + None) + if dc_per_well is not None: + if num_wells is None: + msg = ('Could not determine number of wells to be drilled. 
' + 'No drilling costs added!') + logger.warning(msg) + warn(msg) + else: + capital_cost = self.sam_sys_inputs["capital_cost"] + drill_cost = dc_per_well * num_wells + logger.debug("Setting the drilling cost to ${:,.2f} " + "({:.2f} wells at ${:,.2f} per well)" + .format(drill_cost, num_wells, dc_per_well)) + self.sam_sys_inputs["capital_cost"] = capital_cost + drill_cost + + foc_per_kw = self.sam_sys_inputs.pop("fixed_operating_cost_per_kw", + None) + if foc_per_kw is not None: + fixed_operating_cost = foc_per_kw * plant_size_kw + logger.debug("Setting the fixed_operating_cost to ${:,.2f}" + .format(capital_cost)) + self.sam_sys_inputs["fixed_operating_cost"] = fixed_operating_cost + + def _create_pysam_wfile(self, resource, meta): + """Create PySAM weather input file. + + Geothermal module requires a weather file, but does not actually + require any weather data to run. Therefore, an empty file is + generated and passed through. + + Parameters + ---------- + resource : pd.DataFrame + Time series resource data for a single location with a + pandas DatetimeIndex. There must be columns for all the + required variables to run the respective SAM simulation. + meta : pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, + and timezone. + + Returns + ------- + fname : str + Name of weather csv file. + + Notes + ----- + PySAM will not accept data on Feb 29th. For leap years, + December 31st is dropped and time steps are shifted to relabel + Feb 29th as March 1st, March 1st as March 2nd, etc. 
+ """ + # pylint: disable=attribute-defined-outside-init, consider-using-with + self._temp_dir = TemporaryDirectory() + fname = os.path.join(self._temp_dir.name, 'weather.csv') + logger.debug('Creating PySAM weather data file: {}'.format(fname)) + + # ------- Process metadata + m = pd.DataFrame(meta).T + m = m.rename({"latitude": "Latitude", "longitude": "Longitude", + "timezone": "Time Zone"}, axis=1) + + m[["Latitude", "Longitude", "Time Zone"]].to_csv(fname, index=False, + mode='w') + + # --------- Process data, blank for geothermal + time_index = resource.index + mask = (time_index.month == 2) & (time_index.day == 29) + time_index = time_index[~mask] + + df = pd.DataFrame(index=time_index) + df['Year'] = time_index.year + df['Month'] = time_index.month + df['Day'] = time_index.day + df['Hour'] = time_index.hour + df['Minute'] = time_index.minute + df.to_csv(fname, index=False, mode='a') + + return fname + +
[docs] def run_gen_and_econ(self): + """Run SAM generation and possibility follow-on econ analysis.""" + try: + super().run_gen_and_econ() + except SAMExecutionError as e: + logger.error("Skipping site {}; received sam error: {}" + .format(self._site, str(e))) + self.outputs = {}
+ + +
[docs]class AbstractSamWind(AbstractSamGeneration, PowerCurveLossesMixin, ABC): + """AbstractSamWind""" + + def __init__(self, *args, **kwargs): + """Wind generation from SAM. + + See the PySAM :py:class:`~PySAM.Windpower.Windpower` + documentation for the configuration keys required in the + `sam_sys_inputs` config. You may also include the following + ``reV``-specific keys: + + - ``reV_power_curve_losses`` : A dictionary that can be used + to initialize + :class:`~reV.losses.power_curve.PowerCurveLossesInput`. + For example:: + + reV_power_curve_losses = { + 'target_losses_percent': 9.8, + 'transformation': 'exponential_stretching' + } + + See the description of the class mentioned above or the + `reV losses demo notebook <https://tinyurl.com/4d7uutt3/>`_ + for detailed instructions on how to specify this input. + - ``reV_outages`` : Specification for ``reV``-scheduled + stochastic outage losses. For example:: + + outage_info = [ + { + 'count': 6, + 'duration': 24, + 'percentage_of_capacity_lost': 100, + 'allowed_months': ['January', 'March'], + 'allow_outage_overlap': True + }, + { + 'count': 10, + 'duration': 1, + 'percentage_of_capacity_lost': 10, + 'allowed_months': ['January'], + 'allow_outage_overlap': False + }, + ... + ] + + See the description of + :meth:`~reV.losses.scheduled.ScheduledLossesMixin.add_scheduled_losses` + or the + `reV losses demo notebook <https://tinyurl.com/4d7uutt3/>`_ + for detailed instructions on how to specify this input. + - ``reV_outages_seed`` : Integer value used to seed the RNG + used to compute stochastic outage losses. + - ``time_index_step`` : Integer representing the step size + used to sample the ``time_index`` in the resource data. + This can be used to reduce temporal resolution (i.e. for + 30 minute input data, ``time_index_step=1`` yields the + full 30 minute time series as output, while + ``time_index_step=2`` yields hourly output, and so forth). + + .. Note:: The reduced data shape (i.e. 
after applying a + step size of `time_index_step`) must still be + an integer multiple of 8760, or the execution + will fail. + + Parameters + ---------- + resource : pd.DataFrame + Timeseries solar or wind resource data for a single location with a + pandas DatetimeIndex. There must be columns for all the required + variables to run the respective SAM simulation. Remapping will be + done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> + dn and wind_speed -> windspeed) + meta : pd.DataFrame | pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, elevation, + and timezone. + sam_sys_inputs : dict + Site-agnostic SAM system model inputs arguments. + site_sys_inputs : dict + Optional set of site-specific SAM system inputs to complement the + site-agnostic inputs. + output_request : list + Requested SAM outputs (e.g., 'cf_mean', 'annual_energy', + 'cf_profile', 'gen_profile', 'energy_yield', 'ppa_price', + 'lcoe_fcr'). + drop_leap : bool + Drops February 29th from the resource data. If False, December + 31st is dropped from leap years. + """ + super().__init__(*args, **kwargs) + self.add_power_curve_losses()
+ + +
[docs]class WindPower(AbstractSamWind): + """Class for Wind generation from SAM + """ + MODULE = 'windpower' + PYSAM = PySamWindPower + +
[docs] def set_resource_data(self, resource, meta): + """Set WTK resource data arrays. + + Parameters + ---------- + resource : pd.DataFrame + Timeseries solar or wind resource data for a single location with a + pandas DatetimeIndex. There must be columns for all the required + variables to run the respective SAM simulation. Remapping will be + done to convert typical NSRDB/WTK names into SAM names (e.g. DNI -> + dn and wind_speed -> windspeed) + meta : pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, elevation, + and timezone. + """ + + # map resource data names to SAM required data names + var_map = {'speed': 'windspeed', + 'direction': 'winddirection', + 'airtemperature': 'temperature', + 'temp': 'temperature', + 'surfacepressure': 'pressure', + 'relativehumidity': 'rh', + 'humidity': 'rh', + } + lower_case = {k: k.lower().replace(' ', '').replace('_', '') + for k in resource.columns} + resource = resource.rename(mapper=lower_case, axis='columns') + resource = resource.rename(mapper=var_map, axis='columns') + + data_dict = {} + var_list = ['temperature', 'pressure', 'windspeed', 'winddirection'] + if 'winddirection' not in resource: + resource['winddirection'] = 0.0 + + time_index = resource.index + self.time_interval = self.get_time_interval(resource.index.values) + + data_dict['fields'] = [1, 2, 3, 4] + data_dict['heights'] = 4 * [self.sam_sys_inputs['wind_turbine_hub_ht']] + + if 'rh' in resource: + # set relative humidity for icing. 
+ rh = self.ensure_res_len(resource['rh'].values, time_index) + n_roll = int(meta['timezone'] * self.time_interval) + rh = np.roll(rh, n_roll, axis=0) + data_dict['rh'] = rh.tolist() + + # must be set as matrix in [temperature, pres, speed, direction] order + # ensure that resource array length is multiple of 8760 + # roll the truncated resource array to local timezone + temp = self.ensure_res_len(resource[var_list].values, time_index) + n_roll = int(meta['timezone'] * self.time_interval) + temp = np.roll(temp, n_roll, axis=0) + data_dict['data'] = temp.tolist() + + data_dict['lat'] = meta['latitude'] + data_dict['lon'] = meta['longitude'] + data_dict['tz'] = meta['timezone'] + data_dict['elev'] = meta['elevation'] + + time_index = self.ensure_res_len(time_index, time_index) + data_dict['minute'] = time_index.minute + data_dict['hour'] = time_index.hour + data_dict['year'] = time_index.year + data_dict['month'] = time_index.month + data_dict['day'] = time_index.day + + # add resource data to self.data and clear + self['wind_resource_data'] = data_dict + self['wind_resource_model_choice'] = 0
+ +
[docs] @staticmethod + def default(): + """Get the executed default pysam WindPower object. + + Returns + ------- + PySAM.Windpower + """ + return DefaultWindPower.default()
+ + +# pylint: disable=too-many-ancestors +
[docs]class WindPowerPD(AbstractSamGeneration, PowerCurveLossesMixin): + """WindPower analysis with wind speed/direction joint probabilty + distrubtion input""" + + MODULE = 'windpower' + PYSAM = PySamWindPower + + def __init__(self, ws_edges, wd_edges, wind_dist, + meta, sam_sys_inputs, + site_sys_inputs=None, output_request=None): + """Initialize a SAM generation object for windpower with a + speed/direction joint probability distribution. + + Parameters + ---------- + ws_edges : np.ndarray + 1D array of windspeed (m/s) values that set the bin edges for the + wind probability distribution. Same len as wind_dist.shape[0] + 1 + wd_edges : np.ndarray + 1D array of winddirections (deg) values that set the bin edges + for the wind probability dist. Same len as wind_dist.shape[1] + 1 + wind_dist : np.ndarray + 2D array probability distribution of (windspeed, winddirection). + meta : pd.DataFrame | pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, elevation, + and timezone. + sam_sys_inputs : dict + Site-agnostic SAM system model inputs arguments. + site_sys_inputs : dict + Optional set of site-specific SAM system inputs to complement the + site-agnostic inputs. + output_request : list + Requested SAM outputs (e.g., 'cf_mean', 'annual_energy', + 'cf_profile', 'gen_profile', 'energy_yield', 'ppa_price', + 'lcoe_fcr'). + """ + + # make sure timezone and elevation are in the meta data + meta = self.tz_elev_check(sam_sys_inputs, site_sys_inputs, meta) + + # don't pass resource to base class, + # set in concrete generation classes instead + super().__init__(None, meta, sam_sys_inputs, + site_sys_inputs=site_sys_inputs, + output_request=output_request, + drop_leap=False) + + # Set the site number using meta data + if hasattr(meta, 'name'): + self._site = meta.name + else: + self._site = None + + self.set_resource_data(ws_edges, wd_edges, wind_dist) + self.add_power_curve_losses() + +
[docs] def set_resource_data(self, ws_edges, wd_edges, wind_dist): + """Send wind PD to pysam + + Parameters + ---------- + ws_edges : np.ndarray + 1D array of windspeed (m/s) values that set the bin edges for the + wind probability distribution. Same len as wind_dist.shape[0] + 1 + wd_edges : np.ndarray + 1D array of winddirections (deg) values that set the bin edges + for the wind probability dist. Same len as wind_dist.shape[1] + 1 + wind_dist : np.ndarray + 2D array probability distribution of (windspeed, winddirection). + """ + + assert len(ws_edges) == wind_dist.shape[0] + 1 + assert len(wd_edges) == wind_dist.shape[1] + 1 + + wind_dist /= wind_dist.sum() + + # SAM wants the midpoints of the sample bins + ws_points = ws_edges[:-1] + np.diff(ws_edges) / 2 + wd_points = wd_edges[:-1] + np.diff(wd_edges) / 2 + + wd_points, ws_points = np.meshgrid(wd_points, ws_points) + vstack = (ws_points.flatten(), + wd_points.flatten(), + wind_dist.flatten()) + wrd = np.vstack(vstack).T.tolist() + + self['wind_resource_model_choice'] = 2 + self['wind_resource_distribution'] = wrd
+ + +
[docs]class MhkWave(AbstractSamGeneration): + """Class for Wave generation from SAM + """ + MODULE = 'mhkwave' + PYSAM = PySamMhkWave + +
[docs] def set_resource_data(self, resource, meta): + """Set Hindcast US Wave resource data arrays. + + Parameters + ---------- + resource : pd.DataFrame + Timeseries resource data for a single location with a + pandas DatetimeIndex. There must be columns for all the required + variables to run the respective SAM simulation. + meta : pd.Series + Meta data corresponding to the resource input for the single + location. Should include values for latitude, longitude, elevation, + and timezone. + """ + + # map resource data names to SAM required data names + var_map = {'significantwaveheight': 'significant_wave_height', + 'waveheight': 'significant_wave_height', + 'height': 'significant_wave_height', + 'swh': 'significant_wave_height', + 'energyperiod': 'energy_period', + 'waveperiod': 'energy_period', + 'period': 'energy_period', + 'ep': 'energy_period', + } + lower_case = {k: k.lower().replace(' ', '').replace('_', '') + for k in resource.columns} + resource = resource.rename(mapper=lower_case, axis='columns') + resource = resource.rename(mapper=var_map, axis='columns') + + data_dict = {} + + time_index = resource.index + self.time_interval = self.get_time_interval(resource.index.values) + + # must be set as matrix in [temperature, pres, speed, direction] order + # ensure that resource array length is multiple of 8760 + # roll the truncated resource array to local timezone + for var in ['significant_wave_height', 'energy_period']: + arr = self.ensure_res_len(resource[var].values, time_index) + n_roll = int(meta['timezone'] * self.time_interval) + data_dict[var] = np.roll(arr, n_roll, axis=0).tolist() + + data_dict['lat'] = meta['latitude'] + data_dict['lon'] = meta['longitude'] + data_dict['tz'] = meta['timezone'] + + time_index = self.ensure_res_len(time_index, time_index) + data_dict['minute'] = time_index.minute + data_dict['hour'] = time_index.hour + data_dict['year'] = time_index.year + data_dict['month'] = time_index.month + data_dict['day'] = time_index.day + + 
# add resource data to self.data and clear + self['wave_resource_data'] = data_dict
+ +
[docs] @staticmethod + def default(): + """Get the executed default PySAM MhkWave object. + + Returns + ------- + PySAM.MhkWave + """ + return DefaultMhkWave.default()
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/SAM/version_checker.html b/_modules/reV/SAM/version_checker.html new file mode 100644 index 000000000..8c317013f --- /dev/null +++ b/_modules/reV/SAM/version_checker.html @@ -0,0 +1,751 @@ + + + + + + reV.SAM.version_checker — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.SAM.version_checker

+# -*- coding: utf-8 -*-
+"""Module to check PySAM versions and correct input keys to new SAM 2 keys.
+
+Created on Mon Feb  3 14:40:42 2020
+
+@author: gbuster
+"""
+import logging
+from warnings import warn
+from pkg_resources import get_distribution
+from packaging import version
+from reV.utilities.exceptions import PySAMVersionError, PySAMVersionWarning
+
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class PySamVersionChecker: + """Check the PySAM version and modify input keys if required.""" + + WIND = {'wind_farm_losses_percent': 'turb_generic_loss'} + V2_CORRECTION_KEYS = {'windpower': WIND} + + def __init__(self, requirement='2'): + """ + Parameters + ---------- + requirement : str + PySAM version requirement. + """ + self._requirement = requirement + self._check_version() + + def _check_version(self, exception=True): + """Check the PySAM version and raise exception or warning.""" + check = (version.parse(self.pysam_version) + < version.parse(self._requirement)) + if check: + m = ('Bad PySAM version "{}". Requires: "{}".' + .format(self.pysam_version, self._requirement)) + if exception: + logger.error(m) + raise PySAMVersionError(m) + else: + logger.warning(m) + warn(m, PySAMVersionWarning) + + def _check_inputs(self, tech, parameters): + """Check PySAM inputs and modify keys to reflect different + PySAM versions. Currently set to only correct inputs for PySAM v2. + + Parameters + ---------- + tech : str + reV-SAM technology string and key to the V2_CORRECTION_KEYS dict + parameters : dict + SAM input dictionary. + + Returns + ------- + parameters : dict + Updated input parameters dictionary + """ + + if version.parse(self.pysam_version) >= version.parse('2'): + parameters = self._check_inputs_v2(tech, parameters) + + return parameters + + def _check_inputs_v2(self, tech, parameters): + """Check PySAM inputs and modify keys to reflect PySAM 2. + + Parameters + ---------- + tech : str + reV-SAM technology string and key to the V2_CORRECTION_KEYS dict + parameters : dict + SAM input dictionary. Will be checked for valid keys if + PySAM version > 2. 
+ + Returns + ------- + parameters : dict + Updated input parameters dictionary + """ + + corrections = None + for key, value in self.V2_CORRECTION_KEYS.items(): + if key in tech: + corrections = value + break + + if corrections is not None: + for key in corrections: + if key in parameters: + new_key = corrections[key] + parameters[new_key] = parameters.pop(key) + m = ('It appears old SAM v1 keys are being used. ' + 'Updated key "{}" to "{}".'.format(key, new_key)) + logger.warning(m) + warn(m, PySAMVersionWarning) + + return parameters + + @property + def pysam_version(self): + """Get the PySAM distribution version""" + return str(get_distribution('nrel-pysam')).split(' ')[1] + +
[docs] @classmethod + def run(cls, tech, parameters): + """Run PySAM version and inputs checker and modify keys to reflect + PySAM 2 updates. + + Parameters + ---------- + tech : str + reV-SAM technology string and key to the V2_CORRECTION_KEYS dict + parameters : dict + SAM input dictionary. Will be checked for valid keys if + PySAM version > 2. + + Returns + ------- + parameters : dict + Updated input parameters dictionary + """ + x = cls() + parameters = x._check_inputs(tech, parameters) + return parameters
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/SAM/windbos.html b/_modules/reV/SAM/windbos.html new file mode 100644 index 000000000..23b5b4c27 --- /dev/null +++ b/_modules/reV/SAM/windbos.html @@ -0,0 +1,843 @@ + + + + + + reV.SAM.windbos — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.SAM.windbos

+# -*- coding: utf-8 -*-
+"""
+SAM Wind Balance of System Cost Model
+"""
+from copy import deepcopy
+import numpy as np
+from PySAM.PySSC import ssc_sim_from_dict
+
+from reV.utilities.exceptions import SAMInputError
+
+
+
[docs]class WindBos: + """Wind Balance of System Cost Model.""" + + MODULE = 'windbos' + + # keys for the windbos input data dictionary. + # Some keys may not be found explicitly in the SAM input. + KEYS = ('tech_model', + 'financial_model', + 'machine_rating', + 'rotor_diameter', + 'hub_height', + 'number_of_turbines', + 'interconnect_voltage', + 'distance_to_interconnect', + 'site_terrain', + 'turbine_layout', + 'soil_condition', + 'construction_time', + 'om_building_size', + 'quantity_test_met_towers', + 'quantity_permanent_met_towers', + 'weather_delay_days', + 'crane_breakdowns', + 'access_road_entrances', + 'turbine_capital_cost', + 'turbine_cost_per_kw', + 'tower_top_mass', + 'delivery_assist_required', + 'pad_mount_transformer_required', + 'new_switchyard_required', + 'rock_trenching_required', + 'mv_thermal_backfill', + 'mv_overhead_collector', + 'performance_bond', + 'contingency', + 'warranty_management', + 'sales_and_use_tax', + 'overhead', + 'profit_margin', + 'development_fee', + 'turbine_transportation') + + def __init__(self, inputs): + """ + Parameters + ---------- + inputs : dict + SAM key value pair inputs. 
+ """ + + self._turbine_capital_cost = 0.0 + self._datadict = {} + + self._inputs = inputs + self._special = {'tech_model': 'windbos', + 'financial_model': 'none', + 'machine_rating': self.machine_rating, + 'hub_height': self.hub_height, + 'rotor_diameter': self.rotor_diameter, + 'number_of_turbines': self.number_of_turbines, + 'turbine_capital_cost': self.turbine_capital_cost, + } + self._parse_inputs() + self._out = ssc_sim_from_dict(self._datadict) + + def _parse_inputs(self): + """Parse SAM inputs into a windbos input dict and perform any + required special operations.""" + + for k in self.KEYS: + if k in self._special: + self._datadict[k] = self._special[k] + elif k not in self._inputs: + raise SAMInputError('Windbos requires input key: "{}"' + .format(k)) + else: + self._datadict[k] = self._inputs[k] + + @property + def machine_rating(self): + """Single turbine machine rating either from input or power curve.""" + if 'machine_rating' in self._inputs: + return self._inputs['machine_rating'] + else: + return np.max(self._inputs['wind_turbine_powercurve_powerout']) + + @property + def hub_height(self): + """Turbine hub height.""" + if 'wind_turbine_hub_ht' in self._inputs: + return self._inputs['wind_turbine_hub_ht'] + else: + return self._inputs['hub_height'] + + @property + def rotor_diameter(self): + """Turbine rotor diameter.""" + if 'wind_turbine_rotor_diameter' in self._inputs: + return self._inputs['wind_turbine_rotor_diameter'] + else: + return self._inputs['rotor_diameter'] + + @property + def number_of_turbines(self): + """Number of turbines either based on input or system (farm) capacity + and machine rating""" + + if 'number_of_turbines' in self._inputs: + return self._inputs['number_of_turbines'] + else: + return self._inputs['system_capacity'] / self.machine_rating + + @property + def turbine_capital_cost(self): + """Returns zero (no turbine capital cost for WindBOS input, + and assigns any input turbine_capital_cost to an attr""" + + if 
'turbine_capital_cost' in self._inputs: + self._turbine_capital_cost = self._inputs['turbine_capital_cost'] + else: + self._turbine_capital_cost = 0.0 + return 0.0 + + @property + def bos_cost(self): + """Get the balance of system cost ($).""" + return self._out['project_total_budgeted_cost'] + + @property + def turbine_cost(self): + """Get the turbine cost ($).""" + tcost = ((self._inputs['turbine_cost_per_kw'] + * self.machine_rating + * self.number_of_turbines) + + (self._turbine_capital_cost + * self.number_of_turbines)) + return tcost + + @property + def sales_tax_mult(self): + """Get a sales tax multiplier (frac of the total installed cost).""" + basis = self._inputs.get('sales_tax_basis', 0) / 100 + tax = self._datadict.get('sales_and_use_tax', 0) / 100 + return basis * tax + + @property + def sales_tax_cost(self): + """Get the cost of sales tax ($).""" + return (self.bos_cost + self.turbine_cost) * self.sales_tax_mult + + @property + def total_installed_cost(self): + """Get the total installed cost ($) (bos + turbine).""" + return self.bos_cost + self.turbine_cost + self.sales_tax_cost + + @property + def output(self): + """Get a dictionary containing the cost breakdown.""" + output = {'total_installed_cost': self.total_installed_cost, + 'turbine_cost': self.turbine_cost, + 'sales_tax_cost': self.sales_tax_cost, + 'bos_cost': self.bos_cost} + return output + + # pylint: disable-msg=W0613 +
[docs] @classmethod + def reV_run(cls, points_control, site_df, + output_request=('total_installed_cost',), **kwargs): + """Execute SAM SingleOwner simulations based on reV points control. + + Parameters + ---------- + points_control : config.PointsControl + PointsControl instance containing project points site and SAM + config info. + site_df : pd.DataFrame + Dataframe of site-specific input variables. Row index corresponds + to site number/gid (via df.loc not df.iloc), column labels are the + variable keys that will be passed forward as SAM parameters. + output_request : list | tuple | str + Output(s) to retrieve from SAM. + kwargs : dict + Not used but maintained for polymorphic calls with other + SAM econ reV_run() methods (lcoe and single owner). + Breaks pylint error W0613: unused argument. + + Returns + ------- + out : dict + Nested dictionaries where the top level key is the site index, + the second level key is the variable name, second level value is + the output variable value. + """ + out = {} + + for site in points_control.sites: + # get SAM inputs from project_points based on the current site + _, inputs = points_control.project_points[site] + + # ensure that site-specific data is not persisted to other sites + site_inputs = deepcopy(inputs) + + site_inputs.update(dict(site_df.loc[site, :])) + + wb = cls(site_inputs) + + out[site] = {k: v for k, v in wb.output.items() + if k in output_request} + + return out
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/bespoke/bespoke.html b/_modules/reV/bespoke/bespoke.html new file mode 100644 index 000000000..c35dd3c8e --- /dev/null +++ b/_modules/reV/bespoke/bespoke.html @@ -0,0 +1,2930 @@ + + + + + + reV.bespoke.bespoke — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.bespoke.bespoke

+# -*- coding: utf-8 -*-
+"""
+reV bespoke wind plant analysis tools
+"""
+# TODO update docstring
+# TODO check on outputs
+import time
+import logging
+import copy
+import pandas as pd
+import numpy as np
+import os
+import json
+import psutil
+from importlib import import_module
+from numbers import Number
+from concurrent.futures import as_completed
+from warnings import warn
+
+from reV.config.output_request import SAMOutputRequest
+from reV.generation.generation import Gen
+from reV.SAM.generation import WindPower, WindPowerPD
+from reV.econ.utilities import lcoe_fcr
+from reV.handlers.outputs import Outputs
+from reV.handlers.exclusions import ExclusionLayers
+from reV.supply_curve.extent import SupplyCurveExtent
+from reV.supply_curve.points import AggregationSupplyCurvePoint as AggSCPoint
+from reV.supply_curve.points import SupplyCurvePoint
+from reV.supply_curve.aggregation import BaseAggregation, AggFileHandler
+from reV.utilities.exceptions import (EmptySupplyCurvePointError,
+                                      FileInputError)
+from reV.utilities import log_versions, ModuleName
+
+from rex.joint_pd.joint_pd import JointPD
+from rex.renewable_resource import WindResource
+from rex.multi_year_resource import MultiYearWindResource
+from rex.utilities.loggers import log_mem, create_dirs
+from rex.utilities.utilities import parse_year
+from rex.utilities.execution import SpawnProcessPool
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class BespokeMultiPlantData: + """Multi-plant preloaded data. + + This object is intended to facilitate the use of pre-loaded data for + running :class:`BespokeWindPlants` on systems with slow parallel + reads to a single HDF5 file. + """ + + def __init__(self, res_fpath, sc_gid_to_hh, sc_gid_to_res_gid): + """Initialize BespokeMultiPlantData + + Parameters + ---------- + res_fpath : str + Path to resource h5 file. + sc_gid_to_hh : dict + Dictionary mapping SC GID values to hub-heights. Data for + each SC GID will be pulled for the corresponding hub-height + given in this dictionary. + sc_gid_to_res_gid : dict + Dictionary mapping SC GID values to an iterable oif resource + GID values. Resource GID values should correspond to GID + values in teh HDF5 file, so any GID map must be applied + before initializing :class`BespokeMultiPlantData`. + """ + self.res_fpath = res_fpath + self.sc_gid_to_hh = sc_gid_to_hh + self.sc_gid_to_res_gid = sc_gid_to_res_gid + self.hh_to_res_gids = {} + self._wind_dirs = None + self._wind_speeds = None + self._temps = None + self._pressures = None + self._time_index = None + self._pre_load_data() + + def _pre_load_data(self): + """Pre-load the resource data. 
""" + + for sc_gid, gids in self.sc_gid_to_res_gid.items(): + hh = self.sc_gid_to_hh[sc_gid] + self.hh_to_res_gids.setdefault(hh, set()).update(gids) + + self.hh_to_res_gids = {hh: sorted(gids) + for hh, gids in self.hh_to_res_gids.items()} + + start_time = time.time() + if '*' in self.res_fpath: + handler = MultiYearWindResource + else: + handler = WindResource + + with handler(self.res_fpath) as res: + self._wind_dirs = {hh: res[f"winddirection_{hh}m", :, gids] + for hh, gids in self.hh_to_res_gids.items()} + self._wind_speeds = {hh: res[f"windspeed_{hh}m", :, gids] + for hh, gids in self.hh_to_res_gids.items()} + self._temps = {hh: res[f"temperature_{hh}m", :, gids] + for hh, gids in self.hh_to_res_gids.items()} + self._pressures = {hh: res[f"pressure_{hh}m", :, gids] + for hh, gids in self.hh_to_res_gids.items()} + self._time_index = res.time_index + + logger.debug(f"Data took {(time.time() - start_time) / 60:.2f} " + f"min to load") + +
[docs] def get_preloaded_data_for_gid(self, sc_gid): + """Get the pre-loaded data for a single SC GID. + + Parameters + ---------- + sc_gid : int + SC GID to load resource data for. + + Returns + ------- + BespokeSinglePlantData + A loaded ``BespokeSinglePlantData`` object that can act as + an HDF5 handler stand-in *for this SC GID only*. + """ + hh = self.sc_gid_to_hh[sc_gid] + sc_point_res_gids = sorted(self.sc_gid_to_res_gid[sc_gid]) + data_inds = np.searchsorted(self.hh_to_res_gids[hh], sc_point_res_gids) + return BespokeSinglePlantData(sc_point_res_gids, + self._wind_dirs[hh][:, data_inds], + self._wind_speeds[hh][:, data_inds], + self._temps[hh][:, data_inds], + self._pressures[hh][:, data_inds], + self._time_index)
+ + +
[docs]class BespokeSinglePlantData: + """Single-plant preloaded data. + + This object is intended to facilitate the use of pre-loaded data for + running :class:`BespokeSinglePlant` on systems with slow parallel + reads to a single HDF5 file. + """ + + def __init__(self, data_inds, wind_dirs, wind_speeds, temps, pressures, + time_index): + """Initialize BespokeSinglePlantData + + Parameters + ---------- + data_inds : 1D np.array + Array of res GIDs. This array should be the same length as + the second dimension of `wind_dirs`, `wind_speeds`, `temps`, + and `pressures`. The GID value of data_inds[0] should + correspond to the `wind_dirs[:, 0]` data, etc. + wind_dirs, wind_speeds, temps, pressures : 2D np.array + Array of wind directions, wind speeds, temperatures, and + pressures, respectively. Dimensions should be correspond to + [time, location]. See documentation for `data_inds` for + required spatial mapping of GID values. + time_index : 1D np.array + Time index array corresponding to the temporal dimension of + the 2D data. Will be exposed directly to user. + + """ + self.data_inds = data_inds + self.wind_dirs = wind_dirs + self.wind_speeds = wind_speeds + self.temps = temps + self.pressures = pressures + self.time_index = time_index + + def __getitem__(self, key): + dset_name, t_idx, gids = key + data_inds = np.searchsorted(self.data_inds, gids) + if "winddirection" in dset_name: + return self.wind_dirs[t_idx, data_inds] + if "windspeed" in dset_name: + return self.wind_speeds[t_idx, data_inds] + if "temperature" in dset_name: + return self.temps[t_idx, data_inds] + if "pressure" in dset_name: + return self.pressures[t_idx, data_inds] + msg = f"Unknown dataset name: {dset_name!r}" + logger.error(msg) + raise ValueError(msg)
+ + +
[docs]class BespokeSinglePlant: + """Framework for analyzing and optimized a wind plant layout specific to + the local wind resource and exclusions for a single reV supply curve point. + """ + + DEPENDENCIES = ('shapely',) + OUT_ATTRS = copy.deepcopy(Gen.OUT_ATTRS) + + def __init__(self, gid, excl, res, tm_dset, sam_sys_inputs, + objective_function, capital_cost_function, + fixed_operating_cost_function, + variable_operating_cost_function, + min_spacing='5x', wake_loss_multiplier=1, ga_kwargs=None, + output_request=('system_capacity', 'cf_mean'), + ws_bins=(0.0, 20.0, 5.0), wd_bins=(0.0, 360.0, 45.0), + excl_dict=None, inclusion_mask=None, data_layers=None, + resolution=64, excl_area=None, exclusion_shape=None, + eos_mult_baseline_cap_mw=200, prior_meta=None, gid_map=None, + bias_correct=None, pre_loaded_data=None, close=True): + """ + Parameters + ---------- + gid : int + gid for supply curve point to analyze. + excl : str | ExclusionMask + Filepath to exclusions h5 or ExclusionMask file handler. + res : str | Resource + Filepath to .h5 wind resource file or pre-initialized Resource + handler + tm_dset : str + Dataset name in the exclusions file containing the + exclusions-to-resource mapping data. + sam_sys_inputs : dict + SAM windpower compute module system inputs not including the + wind resource data. + objective_function : str + The objective function of the optimization as a string, should + return the objective to be minimized during layout optimization. + Variables available are: + + - n_turbines: the number of turbines + - system_capacity: wind plant capacity + - aep: annual energy production + - fixed_charge_rate: user input fixed_charge_rate if included + as part of the sam system config. 
+ - self.wind_plant: the SAM wind plant object, through which + all SAM variables can be accessed + - capital_cost: plant capital cost as evaluated + by `capital_cost_function` + - fixed_operating_cost: plant fixed annual operating cost as + evaluated by `fixed_operating_cost_function` + - variable_operating_cost: plant variable annual operating cost + as evaluated by `variable_operating_cost_function` + + capital_cost_function : str + The plant capital cost function as a string, must return the total + capital cost in $. Has access to the same variables as the + objective_function. + fixed_operating_cost_function : str + The plant annual fixed operating cost function as a string, must + return the fixed operating cost in $/year. Has access to the same + variables as the objective_function. + variable_operating_cost_function : str + The plant annual variable operating cost function as a string, must + return the variable operating cost in $/kWh. Has access to the same + variables as the objective_function. + min_spacing : float | int | str + Minimum spacing between turbines in meters. Can also be a string + like "5x" (default) which is interpreted as 5 times the turbine + rotor diameter. + wake_loss_multiplier : float, optional + A multiplier used to scale the annual energy lost due to + wake losses. + .. WARNING:: This multiplier will ONLY be applied during the + optimization process and will NOT be come through in output + values such as the hourly profiles, + aep, any of the cost functions, or even the output objective. + ga_kwargs : dict | None + Dictionary of keyword arguments to pass to GA initialization. + If `None`, default initialization values are used. + See :class:`~reV.bespoke.gradient_free.GeneticAlgorithm` for + a description of the allowed keyword arguments. + output_request : list | tuple + Outputs requested from the SAM windpower simulation after the + bespoke plant layout optimization. 
Can also request resource means + like ws_mean, windspeed_mean, temperature_mean, pressure_mean. + ws_bins : tuple + 3-entry tuple with (start, stop, step) for the windspeed binning of + the wind joint probability distribution. The stop value is + inclusive, so ws_bins=(0, 20, 5) would result in four bins with bin + edges (0, 5, 10, 15, 20). + wd_bins : tuple + 3-entry tuple with (start, stop, step) for the winddirection + binning of the wind joint probability distribution. The stop value + is inclusive, so ws_bins=(0, 360, 90) would result in four bins + with bin edges (0, 90, 180, 270, 360). + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + None if excl input is pre-initialized. + inclusion_mask : np.ndarray + 2D array pre-extracted inclusion mask where 1 is included and 0 is + excluded. The shape of this will be checked against the input + resolution. + data_layers : None | dict + Aggregation data layers. Must be a dictionary keyed by data label + name. Each value must be another dictionary with "dset", "method", + and "fpath". + resolution : int + Number of exclusion points per SC point along an axis. + This number**2 is the total number of exclusion points per + SC point. + excl_area : float | None, optional + Area of an exclusion pixel in km2. None will try to infer the area + from the profile transform attribute in excl_fpath, by default None + exclusion_shape : tuple + Shape of the full exclusions extent (rows, cols). Inputing this + will speed things up considerably. + eos_mult_baseline_cap_mw : int | float, optional + Baseline plant capacity (MW) used to calculate economies of + scale (EOS) multiplier from the `capital_cost_function`. 
EOS + multiplier is calculated as the $-per-kW of the wind plant + divided by the $-per-kW of a plant with this baseline + capacity. By default, `200` (MW), which aligns the baseline + with ATB assumptions. See here: https://tinyurl.com/y85hnu6h. + prior_meta : pd.DataFrame | None + Optional meta dataframe belonging to a prior run. This will only + run the timeseries power generation step and assume that all of the + wind plant layouts are fixed given the prior run. The meta data + needs columns "capacity", "turbine_x_coords", and + "turbine_y_coords". + gid_map : None | str | dict + Mapping of unique integer generation gids (keys) to single integer + resource gids (values). This can be None, a pre-extracted dict, or + a filepath to json or csv. If this is a csv, it must have the + columns "gid" (which matches the techmap) and "gid_map" (gids to + extract from the resource input). This is useful if you're running + forecasted resource data (e.g., ECMWF) to complement historical + meteorology (e.g., WTK). + bias_correct : str | pd.DataFrame | None + Optional DataFrame or csv filepath to a wind bias correction table. + This has columns: gid (can be index name), adder, scalar. If both + adder and scalar are present, the wind is corrected by + (res*scalar)+adder. If either is not present, scalar defaults to 1 + and adder to 0. Only windspeed is corrected. Note that if gid_map + is provided, the bias_correct gid corresponds to the actual + resource data gid and not the techmap gid. + pre_loaded_data : BespokeSinglePlantData, optional + A pre-loaded :class:`BespokeSinglePlantData` object, or + ``None``. Can be useful to speed up execution on file + systems with slow parallel reads. + close : bool + Flag to close object file handlers on exit. + """ + + logger.debug('Initializing BespokeSinglePlant for gid {}...' 
+ .format(gid)) + logger.debug('Resource filepath: {}'.format(res)) + logger.debug('Exclusion filepath: {}'.format(excl)) + logger.debug('Exclusion dict: {}'.format(excl_dict)) + logger.debug('Bespoke objective function: {}' + .format(objective_function)) + logger.debug('Bespoke cost function: {}'.format(objective_function)) + logger.debug('Bespoke wake loss multiplier: {}' + .format(wake_loss_multiplier)) + logger.debug('Bespoke GA initialization kwargs: {}'.format(ga_kwargs)) + logger.debug('Bespoke EOS multiplier baseline capacity: {:,} MW' + .format(eos_mult_baseline_cap_mw)) + + if isinstance(min_spacing, str) and min_spacing.endswith('x'): + rotor_diameter = sam_sys_inputs["wind_turbine_rotor_diameter"] + min_spacing = float(min_spacing.strip('x')) * rotor_diameter + + if not isinstance(min_spacing, (int, float)): + try: + min_spacing = float(min_spacing) + except Exception as e: + msg = ('min_spacing must be numeric but received: {}, {}' + .format(min_spacing, type(min_spacing))) + logger.error(msg) + raise TypeError(msg) from e + + self.objective_function = objective_function + self.capital_cost_function = capital_cost_function + self.fixed_operating_cost_function = fixed_operating_cost_function + self.variable_operating_cost_function = \ + variable_operating_cost_function + self.min_spacing = min_spacing + self.wake_loss_multiplier = wake_loss_multiplier + self.ga_kwargs = ga_kwargs or {} + + self._sam_sys_inputs = sam_sys_inputs + self._out_req = list(output_request) + self._ws_bins = ws_bins + self._wd_bins = wd_bins + self._baseline_cap_mw = eos_mult_baseline_cap_mw + + self._res_df = None + self._prior_meta = prior_meta is not None + self._meta = prior_meta + self._wind_dist = None + self._ws_edges = None + self._wd_edges = None + self._wind_plant_pd = None + self._wind_plant_ts = None + self._plant_optm = None + self._gid_map = self._parse_gid_map(gid_map) + self._bias_correct = Gen._parse_bc(bias_correct) + self._pre_loaded_data = pre_loaded_data + 
self._outputs = {} + + Handler = self.get_wind_handler(res) + res = res if not isinstance(res, str) else Handler(res) + + self._sc_point = AggSCPoint(gid, excl, res, tm_dset, + excl_dict=excl_dict, + inclusion_mask=inclusion_mask, + resolution=resolution, + excl_area=excl_area, + exclusion_shape=exclusion_shape, + close=close) + + self._parse_output_req() + self._data_layers = data_layers + self._parse_prior_run() + + def __str__(self): + s = ('BespokeSinglePlant for reV SC gid {} with resolution {}' + .format(self.sc_point.gid, self.sc_point.resolution)) + return s + + def __repr__(self): + s = ('BespokeSinglePlant for reV SC gid {} with resolution {}' + .format(self.sc_point.gid, self.sc_point.resolution)) + return s + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + if type is not None: + raise + + def _parse_output_req(self): + """Make sure that the output request has basic important parameters + (cf_mean, annual_energy) and process mean wind resource datasets + (ws_mean, *_mean) if requested. 
+ """ + + required = ('cf_mean', 'annual_energy') + for req in required: + if req not in self._out_req: + self._out_req.append(req) + + if 'ws_mean' in self._out_req: + self._out_req.remove('ws_mean') + self._outputs['ws_mean'] = self.res_df['windspeed'].mean() + + for req in copy.deepcopy(self._out_req): + if req in self.res_df: + self._out_req.remove(req) + for annual_ti in self.annual_time_indexes: + year = annual_ti.year[0] + mask = self.res_df.index.isin(annual_ti) + arr = self.res_df.loc[mask, req].values.flatten() + self._outputs[req + f'-{year}'] = arr + + elif req.replace('_mean', '') in self.res_df: + self._out_req.remove(req) + dset = req.replace('_mean', '') + self._outputs[req] = self.res_df[dset].mean() + + if ('lcoe_fcr' in self._out_req + and 'fixed_charge_rate' not in self.original_sam_sys_inputs): + msg = ('User requested "lcoe_fcr" but did not input ' + '"fixed_charge_rate" in the SAM system config.') + logger.error(msg) + raise KeyError(msg) + + def _parse_prior_run(self): + """Parse prior bespoke wind plant optimization run meta data and make + sure the SAM system inputs are set accordingly.""" + + # {meta_column: sam_sys_input_key} + required = {'capacity': 'system_capacity', + 'turbine_x_coords': 'wind_farm_xCoordinates', + 'turbine_y_coords': 'wind_farm_yCoordinates'} + + if self._prior_meta: + missing = [k for k in required if k not in self.meta] + msg = ('Prior bespoke run meta data is missing the following ' + 'required columns: {}'.format(missing)) + assert not any(missing), msg + + for meta_col, sam_sys_key in required.items(): + prior_value = self.meta[meta_col].values[0] + self._sam_sys_inputs[sam_sys_key] = prior_value + + # convert reV supply curve cap in MW to SAM capacity in kW + self._sam_sys_inputs['system_capacity'] *= 1e3 + + @staticmethod + def _parse_gid_map(gid_map): + """Parse the gid map and return the extracted dictionary or None if not + provided + + Parameters + ---------- + gid_map : None | str | dict + Mapping of 
unique integer generation gids (keys) to single integer + resource gids (values). This can be None, a pre-extracted dict, or + a filepath to json or csv. If this is a csv, it must have the + columns "gid" (which matches the techmap) and "gid_map" (gids to + extract from the resource input). This is useful if you're running + forecasted resource data (e.g., ECMWF) to complement historical + meteorology (e.g., WTK). + + Returns + ------- + gid_map : dict | None + Pre-extracted gid_map dictionary if provided or None if not. + """ + + if isinstance(gid_map, str): + if gid_map.endswith('.csv'): + gid_map = pd.read_csv(gid_map).to_dict() + assert 'gid' in gid_map, 'Need "gid" in gid_map column' + assert 'gid_map' in gid_map, 'Need "gid_map" in gid_map column' + gid_map = {gid_map['gid'][i]: gid_map['gid_map'][i] + for i in gid_map['gid'].keys()} + + elif gid_map.endswith('.json'): + with open(gid_map, 'r') as f: + gid_map = json.load(f) + + return gid_map + +
[docs] def close(self): + """Close any open file handlers via the sc point attribute. If this + class was initialized with close=False, this will not close any + handlers.""" + self.sc_point.close()
+ +
[docs] def get_weighted_res_ts(self, dset): + """Special method for calculating the exclusion-weighted mean resource + timeseries data for the BespokeSinglePlant. + + Returns + ------- + data : np.ndarray + Timeseries data of shape (n_time,) for the wind plant weighted by + the plant inclusions mask. + """ + gids = self.sc_point.h5_gid_set + h5_gids = copy.deepcopy(gids) + if self._gid_map is not None: + h5_gids = [self._gid_map[g] for g in gids] + + if self._pre_loaded_data is None: + data = self.sc_point.h5[dset, :, h5_gids] + else: + data = self._pre_loaded_data[dset, :, h5_gids] + + if self._bias_correct is not None and dset.startswith('windspeed_'): + missing = [g for g in h5_gids if g not in self._bias_correct.index] + for missing_gid in missing: + self._bias_correct.loc[missing_gid, 'scalar'] = 1 + self._bias_correct.loc[missing_gid, 'adder'] = 0 + + scalar = self._bias_correct.loc[h5_gids, 'scalar'].values + adder = self._bias_correct.loc[h5_gids, 'adder'].values + data = data * scalar + adder + data = np.maximum(data, 0) + + weights = np.zeros(len(gids)) + for i, gid in enumerate(gids): + mask = self.sc_point._h5_gids == gid + weights[i] = self.sc_point.include_mask_flat[mask].sum() + + weights /= weights.sum() + data *= weights + data = np.sum(data, axis=1) + + return data
+ +
[docs] def get_weighted_res_dir(self): + """Special method for calculating the exclusion-weighted mean wind + direction for the BespokeSinglePlant + + Returns + ------- + mean_wind_dirs : np.ndarray + Timeseries array of winddirection data in shape (n_time,) in units + of degrees from north. + """ + + dset = f'winddirection_{self.hub_height}m' + gids = self.sc_point.h5_gid_set + h5_gids = copy.deepcopy(gids) + if self._gid_map is not None: + h5_gids = [self._gid_map[g] for g in gids] + + if self._pre_loaded_data is None: + dirs = self.sc_point.h5[dset, :, h5_gids] + else: + dirs = self._pre_loaded_data[dset, :, h5_gids] + angles = np.radians(dirs, dtype=np.float32) + + weights = np.zeros(len(gids)) + for i, gid in enumerate(gids): + mask = self.sc_point._h5_gids == gid + weights[i] = self.sc_point.include_mask_flat[mask].sum() + + weights /= weights.sum() + sin = np.sum(np.sin(angles) * weights, axis=1) + cos = np.sum(np.cos(angles) * weights, axis=1) + + mean_wind_dirs = np.degrees(np.arctan2(sin, cos)) + mean_wind_dirs[(mean_wind_dirs < 0)] += 360 + + return mean_wind_dirs
+ + @property + def gid(self): + """SC point gid for this bespoke plant. + + Returns + ------- + int + """ + return self.sc_point.gid + + @property + def include_mask(self): + """Get the supply curve point 2D inclusion mask (included is 1, + excluded is 0) + + Returns + ------- + np.ndarray + """ + return self.sc_point.include_mask + + @property + def pixel_side_length(self): + """Get the length of a single exclusion pixel side (meters) + + Returns + ------- + float + """ + return np.sqrt(self.sc_point.pixel_area) * 1000.0 + + @property + def original_sam_sys_inputs(self): + """Get the original (pre-optimized) SAM windpower system inputs. + + Returns + ------- + dict + """ + return self._sam_sys_inputs + + @property + def sam_sys_inputs(self): + """Get the SAM windpower system inputs. If the wind plant has not yet + been optimized, this returns the initial SAM config. If the wind plant + has been optimized using the wind_plant_pd object, this returns the + final optimized SAM plant config. + + Returns + ------- + dict + """ + config = copy.deepcopy(self._sam_sys_inputs) + if self._wind_plant_pd is None: + return config + + config.update(self._wind_plant_pd.sam_sys_inputs) + return config + + @property + def sc_point(self): + """Get the reV supply curve point object. 
+ + Returns + ------- + AggSCPoint + """ + return self._sc_point + + @property + def meta(self): + """Get the basic supply curve point meta data + + Returns + ------- + pd.DataFrame + """ + if self._meta is None: + res_gids = json.dumps([int(g) for g in self.sc_point.h5_gid_set]) + gid_counts = json.dumps([float(np.round(n, 1)) + for n in self.sc_point.gid_counts]) + + with SupplyCurveExtent(self.sc_point._excl_fpath, + resolution=self.sc_point.resolution) as sc: + row_ind, col_ind = sc.get_sc_row_col_ind(self.sc_point.gid) + + self._meta = pd.DataFrame( + {'sc_point_gid': self.sc_point.gid, + 'sc_row_ind': row_ind, + 'sc_col_ind': col_ind, + 'gid': self.sc_point.gid, + 'latitude': self.sc_point.latitude, + 'longitude': self.sc_point.longitude, + 'timezone': self.sc_point.timezone, + 'country': self.sc_point.country, + 'state': self.sc_point.state, + 'county': self.sc_point.county, + 'elevation': self.sc_point.elevation, + 'offshore': self.sc_point.offshore, + 'res_gids': res_gids, + 'gid_counts': gid_counts, + 'n_gids': self.sc_point.n_gids, + 'area_sq_km': self.sc_point.area, + }, index=[self.sc_point.gid]) + + return self._meta + + @property + def hub_height(self): + """Get the integer SAM system config turbine hub height (meters) + + Returns + ------- + int + """ + return int(self.sam_sys_inputs['wind_turbine_hub_ht']) + + @property + def res_df(self): + """Get the reV compliant wind resource dataframe representing the + aggregated and included wind resource in the current reV supply curve + point at the turbine hub height. Includes a DatetimeIndex and columns + for temperature, pressure, windspeed, and winddirection. 
+ + Returns + ------- + pd.DataFrame + """ + if self._res_df is None: + if self._pre_loaded_data is None: + ti = self.sc_point.h5.time_index + else: + ti = self._pre_loaded_data.time_index + + wd = self.get_weighted_res_dir() + ws = self.get_weighted_res_ts(f'windspeed_{self.hub_height}m') + temp = self.get_weighted_res_ts(f'temperature_{self.hub_height}m') + pres = self.get_weighted_res_ts(f'pressure_{self.hub_height}m') + + # convert mbar to atm + if np.nanmax(pres) > 1000: + pres *= 9.86923e-6 + + self._res_df = pd.DataFrame({'temperature': temp, + 'pressure': pres, + 'windspeed': ws, + 'winddirection': wd}, index=ti) + return self._res_df + + @property + def years(self): + """Get the sorted list of analysis years. + + Returns + ------- + list + """ + return sorted(list(self.res_df.index.year.unique())) + + @property + def annual_time_indexes(self): + """Get an ordered list of single-year time index objects that matches + the profile outputs from the wind_plant_ts object. + + Returns + ------- + list + """ + tis = [] + for year in self.years: + ti = self.res_df.index[(self.res_df.index.year == year)] + tis.append(WindPower.ensure_res_len(ti, ti)) + return tis + + @property + def wind_dist(self): + """Get the wind joint probability distribution and corresonding bin + edges + + Returns + ------- + wind_dist : np.ndarray + 2D array probability distribution of (windspeed, winddirection) + normalized so the sum of all values = 1. + ws_edges : np.ndarray + 1D array of windspeed (m/s) values that set the bin edges for the + wind probability distribution. Same len as wind_dist.shape[0] + 1 + wd_edges : np.ndarray + 1D array of winddirections (deg) values that set the bin edges + for the wind probability dist. 
Same len as wind_dist.shape[1] + 1 + """ + if self._wind_dist is None: + ws_bins = JointPD._make_bins(*self._ws_bins) + wd_bins = JointPD._make_bins(*self._wd_bins) + + hist_out = np.histogram2d(self.res_df['windspeed'], + self.res_df['winddirection'], + bins=(ws_bins, wd_bins)) + self._wind_dist, self._ws_edges, self._wd_edges = hist_out + self._wind_dist /= self._wind_dist.sum() + + return self._wind_dist, self._ws_edges, self._wd_edges + +
[docs] def initialize_wind_plant_ts(self): + """Initialize the annual wind plant timeseries analysis object(s) using + the annual resource data and the sam system inputs from the optimized + plant. + + Returns + ------- + wind_plant_ts : dict + Annual reV.SAM.generation.WindPower object(s) keyed by year. + """ + wind_plant_ts = {} + for year in self.years: + res_df = self.res_df[(self.res_df.index.year == year)] + sam_inputs = copy.deepcopy(self.sam_sys_inputs) + + if 'lcoe_fcr' in self._out_req: + lcoe_kwargs = self.get_lcoe_kwargs() + sam_inputs.update(lcoe_kwargs) + + i_wp = WindPower(res_df, self.meta, sam_inputs, + output_request=self._out_req) + wind_plant_ts[year] = i_wp + + return wind_plant_ts
+ + @property + def wind_plant_pd(self): + """reV WindPowerPD compute object for plant layout optimization based + on wind joint probability distribution + + Returns + ------- + reV.SAM.generation.WindPowerPD + """ + + if self._wind_plant_pd is None: + wind_dist, ws_edges, wd_edges = self.wind_dist + self._wind_plant_pd = WindPowerPD(ws_edges, wd_edges, wind_dist, + self.meta, self.sam_sys_inputs, + output_request=self._out_req) + return self._wind_plant_pd + + @property + def wind_plant_ts(self): + """reV WindPower compute object(s) based on wind resource timeseries + data keyed by year + + Returns + ------- + dict + """ + return self._wind_plant_ts + + @property + def plant_optimizer(self): + """Bespoke plant turbine placement optimizer object. + + Returns + ------- + PlaceTurbines + """ + if self._plant_optm is None: + # put import here to delay breaking due to special dependencies + from reV.bespoke.place_turbines import PlaceTurbines + self._plant_optm = PlaceTurbines( + self.wind_plant_pd, + self.objective_function, + self.capital_cost_function, + self.fixed_operating_cost_function, + self.variable_operating_cost_function, + self.include_mask, + self.pixel_side_length, + self.min_spacing, + self.wake_loss_multiplier) + + return self._plant_optm + +
[docs] def recalc_lcoe(self): + """Recalculate the multi-year mean LCOE based on the multi-year mean + annual energy production (AEP)""" + + if 'lcoe_fcr-means' in self.outputs: + lcoe_kwargs = self.get_lcoe_kwargs() + + logger.debug('Recalulating multi-year mean LCOE using ' + 'multi-year mean AEP.') + + fcr = lcoe_kwargs['fixed_charge_rate'] + cap_cost = lcoe_kwargs['capital_cost'] + foc = lcoe_kwargs['fixed_operating_cost'] + voc = lcoe_kwargs['variable_operating_cost'] + aep = self.outputs['annual_energy-means'] + + my_mean_lcoe = lcoe_fcr(fcr, cap_cost, foc, aep, voc) + + self._outputs['lcoe_fcr-means'] = my_mean_lcoe + self._meta['mean_lcoe'] = my_mean_lcoe
+ +
[docs] def get_lcoe_kwargs(self): + """Get a namespace of arguments for calculating LCOE based on the + bespoke optimized wind plant capacity + + Returns + ------- + lcoe_kwargs : dict + kwargs for the SAM lcoe model. These are based on the original + sam_sys_inputs, normalized to the original system_capacity, and + updated based on the bespoke optimized system_capacity, includes + fixed_charge_rate, system_capacity (kW), capital_cost ($), + fixed_operating_cos ($), variable_operating_cost ($/kWh) + """ + + if 'system_capacity' not in self.outputs: + msg = ('Could not find system_capacity in the outputs, need to ' + 'run_plant_optimization() to get the optimized ' + 'system_capacity before calculating LCOE!') + logger.error(msg) + raise RuntimeError(msg) + + lcoe_kwargs = { + 'fixed_charge_rate': + self.original_sam_sys_inputs['fixed_charge_rate'], + 'system_capacity': self.plant_optimizer.capacity, + 'capital_cost': self.plant_optimizer.capital_cost, + 'fixed_operating_cost': self.plant_optimizer.fixed_operating_cost, + 'variable_operating_cost': + self.plant_optimizer.variable_operating_cost} + + for k, v in lcoe_kwargs.items(): + self._meta[k] = v + + return lcoe_kwargs
+ +
[docs] @staticmethod + def get_wind_handler(res): + """Get a wind resource handler for a resource filepath. + + Parameters + ---------- + res : str + Resource filepath to wtk .h5 file. Can include * wildcards + for multi year resource. + + Returns + ------- + handler : WindResource | MultiYearWindResource + Wind resource handler or multi year handler + """ + handler = res + if isinstance(res, str): + if '*' in res: + handler = MultiYearWindResource + else: + handler = WindResource + return handler
+ +
[docs] @classmethod + def check_dependencies(cls): + """Check special dependencies for bespoke""" + + missing = [] + for name in cls.DEPENDENCIES: + try: + import_module(name) + except ModuleNotFoundError: + missing.append(name) + + if any(missing): + msg = ('The reV bespoke module depends on the following special ' + 'dependencies that were not found in the active ' + 'environment: {}'.format(missing)) + logger.error(msg) + raise ModuleNotFoundError(msg)
+ + @staticmethod + def _check_sys_inputs(plant1, plant2, + ignore=('wind_resource_model_choice', + 'wind_resource_data', + 'wind_turbine_powercurve_powerout', + 'hourly', + 'capital_cost', + 'fixed_operating_cost', + 'variable_operating_cost')): + """Check two reV-SAM models for matching system inputs. + + Parameters + ---------- + plant1/plant2 : reV.SAM.generation.WindPower + Two WindPower analysis objects to check. + """ + bad = [] + for k, v in plant1.sam_sys_inputs.items(): + if k not in plant2.sam_sys_inputs: + bad.append(k) + elif str(v) != str(plant2.sam_sys_inputs[k]): + bad.append(k) + bad = [b for b in bad if b not in ignore] + if any(bad): + msg = 'Inputs no longer match: {}'.format(bad) + logger.error(msg) + raise RuntimeError(msg) + +
[docs] def run_wind_plant_ts(self): + """Run the wind plant multi-year timeseries analysis and export output + requests to outputs property. + + Returns + ------- + outputs : dict + Output dictionary for the full BespokeSinglePlant object. The + multi-year timeseries data is also exported to the + BespokeSinglePlant.outputs property. + """ + + logger.debug('Running {} years of SAM timeseries analysis for {}' + .format(len(self.years), self)) + self._wind_plant_ts = self.initialize_wind_plant_ts() + for year, plant in self.wind_plant_ts.items(): + self._check_sys_inputs(plant, self.wind_plant_pd) + try: + plant.run_gen_and_econ() + except Exception as e: + msg = ('{} failed while trying to run SAM WindPower ' + 'timeseries analysis for {}'.format(self, year)) + logger.exception(msg) + raise RuntimeError(msg) from e + + for k, v in plant.outputs.items(): + self._outputs[k + '-{}'.format(year)] = v + + means = {} + for k1, v1 in self._outputs.items(): + if isinstance(v1, Number) and parse_year(k1, option='boolean'): + year = parse_year(k1) + base_str = k1.replace(str(year), '') + all_values = [v2 for k2, v2 in self._outputs.items() + if base_str in k2] + means[base_str + 'means'] = np.mean(all_values) + + self._outputs.update(means) + + # copy dataset outputs to meta data for supply curve table summary + if 'cf_mean-means' in self.outputs: + self._meta['mean_cf'] = self.outputs['cf_mean-means'] + if 'lcoe_fcr-means' in self.outputs: + self._meta['mean_lcoe'] = self.outputs['lcoe_fcr-means'] + self.recalc_lcoe() + + logger.debug('Timeseries analysis complete!') + + return self.outputs
+ +
[docs] def run_plant_optimization(self): + """Run the wind plant layout optimization and export outputs + to outputs property. + + Returns + ------- + outputs : dict + Output dictionary for the full BespokeSinglePlant object. The + layout optimization output data is also exported to the + BespokeSinglePlant.outputs property. + """ + + logger.debug('Running plant layout optimization for {}'.format(self)) + try: + self.plant_optimizer.place_turbines(**self.ga_kwargs) + except Exception as e: + msg = ('{} failed while trying to run the ' + 'turbine placement optimizer' + .format(self)) + logger.exception(msg) + raise RuntimeError(msg) from e + + # TODO need to add: + # total cell area + # cell capacity density + + txc = [int(np.round(c)) for c in self.plant_optimizer.turbine_x] + tyc = [int(np.round(c)) for c in self.plant_optimizer.turbine_y] + pxc = [int(np.round(c)) for c in self.plant_optimizer.x_locations] + pyc = [int(np.round(c)) for c in self.plant_optimizer.y_locations] + + txc = json.dumps(txc) + tyc = json.dumps(tyc) + pxc = json.dumps(pxc) + pyc = json.dumps(pyc) + + self._meta["turbine_x_coords"] = txc + self._meta["turbine_y_coords"] = tyc + self._meta["possible_x_coords"] = pxc + self._meta["possible_y_coords"] = pyc + + self._outputs["full_polygons"] = self.plant_optimizer.full_polygons + self._outputs["packing_polygons"] = \ + self.plant_optimizer.packing_polygons + self._outputs["system_capacity"] = self.plant_optimizer.capacity + + self._meta["n_turbines"] = self.plant_optimizer.nturbs + self._meta["bespoke_aep"] = self.plant_optimizer.aep + self._meta["bespoke_objective"] = self.plant_optimizer.objective + self._meta["bespoke_capital_cost"] = \ + self.plant_optimizer.capital_cost + self._meta["bespoke_fixed_operating_cost"] = \ + self.plant_optimizer.fixed_operating_cost + self._meta["bespoke_variable_operating_cost"] = \ + self.plant_optimizer.variable_operating_cost + self._meta["included_area"] = self.plant_optimizer.area + 
self._meta["included_area_capacity_density"] = \ + self.plant_optimizer.capacity_density + self._meta["convex_hull_area"] = \ + self.plant_optimizer.convex_hull_area + self._meta["convex_hull_capacity_density"] = \ + self.plant_optimizer.convex_hull_capacity_density + self._meta["full_cell_capacity_density"] = \ + self.plant_optimizer.full_cell_capacity_density + + logger.debug('Plant layout optimization complete!') + + # copy dataset outputs to meta data for supply curve table summary + # convert SAM system capacity in kW to reV supply curve cap in MW + self._meta['capacity'] = self.outputs['system_capacity'] / 1e3 + + # add required ReEDS multipliers to meta + baseline_cost = self.plant_optimizer.capital_cost_per_kw( + capacity_mw=self._baseline_cap_mw) + self._meta['eos_mult'] = (self.plant_optimizer.capital_cost + / self.plant_optimizer.capacity + / baseline_cost) + self._meta['reg_mult'] = (self.sam_sys_inputs + .get("capital_cost_multiplier", 1)) + + return self.outputs
+ +
[docs] def agg_data_layers(self): + """Aggregate optional data layers if requested and save to self.meta""" + if self._data_layers is not None: + logger.debug('Aggregating {} extra data layers.' + .format(len(self._data_layers))) + point_summary = self.meta.to_dict() + point_summary = self.sc_point.agg_data_layers(point_summary, + self._data_layers) + self._meta = pd.DataFrame(point_summary) + logger.debug('Finished aggregating extra data layers.')
+ + @property + def outputs(self): + """Saved outputs for the single wind plant bespoke optimization. + + Returns + ------- + dict + """ + return self._outputs + +
[docs] @classmethod + def run(cls, *args, **kwargs): + """Run the bespoke optimization for a single wind plant. + + Parameters + ---------- + See the class initialization parameters. + + Returns + ------- + bsp : dict + Bespoke single plant outputs namespace keyed by dataset name + including a dataset "meta" for the BespokeSinglePlant meta data. + """ + + with cls(*args, **kwargs) as bsp: + if bsp._prior_meta: + logger.debug('Skipping bespoke plant optimization for gid {}. ' + 'Received prior meta data for this point.' + .format(bsp.gid)) + else: + _ = bsp.run_plant_optimization() + + _ = bsp.run_wind_plant_ts() + bsp.agg_data_layers() + + meta = bsp.meta + out = bsp.outputs + out['meta'] = meta + for year, ti in zip(bsp.years, bsp.annual_time_indexes): + out['time_index-{}'.format(year)] = ti + + return out
+ + +
[docs]class BespokeWindPlants(BaseAggregation): + """BespokeWindPlants""" + + def __init__(self, excl_fpath, res_fpath, tm_dset, objective_function, + capital_cost_function, fixed_operating_cost_function, + variable_operating_cost_function, project_points, + sam_files, min_spacing='5x', wake_loss_multiplier=1, + ga_kwargs=None, output_request=('system_capacity', 'cf_mean'), + ws_bins=(0.0, 20.0, 5.0), wd_bins=(0.0, 360.0, 45.0), + excl_dict=None, area_filter_kernel='queen', min_area=None, + resolution=64, excl_area=None, data_layers=None, + pre_extract_inclusions=False, prior_run=None, gid_map=None, + bias_correct=None, pre_load_data=False): + """reV bespoke analysis class. + + Much like generation, ``reV`` bespoke analysis runs SAM + simulations by piping in renewable energy resource data (usually + from the WTK), loading the SAM config, and then executing the + :py:class:`PySAM.Windpower.Windpower` compute module. + However, unlike ``reV`` generation, bespoke analysis is + performed on the supply-curve grid resolution, and the plant + layout is optimized for every supply-curve point based on an + optimization objective specified by the user. See the NREL + publication on the bespoke methodology for more information. + + See the documentation for the ``reV`` SAM class (e.g. + :class:`reV.SAM.generation.WindPower`, + :class:`reV.SAM.generation.PvWattsv8`, + :class:`reV.SAM.generation.Geothermal`, etc.) for info on the + allowed and/or required SAM config file inputs. + + Parameters + ---------- + excl_fpath : str | list | tuple + Filepath to exclusions data HDF5 file. The exclusions HDF5 + file should contain the layers specified in `excl_dict` + and `data_layers`. These layers may also be spread out + across multiple HDF5 files, in which case this input should + be a list or tuple of filepaths pointing to the files + containing the layers. Note that each data layer must be + uniquely defined (i.e.only appear once and in a single + input file). 
+ res_fpath : str + Filepath to wind resource data in NREL WTK format. This + input can be path to a single resource HDF5 file or a path + including a wildcard input like ``/h5_dir/prefix*suffix`` to + run bespoke on multiple years of resource data. The former + must be readable by + :py:class:`rex.renewable_resource.WindResource` while the + latter must be readable by + or :py:class:`rex.multi_year_resource.MultiYearWindResource` + (i.e. the resource data conform to the + `rex data format <https://tinyurl.com/3fy7v5kx>`_). This + means the data file(s) must contain a 1D ``time_index`` + dataset indicating the UTC time of observation, a 1D + ``meta`` dataset represented by a DataFrame with + site-specific columns, and 2D resource datasets that match + the dimensions of (time_index, meta). The time index must + start at 00:00 of January 1st of the year under + consideration, and its shape must be a multiple of 8760. + tm_dset : str + Dataset name in the `excl_fpath` file containing the + techmap (exclusions-to-resource mapping data). This data + layer links the supply curve GID's to the generation GID's + that are used to evaluate the performance metrics of each + wind plant. By default, the generation GID's are assumed to + match the resource GID's, but this mapping can be customized + via the `gid_map` input (see the documentation for `gid_map` + for more details). + + .. Important:: This dataset uniquely couples the (typically + high-resolution) exclusion layers to the (typically + lower-resolution) resource data. Therefore, a separate + techmap must be used for every unique combination of + resource and exclusion coordinates. + + objective_function : str + The objective function of the optimization written out as a + string. This expression should compute the objective to be + minimized during layout optimization. 
Variables available + for computation are: + + - ``n_turbines``: the number of turbines + - ``system_capacity``: wind plant capacity + - ``aep``: annual energy production + - ``fixed_charge_rate``: user input fixed_charge_rate if + included as part of the sam system config. + - ``self.wind_plant``: the SAM wind plant object, + through which all SAM variables can be accessed + - ``capital_cost``: plant capital cost as evaluated + by `capital_cost_function` + - ``fixed_operating_cost``: plant fixed annual operating + cost as evaluated by `fixed_operating_cost_function` + - ``variable_operating_cost``: plant variable annual + operating cost, as evaluated by + `variable_operating_cost_function` + + capital_cost_function : str + The plant capital cost function written out as a string. + This expression must return the total plant capital cost in + $. This expression has access to the same variables as the + `objective_function` argument above. + fixed_operating_cost_function : str + The plant annual fixed operating cost function written out + as a string. This expression must return the fixed operating + cost in $/year. This expression has access to the same + variables as the `objective_function` argument above. + variable_operating_cost_function : str + The plant annual variable operating cost function written + out as a string. This expression must return the variable + operating cost in $/kWh. This expression has access to the + same variables as the `objective_function` argument above. + project_points : int | list | tuple | str | dict | pd.DataFrame | slice + Input specifying which sites to process. A single integer + representing the supply curve GID of a site may be specified + to evaluate ``reV`` at a supply curve point. A list or tuple + of integers (or slice) representing the supply curve GIDs of + multiple sites can be specified to evaluate ``reV`` at + multiple specific locations. A string pointing to a project + points CSV file may also be specified. 
Typically, the CSV + contains two columns: + + - ``gid``: Integer specifying the supply curve GID of + each site. + - ``config``: Key in the `sam_files` input dictionary + (see below) corresponding to the SAM configuration to + use for each particular site. This value can also be + ``None`` (or left out completely) if you specify only + a single SAM configuration file as the `sam_files` + input. + + The CSV file may also contain site-specific inputs by + including a column named after a config keyword (e.g. a + column called ``capital_cost`` may be included to specify a + site-specific capital cost value for each location). Columns + that do not correspond to a config key may also be included, + but they will be ignored. The CSV file input can also have + these extra columns: + + - ``capital_cost_multiplier`` + - ``fixed_operating_cost_multiplier`` + - ``variable_operating_cost_multiplier`` + + These particular inputs are treated as multipliers to be + applied to the respective cost curves + (`capital_cost_function`, `fixed_operating_cost_function`, + and `variable_operating_cost_function`) both during and + after the optimization. A DataFrame following the same + guidelines as the CSV input (or a dictionary that can be + used to initialize such a DataFrame) may be used for this + input as well. If you would like to obtain all available + ``reV`` supply curve points to run, you can use the + :class:`reV.supply_curve.extent.SupplyCurveExtent` class + like so:: + + import pandas as pd + from reV.supply_curve.extent import SupplyCurveExtent + + excl_fpath = "..." + resolution = ... 
+ with SupplyCurveExtent(excl_fpath, resolution) as sc: + points = sc.valid_sc_points(tm_dset).tolist() + points = pd.DataFrame({"gid": points}) + points["config"] = "default" # or a list of config choices + + # Use the points directly or save them to csv for CLI usage + points.to_csv("project_points.csv", index=False) + + sam_files : dict | str + A dictionary mapping SAM input configuration ID(s) to SAM + configuration(s). Keys are the SAM config ID(s) which + correspond to the ``config`` column in the project points + CSV. Values for each key are either a path to a + corresponding SAM config file or a full dictionary + of SAM config inputs. For example:: + + sam_files = { + "default": "/path/to/default/sam.json", + "onshore": "/path/to/onshore/sam_config.yaml", + "offshore": { + "sam_key_1": "sam_value_1", + "sam_key_2": "sam_value_2", + ... + }, + ... + } + + This input can also be a string pointing to a single SAM + config file. In this case, the ``config`` column of the + CSV points input should be set to ``None`` or left out + completely. See the documentation for the ``reV`` SAM class + (e.g. :class:`reV.SAM.generation.WindPower`, + :class:`reV.SAM.generation.PvWattsv8`, + :class:`reV.SAM.generation.Geothermal`, etc.) for + info on the allowed and/or required SAM config file inputs. + min_spacing : float | int | str, optional + Minimum spacing between turbines (in meters). This input can + also be a string like "5x", which is interpreted as 5 times + the turbine rotor diameter. By default, ``"5x"``. + wake_loss_multiplier : float, optional + A multiplier used to scale the annual energy lost due to + wake losses. + + .. WARNING:: This multiplier will ONLY be applied during the + optimization process and will NOT come through in output + values such as the hourly profiles, aep, any of the cost + functions, or even the output objective. + + By default, ``1``. + ga_kwargs : dict, optional + Dictionary of keyword arguments to pass to GA + initialization. 
If ``None``, default initialization values + are used. See + :class:`~reV.bespoke.gradient_free.GeneticAlgorithm` for + a description of the allowed keyword arguments. + By default, ``None``. + output_request : list | tuple, optional + Outputs requested from the SAM windpower simulation after + the bespoke plant layout optimization. Can be any of the + parameters in the "Outputs" group of the PySAM module + :py:class:`PySAM.Windpower.Windpower.Outputs`, PySAM module. + This list can also include a select number of SAM + config/resource parameters to include in the output: + any key in any of the + `output attribute JSON files <https://tinyurl.com/4bmrpe3j/>`_ + may be requested. Time-series profiles requested via this + input are output in UTC. This input can also be used to + request resource means like ``"ws_mean"``, + ``"windspeed_mean"``, ``"temperature_mean"``, and + ``"pressure_mean"``. By default, + ``('system_capacity', 'cf_mean')``. + ws_bins : tuple, optional + A 3-entry tuple with ``(start, stop, step)`` for the + windspeed binning of the wind joint probability + distribution. The stop value is inclusive, so + ``ws_bins=(0, 20, 5)`` would result in four bins with bin + edges (0, 5, 10, 15, 20). By default, ``(0.0, 20.0, 5.0)``. + wd_bins : tuple, optional + A 3-entry tuple with ``(start, stop, step)`` for the wind + direction binning of the wind joint probability + distribution. The stop value is inclusive, so + ``wd_bins=(0, 360, 90)`` would result in four bins with bin + edges (0, 90, 180, 270, 360). + By default, ``(0.0, 360.0, 45.0)``. + excl_dict : dict, optional + Dictionary of exclusion keyword arguments of the format + ``{layer_dset_name: {kwarg: value}}``, where + ``layer_dset_name`` is a dataset in the exclusion h5 file + and the ``kwarg: value`` pair is a keyword argument to + the :class:`reV.supply_curve.exclusions.LayerMask` class. 
+ For example:: + + excl_dict = { + "typical_exclusion": { + "exclude_values": 255, + }, + "another_exclusion": { + "exclude_values": [2, 3], + "weight": 0.5 + }, + "exclusion_with_nodata": { + "exclude_range": [10, 100], + "exclude_nodata": True, + "nodata_value": -1 + }, + "partial_setback": { + "use_as_weights": True + }, + "height_limit": { + "exclude_range": [0, 200] + }, + "slope": { + "include_range": [0, 20] + }, + "developable_land": { + "force_include_values": 42 + }, + "more_developable_land": { + "force_include_range": [5, 10] + }, + ... + } + + Note that all the keys given in this dictionary should be + datasets of the `excl_fpath` file. If ``None`` or empty + dictionary, no exclusions are applied. By default, ``None``. + area_filter_kernel : {"queen", "rook"}, optional + Contiguous area filter method to use on final exclusions + mask. The filters are defined as:: + + # Queen: # Rook: + [[1,1,1], [[0,1,0], + [1,1,1], [1,1,1], + [1,1,1]] [0,1,0]] + + These filters define how neighboring pixels are "connected". + Once pixels in the final exclusion layer are connected, the + area of each resulting cluster is computed and compared + against the `min_area` input. Any cluster with an area + less than `min_area` is excluded from the final mask. + This argument has no effect if `min_area` is ``None``. + By default, ``"queen"``. + min_area : float, optional + Minimum area (in km\ :sup:`2`) required to keep an isolated + cluster of (included) land within the resulting exclusions + mask. Any clusters of land with areas less than this value + will be marked as exclusions. See the documentation for + `area_filter_kernel` for an explanation of how the area of + each land cluster is computed. If ``None``, no area + filtering is performed. By default, ``None``. + resolution : int, optional + Supply Curve resolution. This value defines how many pixels + are in a single side of a supply curve cell. 
For example, + a value of ``64`` would generate a supply curve where the + side of each supply curve cell is ``64x64`` exclusion + pixels. By default, ``64``. + excl_area : float, optional + Area of a single exclusion mask pixel (in km\ :sup:`2`). + If ``None``, this value will be inferred from the profile + transform attribute in `excl_fpath`. By default, ``None``. + data_layers : dict, optional + Dictionary of aggregation data layers of the format:: + + data_layers = { + "output_layer_name": { + "dset": "layer_name", + "method": "mean", + "fpath": "/path/to/data.h5" + }, + "another_output_layer_name": { + "dset": "input_layer_name", + "method": "mode", + # optional "fpath" key omitted + }, + ... + } + + The ``"output_layer_name"`` is the column name under which + the aggregated data will appear in the meta DataFrame of the + output file. The ``"output_layer_name"`` does not have to + match the ``dset`` input value. The latter should match + the layer name in the HDF5 from which the data to aggregate + should be pulled. The ``method`` should be one of + ``{"mode", "mean", "min", "max", "sum", "category"}``, + describing how the high-resolution data should be aggregated + for each supply curve point. ``fpath`` is an optional key + that can point to an HDF5 file containing the layer data. If + left out, the data is assumed to exist in the file(s) + specified by the `excl_fpath` input. If ``None``, no data + layer aggregation is performed. By default, ``None``. + pre_extract_inclusions : bool, optional + Optional flag to pre-extract/compute the inclusion mask from + the `excl_dict` input. It is typically faster to compute + the inclusion mask on the fly with parallel workers. + By default, ``False``. + prior_run : str, optional + Optional filepath to a bespoke output HDF5 file belonging to + a prior run. If specified, this module will only run the + timeseries power generation step and assume that all of the + wind plant layouts are fixed from the prior run. 
The meta + data of this file must contain the following columns + (automatically satisfied if the HDF5 file was generated by + ``reV`` bespoke): + + - ``capacity`` : Capacity of the plant, in MW. + - ``turbine_x_coords``: A string representation of a + python list containing the X coordinates (in m; origin + of cell at bottom left) of the turbines within the + plant (supply curve cell). + - ``turbine_y_coords`` : A string representation of a + python list containing the Y coordinates (in m; origin + of cell at bottom left) of the turbines within the + plant (supply curve cell). + + If ``None``, no previous run data is considered. + By default, ``None`` + gid_map : str | dict, optional + Mapping of unique integer generation gids (keys) to single + integer resource gids (values). This enables unique + generation gids in the project points to map to non-unique + resource gids, which can be useful when evaluating multiple + resource datasets in ``reV`` (e.g., forecasted ECMWF + resource data to complement historical WTK meteorology). + This input can be a pre-extracted dictionary or a path to a + JSON or CSV file. If this input points to a CSV file, the + file must have the columns ``gid`` (which matches the + project points) and ``gid_map`` (gids to extract from the + resource input). If ``None``, the GID values in the project + points are assumed to match the resource GID values. + By default, ``None``. + bias_correct : str | pd.DataFrame, optional + Optional DataFrame or CSV filepath to a wind or solar + resource bias correction table. This has columns: + + - ``gid``: GID of site (can be index name) + - ``adder``: Value to add to resource at each site + - ``scalar``: Value to scale resource at each site by + + The ``gid`` field should match the true resource ``gid`` + regardless of the optional ``gid_map`` input. If both + ``adder`` and ``scalar`` are present, the wind or solar + resource is corrected by :math:`(res*scalar)+adder`. 
If + *either* is missing, ``scalar`` defaults to 1 and ``adder`` + to 0. Only `windspeed` **or** `GHI` + `DNI` are corrected, + depending on the technology (wind for the former, solar + for the latter). `GHI` and `DNI` are corrected with the + same correction factors. If ``None``, no corrections are + applied. By default, ``None``. + pre_load_data : bool, optional + Option to pre-load resource data. This step can be + time-consuming up front, but it drastically reduces the + number of parallel reads to the `res_fpath` HDF5 file(s), + and can have a significant overall speedup on systems with + slow parallel I/O capabilities. Pre-loaded data can use a + significant amount of RAM, so be sure to split execution + across many nodes (e.g. 100 nodes, 36 workers each for + CONUS) or request large amounts of memory for a smaller + number of nodes. By default, ``False``. + """ + + log_versions(logger) + logger.info('Initializing BespokeWindPlants...') + logger.info('Resource filepath: {}'.format(res_fpath)) + logger.info('Exclusion filepath: {}'.format(excl_fpath)) + logger.debug('Exclusion dict: {}'.format(excl_dict)) + logger.info('Bespoke objective function: {}' + .format(objective_function)) + logger.info('Bespoke capital cost function: {}' + .format(capital_cost_function)) + logger.info('Bespoke fixed operating cost function: {}' + .format(fixed_operating_cost_function)) + logger.info('Bespoke variable operating cost function: {}' + .format(variable_operating_cost_function)) + logger.info('Bespoke wake loss multiplier: {}' + .format(wake_loss_multiplier)) + logger.info('Bespoke GA initialization kwargs: {}'.format(ga_kwargs)) + + logger.info('Bespoke pre-extracting exclusions: {}' + .format(pre_extract_inclusions)) + logger.info('Bespoke pre-extracting resource data: {}' + .format(pre_load_data)) + logger.info('Bespoke prior run: {}'.format(prior_run)) + logger.info('Bespoke GID map: {}'.format(gid_map)) + logger.info('Bespoke bias correction table: 
{}'.format(bias_correct)) + + BespokeSinglePlant.check_dependencies() + + self._project_points = self._parse_points(project_points, sam_files) + + super().__init__(excl_fpath, tm_dset, excl_dict=excl_dict, + area_filter_kernel=area_filter_kernel, + min_area=min_area, resolution=resolution, + excl_area=excl_area, gids=self._project_points.gids, + pre_extract_inclusions=pre_extract_inclusions) + + self._res_fpath = res_fpath + self._obj_fun = objective_function + self._cap_cost_fun = capital_cost_function + self._foc_fun = fixed_operating_cost_function + self._voc_fun = variable_operating_cost_function + self._min_spacing = min_spacing + self._wake_loss_multiplier = wake_loss_multiplier + self._ga_kwargs = ga_kwargs or {} + self._output_request = SAMOutputRequest(output_request) + self._ws_bins = ws_bins + self._wd_bins = wd_bins + self._data_layers = data_layers + self._prior_meta = self._parse_prior_run(prior_run) + self._gid_map = BespokeSinglePlant._parse_gid_map(gid_map) + self._bias_correct = Gen._parse_bc(bias_correct) + self._outputs = {} + self._check_files() + + self._pre_loaded_data = None + self._pre_load_data(pre_load_data) + + self._slice_lookup = None + + logger.info('Initialized BespokeWindPlants with project points: {}' + .format(self._project_points)) + + @staticmethod + def _parse_points(points, sam_configs): + """Parse a project points object using a project points file + + Parameters + ---------- + points : int | slice | list | str | PointsControl | None + Slice or list specifying project points, string pointing to a + project points csv, or a fully instantiated PointsControl object. + Can also be a single site integer value. Points csv should have + 'gid' and 'config' column, the config maps to the sam_configs dict + keys. + sam_configs : dict | str | SAMConfig + SAM input configuration ID(s) and file path(s). Keys are the SAM + config ID(s) which map to the config column in the project points + CSV. 
Values are either a JSON SAM config file or dictionary of SAM + config inputs. Can also be a single config file path or a + pre loaded SAMConfig object. + + Returns + ------- + ProjectPoints : ~reV.config.project_points.ProjectPoints + Project points object laying out the supply curve gids to + analyze. + """ + pc = Gen.get_pc(points, points_range=None, sam_configs=sam_configs, + tech='windpower', sites_per_worker=1) + + return pc.project_points + + @staticmethod + def _parse_prior_run(prior_run): + """Extract bespoke meta data from prior run and verify that the run is + compatible with the new job specs. + + Parameters + ---------- + prior_run : str | None + Optional filepath to a bespoke output .h5 file belonging to a prior + run. This will only run the timeseries power generation step and + assume that all of the wind plant layouts are fixed given the prior + run. The meta data of this file needs columns "capacity", + "turbine_x_coords", and "turbine_y_coords". + + Returns + ------- + meta : pd.DataFrame | None + Meta data from the previous bespoke run. This includes the + previously optimized wind farm layouts. All of the nested list + columns will be json loaded. + """ + + meta = None + + if prior_run is not None: + assert os.path.isfile(prior_run) + assert prior_run.endswith('.h5') + + with Outputs(prior_run, mode='r') as f: + meta = f.meta + + # pylint: disable=no-member + for col in meta.columns: + val = meta[col].values[0] + if isinstance(val, str) and val[0] == '[' and val[-1] == ']': + meta[col] = meta[col].apply(json.loads) + + return meta + + def _get_prior_meta(self, gid): + """Get the meta data for a given gid from the prior run (if available) + + Parameters + ---------- + gid : int + SC point gid for site to pull prior meta for. + + Returns + ------- + meta : pd.DataFrame + Prior meta data for just the requested gid. 
+ """ + meta = None + + if self._prior_meta is not None: + mask = self._prior_meta['gid'] == gid + if any(mask): + meta = self._prior_meta[mask] + + return meta + + def _check_files(self): + """Do a preflight check on input files""" + + paths = self._excl_fpath + if isinstance(self._excl_fpath, str): + paths = [self._excl_fpath] + + for path in paths: + if not os.path.exists(path): + raise FileNotFoundError( + 'Could not find required exclusions file: ' + '{}'.format(path)) + + with ExclusionLayers(paths) as excl: + if self._tm_dset not in excl: + raise FileInputError('Could not find techmap dataset "{}" ' + 'in the exclusions file(s): {}' + .format(self._tm_dset, paths)) + + # just check that this file exists, cannot check res_fpath if *glob + Handler = BespokeSinglePlant.get_wind_handler(self._res_fpath) + with Handler(self._res_fpath) as f: + assert any(f.dsets) + + def _pre_load_data(self, pre_load_data): + """Pre-load resource data, if requested. """ + if not pre_load_data: + return + + sc_gid_to_hh = {gid: self._hh_for_sc_gid(gid) + for gid in self._project_points.df["gid"]} + + with ExclusionLayers(self._excl_fpath) as excl: + tm = excl[self._tm_dset] + + scp_kwargs = {"shape": self.shape, "resolution": self._resolution} + slices = {gid: SupplyCurvePoint.get_agg_slices(gid=gid, **scp_kwargs) + for gid in self._project_points.df["gid"]} + + sc_gid_to_res_gid = {gid: sorted(set(tm[slx, sly].flatten())) + for gid, (slx, sly) in slices.items()} + + for sc_gid, res_gids in sc_gid_to_res_gid.items(): + if res_gids[0] < 0: + sc_gid_to_res_gid[sc_gid] = res_gids[1:] + + if self._gid_map is not None: + for sc_gid, res_gids in sc_gid_to_res_gid.items(): + sc_gid_to_res_gid[sc_gid] = sorted(self._gid_map[g] + for g in res_gids) + + logger.info("Pre-loading resource data for Bespoke run... 
") + self._pre_loaded_data = BespokeMultiPlantData(self._res_fpath, + sc_gid_to_hh, + sc_gid_to_res_gid) + + def _hh_for_sc_gid(self, sc_gid): + """Fetch the hh for a given sc_gid""" + config = self.sam_sys_inputs_with_site_data(sc_gid) + return int(config["wind_turbine_hub_ht"]) + + def _pre_loaded_data_for_sc_gid(self, sc_gid): + """Pre-load data for a given SC GID, if requested. """ + if self._pre_loaded_data is None: + return None + + return self._pre_loaded_data.get_preloaded_data_for_gid(sc_gid) + + @property + def outputs(self): + """Saved outputs for the multi wind plant bespoke optimization. Keys + are reV supply curve gids and values are BespokeSinglePlant.outputs + dictionaries. + + Returns + ------- + dict + """ + return self._outputs + + @property + def completed_gids(self): + """Get a sorted list of completed BespokeSinglePlant gids + + Returns + ------- + list + """ + return sorted(list(self.outputs.keys())) + + @property + def meta(self): + """Meta data for all completed BespokeSinglePlant objects. + + Returns + ------- + pd.DataFrame + """ + meta = [self.outputs[g]['meta'] for g in self.completed_gids] + if len(self.completed_gids) > 1: + meta = pd.concat(meta, axis=0) + else: + meta = meta[0] + return meta + + @property + def slice_lookup(self): + """dict | None: Lookup mapping sc_point_gid to exclusion slice. """ + if self._slice_lookup is None and self._inclusion_mask is not None: + with SupplyCurveExtent(self._excl_fpath, + resolution=self._resolution) as sc: + assert self.shape == self._inclusion_mask.shape + self._slice_lookup = sc.get_slice_lookup(self.gids) + + return self._slice_lookup + +
[docs] def sam_sys_inputs_with_site_data(self, gid): + """Update the sam_sys_inputs with site data for the given GID. + + Site data is extracted from the project points DataFrame. Every + column in the project DataFrame becomes a key in the site_data + output dictionary. + + Parameters + ---------- + gid : int + SC point gid for site to pull site data for. + + Returns + ------- + dictionary : dict + SAM system config with extra keys from the project points + DataFrame. + """ + + gid_idx = self._project_points.index(gid) + site_data = self._project_points.df.iloc[gid_idx] + + site_sys_inputs = self._project_points[gid][1] + site_sys_inputs.update({k: v for k, v in site_data.to_dict().items() + if not (isinstance(v, float) and np.isnan(v))}) + return site_sys_inputs
+ + def _init_fout(self, out_fpath, sample): + """Initialize the bespoke output h5 file with meta and time index dsets + + Parameters + ---------- + out_fpath : str + Full filepath to an output .h5 file to save Bespoke data to. The + parent directories will be created if they do not already exist. + sample : dict + A single sample BespokeSinglePlant output dict that has been run + and has output data. + """ + out_dir = os.path.dirname(out_fpath) + if not os.path.exists(out_dir): + create_dirs(out_dir) + + with Outputs(out_fpath, mode='w') as f: + f._set_meta('meta', self.meta, attrs={}) + ti_dsets = [d for d in sample.keys() + if d.startswith('time_index-')] + for dset in ti_dsets: + f._set_time_index(dset, sample[dset], attrs={}) + f._set_time_index('time_index', sample[dset], attrs={}) + + def _collect_out_arr(self, dset, sample): + """Collect single-plant data arrays into complete arrays with data from + all BespokeSinglePlant objects. + + Parameters + ---------- + dset : str + Dataset to collect, this should be an output dataset present in + BespokeSinglePlant.outputs + sample : dict + A single sample BespokeSinglePlant output dict that has been run + and has output data. + + Returns + ------- + full_arr : np.ndarray + Full data array either 1D for scalar data or 2D for timeseries + data (n_time, n_plant) for all BespokeSinglePlant objects + """ + + single_arr = sample[dset] + + if isinstance(single_arr, Number): + shape = (len(self.completed_gids),) + sample_num = single_arr + elif isinstance(single_arr, (list, tuple, np.ndarray)): + shape = (len(single_arr), len(self.completed_gids)) + sample_num = single_arr[0] + else: + msg = ('Not writing dataset "{}" of type "{}" to disk.' 
+ .format(dset, type(single_arr))) + logger.info(msg) + return None + + if isinstance(sample_num, float): + dtype = np.float32 + else: + dtype = type(sample_num) + full_arr = np.zeros(shape, dtype=dtype) + + # collect data from all wind plants + logger.info('Collecting dataset "{}" with final shape {}' + .format(dset, shape)) + for i, gid in enumerate(self.completed_gids): + if len(full_arr.shape) == 1: + full_arr[i] = self.outputs[gid][dset] + else: + full_arr[:, i] = self.outputs[gid][dset] + + return full_arr + +
[docs] def save_outputs(self, out_fpath): + """Save Bespoke Wind Plant optimization outputs to disk. + + Parameters + ---------- + out_fpath : str + Full filepath to an output .h5 file to save Bespoke data to. The + parent directories will be created if they do not already exist. + + Returns + ------- + out_fpath : str + Full filepath to desired .h5 output file, the .h5 extension has + been added if it was not already present. + """ + if not out_fpath.endswith('.h5'): + out_fpath += '.h5' + + if ModuleName.BESPOKE not in out_fpath: + extension_with_module = "_{}.h5".format(ModuleName.BESPOKE) + out_fpath = out_fpath.replace(".h5", extension_with_module) + + if not self.completed_gids: + msg = ("No output data found! It is likely that all requested " + "points are excluded.") + logger.warning(msg) + warn(msg) + return out_fpath + + sample = self.outputs[self.completed_gids[0]] + self._init_fout(out_fpath, sample) + + dsets = [d for d in sample.keys() + if not d.startswith('time_index-') + and d != 'meta'] + with Outputs(out_fpath, mode='a') as f: + for dset in dsets: + full_arr = self._collect_out_arr(dset, sample) + if full_arr is not None: + dset_no_year = dset + if parse_year(dset, option='boolean'): + year = parse_year(dset) + dset_no_year = dset.replace('-{}'.format(year), '') + + attrs = BespokeSinglePlant.OUT_ATTRS.get(dset_no_year, {}) + attrs = copy.deepcopy(attrs) + dtype = attrs.pop('dtype', np.float32) + chunks = attrs.pop('chunks', None) + try: + f.write_dataset(dset, full_arr, dtype, chunks=chunks, + attrs=attrs) + except Exception as e: + msg = 'Failed to write "{}" to disk.'.format(dset) + logger.exception(msg) + raise IOError(msg) from e + + logger.info('Saved output data to: {}'.format(out_fpath)) + return out_fpath
+ + # pylint: disable=arguments-renamed +
[docs] @classmethod + def run_serial(cls, excl_fpath, res_fpath, tm_dset, + sam_sys_inputs, objective_function, + capital_cost_function, + fixed_operating_cost_function, + variable_operating_cost_function, + min_spacing='5x', wake_loss_multiplier=1, ga_kwargs=None, + output_request=('system_capacity', 'cf_mean'), + ws_bins=(0.0, 20.0, 5.0), wd_bins=(0.0, 360.0, 45.0), + excl_dict=None, inclusion_mask=None, + area_filter_kernel='queen', min_area=None, + resolution=64, excl_area=0.0081, data_layers=None, + gids=None, exclusion_shape=None, slice_lookup=None, + prior_meta=None, gid_map=None, bias_correct=None, + pre_loaded_data=None): + """ + Standalone serial method to run bespoke optimization. + See BespokeWindPlants docstring for parameter description. + + This method can only take a single sam_sys_inputs... For a spatially + variant gid-to-config mapping, see the BespokeWindPlants class methods. + + Returns + ------- + out : dict + Bespoke outputs keyed by sc point gid + """ + + out = {} + with SupplyCurveExtent(excl_fpath, resolution=resolution) as sc: + if gids is None: + gids = sc.valid_sc_points(tm_dset) + elif np.issubdtype(type(gids), np.number): + gids = [gids] + if slice_lookup is None: + slice_lookup = sc.get_slice_lookup(gids) + if exclusion_shape is None: + exclusion_shape = sc.exclusions.shape + + cls._check_inclusion_mask(inclusion_mask, gids, exclusion_shape) + Handler = BespokeSinglePlant.get_wind_handler(res_fpath) + + # pre-extract handlers so they are not repeatedly initialized + file_kwargs = {'excl_dict': excl_dict, + 'area_filter_kernel': area_filter_kernel, + 'min_area': min_area, + 'h5_handler': Handler, + } + + with AggFileHandler(excl_fpath, res_fpath, **file_kwargs) as fh: + n_finished = 0 + for gid in gids: + gid_inclusions = cls._get_gid_inclusion_mask( + inclusion_mask, gid, slice_lookup, + resolution=resolution) + try: + bsp_plant_out = BespokeSinglePlant.run( + gid, + fh.exclusions, + fh.h5, + tm_dset, + sam_sys_inputs, + 
objective_function, + capital_cost_function, + fixed_operating_cost_function, + variable_operating_cost_function, + min_spacing=min_spacing, + wake_loss_multiplier=wake_loss_multiplier, + ga_kwargs=ga_kwargs, + output_request=output_request, + ws_bins=ws_bins, + wd_bins=wd_bins, + excl_dict=excl_dict, + inclusion_mask=gid_inclusions, + resolution=resolution, + excl_area=excl_area, + data_layers=data_layers, + exclusion_shape=exclusion_shape, + prior_meta=prior_meta, + gid_map=gid_map, + bias_correct=bias_correct, + pre_loaded_data=pre_loaded_data, + close=False) + + except EmptySupplyCurvePointError: + logger.debug('SC gid {} is fully excluded or does not ' + 'have any valid source data!'.format(gid)) + except Exception as e: + msg = 'SC gid {} failed!'.format(gid) + logger.exception(msg) + raise RuntimeError(msg) from e + else: + n_finished += 1 + logger.debug('Serial bespoke: ' + '{} out of {} points complete' + .format(n_finished, len(gids))) + log_mem(logger) + out[gid] = bsp_plant_out + + return out
+ +
[docs] def run_parallel(self, max_workers=None): + """Run the bespoke optimization for many supply curve points in + parallel. + + Parameters + ---------- + max_workers : int | None, optional + Number of cores to run summary on. None is all + available cpus, by default None + + Returns + ------- + out : dict + Bespoke outputs keyed by sc point gid + """ + + logger.info('Running bespoke optimization for points {} through {} ' + 'at a resolution of {} on {} cores.' + .format(self.gids[0], self.gids[-1], self._resolution, + max_workers)) + + futures = [] + out = {} + n_finished = 0 + loggers = [__name__, 'reV.supply_curve.point_summary', 'reV'] + with SpawnProcessPool(max_workers=max_workers, loggers=loggers) as exe: + + # iterate through split executions, submitting each to worker + for gid in self.gids: + # submit executions and append to futures list + gid_incl_mask = None + if self._inclusion_mask is not None: + rs, cs = self.slice_lookup[gid] + gid_incl_mask = self._inclusion_mask[rs, cs] + + futures.append(exe.submit( + self.run_serial, + self._excl_fpath, + self._res_fpath, + self._tm_dset, + self.sam_sys_inputs_with_site_data(gid), + self._obj_fun, + self._cap_cost_fun, + self._foc_fun, + self._voc_fun, + self._min_spacing, + wake_loss_multiplier=self._wake_loss_multiplier, + ga_kwargs=self._ga_kwargs, + output_request=self._output_request, + ws_bins=self._ws_bins, + wd_bins=self._wd_bins, + excl_dict=self._excl_dict, + inclusion_mask=gid_incl_mask, + area_filter_kernel=self._area_filter_kernel, + min_area=self._min_area, + resolution=self._resolution, + excl_area=self._excl_area, + data_layers=self._data_layers, + gids=gid, + exclusion_shape=self.shape, + slice_lookup=copy.deepcopy(self.slice_lookup), + prior_meta=self._get_prior_meta(gid), + gid_map=self._gid_map, + bias_correct=self._bias_correct, + pre_loaded_data=self._pre_loaded_data_for_sc_gid(gid))) + + # gather results + for future in as_completed(futures): + n_finished += 1 + 
out.update(future.result()) + if n_finished % 10 == 0: + mem = psutil.virtual_memory() + logger.info('Parallel bespoke futures collected: ' + '{} out of {}. Memory usage is {:.3f} GB out ' + 'of {:.3f} GB ({:.2f}% utilized).' + .format(n_finished, len(futures), + mem.used / 1e9, mem.total / 1e9, + 100 * mem.used / mem.total)) + + return out
+ +
[docs] def run(self, out_fpath=None, max_workers=None): + """Run the bespoke wind plant optimization in serial or parallel. + + Parameters + ---------- + out_fpath : str, optional + Path to output file. If ``None``, no output file will + be written. If the filepath is specified but the module name + (bespoke) is not included, the module name will get added to + the output file name. By default, ``None``. + max_workers : int, optional + Number of local workers to run on. If ``None``, uses all + available cores (typically 36). By default, ``None``. + + Returns + ------- + str | None + Path to output HDF5 file, or ``None`` if results were not + written to disk. + """ + + # parallel job distribution test. + if self._obj_fun == 'test': + return True + + if max_workers == 1: + slice_lookup = copy.deepcopy(self.slice_lookup) + for gid in self.gids: + gid_incl_mask = None + if self._inclusion_mask is not None: + rs, cs = slice_lookup[gid] + gid_incl_mask = self._inclusion_mask[rs, cs] + + sam_inputs = self.sam_sys_inputs_with_site_data(gid) + prior_meta = self._get_prior_meta(gid) + pre_loaded_data = self._pre_loaded_data_for_sc_gid(gid) + afk = self._area_filter_kernel + wlm = self._wake_loss_multiplier + si = self.run_serial(self._excl_fpath, + self._res_fpath, + self._tm_dset, + sam_inputs, + self._obj_fun, + self._cap_cost_fun, + self._foc_fun, + self._voc_fun, + min_spacing=self._min_spacing, + wake_loss_multiplier=wlm, + ga_kwargs=self._ga_kwargs, + output_request=self._output_request, + ws_bins=self._ws_bins, + wd_bins=self._wd_bins, + excl_dict=self._excl_dict, + inclusion_mask=gid_incl_mask, + area_filter_kernel=afk, + min_area=self._min_area, + resolution=self._resolution, + excl_area=self._excl_area, + data_layers=self._data_layers, + slice_lookup=slice_lookup, + prior_meta=prior_meta, + gid_map=self._gid_map, + bias_correct=self._bias_correct, + gids=gid, + pre_loaded_data=pre_loaded_data) + self._outputs.update(si) + else: + self._outputs = 
self.run_parallel(max_workers=max_workers) + + if out_fpath is not None: + out_fpath = self.save_outputs(out_fpath) + + return out_fpath
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/bespoke/gradient_free.html b/_modules/reV/bespoke/gradient_free.html new file mode 100644 index 000000000..5a368bf39 --- /dev/null +++ b/_modules/reV/bespoke/gradient_free.html @@ -0,0 +1,901 @@ + + + + + + reV.bespoke.gradient_free — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.bespoke.gradient_free

+# -*- coding: utf-8 -*-
+"""
+a simple genetic algorithm
+"""
+import numpy as np
+import time
+from math import log
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class GeneticAlgorithm(): + """a simple genetic algorithm used to select bespoke turbine locations + """ + + def __init__(self, bits, bounds, variable_type, objective_function, + max_generation=100, population_size=0, crossover_rate=0.1, + mutation_rate=0.01, tol=1E-6, convergence_iters=5, + max_time=3600): + """ + Parameters + ---------- + bits : array of ints + The number of bits assigned to each of the design variables. + The number of discretizations for each design variables will be + 2^n where n is the number of bits assigned to that variable. + bounds : array of tuples + The bounds for each design variable. This parameter looks like: + np.array([(lower, upper), (lower, upper)...]) + variable_type : array of strings ('int' or 'float') + The type of each design variable (int or float). + objective_function : function handle for the objective that is to be + minimized. Should take a single variable as an input which is a + list/array of the design variables. + max_generation : int, optional + The maximum number of generations that will be run in the genetic + algorithm. + population_size : int, optional + The population size in the genetic algorithm. + crossover_rate : float, optional + The probability of crossover for a single bit during the crossover + phase of the genetic algorithm. + mutation_rate : float, optional + The probability of a single bit mutating during the mutation phase + of the genetic algorithm. + tol : float, optional + The absolute tolerance to determine convergence. + convergence_iters : int, optional + The number of generations to determine convergence. + max_time : float + The maximum time (in seconds) to run the genetic algorithm. 
+ """ + + logger.debug('Initializing GeneticAlgorithm...') + logger.debug('Minimum convergence iterations: {}' + .format(convergence_iters)) + logger.debug('Max iterations (generations): {}'.format(max_generation)) + logger.debug('Population size: {}'.format(population_size)) + logger.debug('Crossover rate: {}'.format(crossover_rate)) + logger.debug('Mutation rate: {}'.format(mutation_rate)) + logger.debug('Convergence tolerance: {}'.format(tol)) + logger.debug('Maximum runtime (in seconds): {}'.format(max_time)) + + # inputs + self.bits = bits + self.bounds = bounds + self.variable_type = variable_type + self.objective_function = objective_function + self.max_generation = max_generation + self.population_size = population_size + self.crossover_rate = crossover_rate + self.mutation_rate = mutation_rate + self.tol = tol + self.convergence_iters = convergence_iters + self.max_time = max_time + + # internal variables, you could output some of this info if you wanted + self.design_variables = np.array([]) # the desgin variables as they + # are passed into self.objective function + self.nbits = 0 # the total number of bits in each chromosome + self.nvars = 0 # the total number of design variables + self.parent_population = np.array([]) # 2D array containing all of the + # parent individuals + self.offspring_population = np.array([]) # 2D array containing all of + # the offspring individuals + self.parent_fitness = np.array([]) # array containing all of the + # parent fitnesses + self.offspring_fitness = np.array([]) # array containing all of the + # offspring fitnesses + self.discretized_variables = {} # a dict of arrays containing all of + # the discretized design variable + + # outputs + self.solution_history = np.array([]) + self.optimized_function_value = 0.0 + self.optimized_design_variables = np.array([]) + + self.initialize_design_variables() + self.initialize_bits() + if self.population_size % 2 == 1: + self.population_size += 1 + self.initialize_population() + 
self.initialize_fitness() + + if self.population_size > 5: + n = 5 + else: + n = self.population_size + logger.debug('The first few parent individuals are: {}' + .format(self.parent_population[0:n])) + logger.debug('The first few parent fitness values are: {}' + .format(self.parent_fitness[0:n])) + +
[docs] def initialize_design_variables(self): + """initialize the design variables from the randomly initialized + population + """ + # determine the number of design variables and initialize + self.nvars = len(self.variable_type) + self.design_variables = np.zeros(self.nvars) + float_ind = 0 + for i in range(self.nvars): + if self.variable_type[i] == "float": + ndiscretizations = 2**self.bits[i] + self.discretized_variables["float_var%s" % float_ind] = \ + np.linspace(self.bounds[i][0], self.bounds[i][1], + ndiscretizations) + float_ind += 1
+ +
[docs] def initialize_bits(self): + """determine the total number of bits""" + # determine the total number of bits + for i in range(self.nvars): + if self.variable_type[i] == "int": + int_range = self.bounds[i][1] - self.bounds[i][0] + int_bits = int(np.ceil(log(int_range, 2))) + self.bits[i] = int_bits + self.nbits += self.bits[i]
+ +
[docs] def initialize_population(self): + """randomly initialize the parent and offspring populations""" + all_bits_on = np.ones((1, self.nbits)) + random_bits_on = np.random.randint( + 0, high=2, size=(self.population_size - 1, self.nbits) + ) + self.parent_population = np.r_[all_bits_on, random_bits_on] + self.offspring_population = np.zeros_like(self.parent_population)
+ +
[docs] def initialize_fitness(self): + """initialize the fitness of member of the parent population""" + # initialize the fitness arrays + self.parent_fitness = np.zeros(self.population_size) + self.offspring_fitness = np.zeros(self.population_size) + + # initialize fitness of the parent population + for i in range(self.population_size): + self.chromosome_2_variables(self.parent_population[i]) + self.parent_fitness[i] = \ + self.objective_function(self.design_variables)
+ +
[docs] def chromosome_2_variables(self, chromosome): + """convert the binary chromosomes to design variable values""" + + first_bit = 0 + float_ind = 0 + + for i in range(self.nvars): + binary_value = 0 + for j in range(self.bits[i]): + binary_value += chromosome[first_bit + j] * 2**j + first_bit += self.bits[i] + + if self.variable_type[i] == "float": + self.design_variables[i] = \ + self.discretized_variables["float_var%s" + % float_ind][binary_value] + float_ind += 1 + + elif self.variable_type[i] == "int": + self.design_variables[i] = self.bounds[i][0] + binary_value
+ +
[docs] def crossover(self): + """perform crossover between individual parents""" + self.offspring_population[:, :] = self.parent_population[:, :] + + # mate conscutive pairs of parents (0, 1), (2, 3), ... + # The population is shuffled so this does not need to be randomized + for i in range(int(self.population_size / 2)): + # trade bits in the offspring + crossover_arr = np.random.rand(self.nbits) + for j in range(self.nbits): + if crossover_arr[j] < self.crossover_rate: + self.offspring_population[2 * i][j], \ + self.offspring_population[2 * i + 1][j] = \ + self.offspring_population[2 * i + 1][j], \ + self.offspring_population[2 * i][j]
+ +
[docs] def mutate(self): + """randomly mutate bits of each chromosome""" + for i in range(int(self.population_size)): + # mutate bits in the offspring + mutate_arr = np.random.rand(self.nbits) + for j in range(self.nbits): + if mutate_arr[j] < self.mutation_rate: + self.offspring_population[i][j] = \ + (self.offspring_population[i][j] + 1) % 2
+ +
[docs] def optimize_ga(self): + """run the genetic algorithm""" + + converged = False + ngens = 1 + generation = 1 + difference = self.tol * 10000.0 + self.solution_history = np.zeros(self.max_generation + 1) + self.solution_history[0] = np.min(self.parent_fitness) + + run_time = 0.0 + start_time = time.time() + while converged is False and ngens < self.max_generation and \ + run_time < self.max_time: + self.crossover() + self.mutate() + # determine fitness of offspring + for i in range(self.population_size): + self.chromosome_2_variables(self.offspring_population[i]) + self.offspring_fitness[i] = \ + self.objective_function(self.design_variables) + + # rank the total population from best to worst + total_fitness = np.append(self.parent_fitness, + self.offspring_fitness) + ranked_fitness = \ + np.argsort(total_fitness)[0:int(self.population_size)] + + total_population = \ + np.vstack([self.parent_population, self.offspring_population]) + self.parent_population[:, :] = total_population[ranked_fitness, :] + self.parent_fitness[:] = total_fitness[ranked_fitness] + + # store solution history and wrap up generation + self.solution_history[generation] = np.min(self.parent_fitness) + + if generation > self.convergence_iters: + difference = \ + self.solution_history[generation - self.convergence_iters]\ + - self.solution_history[generation] + else: + difference = 1000 + if abs(difference) <= self.tol: + converged = True + + # shuffle up the order of the population + shuffle_order = np.arange(1, self.population_size) + np.random.shuffle(shuffle_order) + shuffle_order = np.append([0], shuffle_order) + self.parent_population = self.parent_population[shuffle_order] + self.parent_fitness = self.parent_fitness[shuffle_order] + + generation += 1 + ngens += 1 + + run_time = time.time() - start_time + + # Assign final outputs + self.solution_history = self.solution_history[0:ngens] + self.optimized_function_value = np.min(self.parent_fitness) + self.chromosome_2_variables( + 
self.parent_population[np.argmin(self.parent_fitness)]) + self.optimized_design_variables = self.design_variables + + logger.debug('The GA ran for this many generations: {}' + .format(ngens)) + logger.debug('The GA ran for this many seconds: {:.3f}' + .format(run_time)) + logger.debug('The optimized function value was: {:.3e}' + .format(self.optimized_function_value)) + logger.debug('The optimal design variables were: {}' + .format(self.optimized_design_variables))
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/bespoke/pack_turbs.html b/_modules/reV/bespoke/pack_turbs.html new file mode 100644 index 000000000..7ea610982 --- /dev/null +++ b/_modules/reV/bespoke/pack_turbs.html @@ -0,0 +1,725 @@ + + + + + + reV.bespoke.pack_turbs — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.bespoke.pack_turbs

+# -*- coding: utf-8 -*-
+"""
+turbine packing module.
+"""
+import numpy as np
+from shapely.geometry import Polygon, MultiPolygon, Point
+from reV.bespoke.plotting_functions import get_xy
+from reV.utilities.exceptions import WhileLoopPackingError
+
+
+
[docs]class PackTurbines(): + """Framework to maximize plant capacity in a provided wind plant area. + """ + + def __init__(self, min_spacing, safe_polygons, weight_x=0.0013547): + """ + Parameters + ---------- + min_spacing : float + The minimum allowed spacing between wind turbines. + safe_polygons : Polygon | MultiPolygon + The "safe" area(s) where turbines can be placed without + violating boundary, setback, exclusion, or other constraints. + weight_x : float, optional + """ + + self.min_spacing = min_spacing + self.safe_polygons = safe_polygons + self.weight_x = weight_x + + # turbine locations + self.turbine_x = np.array([]) + self.turbine_y = np.array([]) + +
[docs] def pack_turbines_poly(self): + """Fast packing algorithm that maximizes plant capacity in a + provided wind plant area. Sets the the optimal locations to + self.turbine_x and self.turbine_y + """ + + if self.safe_polygons.area > 0.0: + can_add_more = True + leftover = MultiPolygon(self.safe_polygons) + iters = 0 + while can_add_more: + iters += 1 + if iters > 10000: + msg = ('Too many points placed in packing algorithm') + raise WhileLoopPackingError(msg) + + if leftover.area > 0: + nareas = len(leftover.geoms) + areas = np.zeros(len(leftover.geoms)) + for i in range(nareas): + areas[i] = leftover.geoms[i].area + m = min(i for i in areas if i > 0) + ind = np.where(areas == m)[0][0] + # smallest_area = leftover.geoms[np.argmin(areas)] + smallest_area = leftover.geoms[ind] + exterior_coords = smallest_area.exterior.coords[:] + x, y = get_xy(exterior_coords) + metric = self.weight_x * x + y + index = np.argmin(metric) + self.turbine_x = np.append(self.turbine_x, + x[index]) + self.turbine_y = np.append(self.turbine_y, + y[index]) + new_turbine = Point(x[index], + y[index] + ).buffer(self.min_spacing) + else: + break + leftover = leftover.difference(new_turbine) + if isinstance(leftover, Polygon): + leftover = MultiPolygon([leftover])
+ +
[docs] def clear(self): + """Reset the packing algorithm by clearing the x and y turbine arrays + """ + self.turbine_x = np.array([]) + self.turbine_y = np.array([])
+ + +
[docs]def smallest_area_with_tiebreakers(g): + """_summary_ + + This function helps break ties in the area of two different + geometries using their exterior coordinate values. + + Parameters + ---------- + g : _type_ + A geometry object with an `area` and an + `exterior.coords` coords attribute. + + Returns + ------- + tuple + Tuple with the following elements: + - area of the geometry + - minimum exterior coordinate (southwest) + - maximum exterior coordinate (northeast) + """ + return g.area, min(g.exterior.coords), max(g.exterior.coords)
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/bespoke/place_turbines.html b/_modules/reV/bespoke/place_turbines.html new file mode 100644 index 000000000..8aa85fa00 --- /dev/null +++ b/_modules/reV/bespoke/place_turbines.html @@ -0,0 +1,1146 @@ + + + + + + reV.bespoke.place_turbines — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.bespoke.place_turbines

+# -*- coding: utf-8 -*-
+# pylint: disable=inconsistent-return-statements
+"""
+place turbines for bespoke wind plants
+"""
+import numpy as np
+
+from shapely.geometry import Point, Polygon, MultiPolygon, MultiPoint
+
+from reV.bespoke.pack_turbs import PackTurbines
+from reV.bespoke.gradient_free import GeneticAlgorithm
+from reV.utilities.exceptions import WhileLoopPackingError
+
+
+
[docs]def none_until_optimized(func): + """Decorator that returns None until `PlaceTurbines` is optimized. + + Meant for exclusive use in `PlaceTurbines` and its subclasses. + `PlaceTurbines` is considered optimized when its + `optimized_design_variables` attribute is not `None`. + + Parameters + ---------- + func : callable + A callable function that should return `None` until + `PlaceTurbines` is optimized. + + Returns + ------- + callable + New function that returns `None` until `PlaceTurbines` is + optimized. + """ + + def _func(pt): + """Wrapper to return `None` if `PlaceTurbines` is not optimized""" + if pt.optimized_design_variables is None: + return + return func(pt) + return _func
+ + +
[docs]class PlaceTurbines: + """Framework for optimizing turbine locations for site specific + exclusions, wind resources, and objective + """ + + def __init__(self, wind_plant, objective_function, + capital_cost_function, + fixed_operating_cost_function, + variable_operating_cost_function, + include_mask, pixel_side_length, min_spacing, + wake_loss_multiplier=1): + """ + Parameters + ---------- + wind_plant : WindPowerPD + wind plant object to analyze wind plant performance. This object + should have everything in the plant defined, such that only the + turbine coordinates and plant capacity need to be defined during + the optimization. + objective_function : str + The objective function of the optimization as a string, should + return the objective to be minimized during layout optimization. + Variables available are: + + - n_turbines: the number of turbines + - system_capacity: wind plant capacity + - aep: annual energy production + - fixed_charge_rate: user input fixed_charge_rate if included + as part of the sam system config. + - capital_cost: plant capital cost as evaluated + by `capital_cost_function` + - fixed_operating_cost: plant fixed annual operating cost as + evaluated by `fixed_operating_cost_function` + - variable_operating_cost: plant variable annual operating cost + as evaluated by `variable_operating_cost_function` + - self.wind_plant: the SAM wind plant object, through which + all SAM variables can be accessed + - cost: the annual cost of the wind plant (from cost_function) + + capital_cost_function : str + The plant capital cost function as a string, must return the total + capital cost in $. Has access to the same variables as the + objective_function. + fixed_operating_cost_function : str + The plant annual fixed operating cost function as a string, must + return the fixed operating cost in $/year. Has access to the same + variables as the objective_function. 
+ variable_operating_cost_function : str + The plant annual variable operating cost function as a string, must + return the variable operating cost in $/kWh. Has access to the same + variables as the objective_function. + exclusions : ExclusionMaskFromDict + The exclusions that define where turbines can be placed. Contains + exclusions.latitude, exclusions.longitude, and exclusions.mask + min_spacing : float + The minimum spacing between turbines (in meters). + wake_loss_multiplier : float, optional + A multiplier used to scale the annual energy lost due to + wake losses. **IMPORTANT**: This multiplier will ONLY be + applied during the optimization process and will NOT be + come through in output values such as aep, any of the cost + functions, or even the output objective. + """ + + # inputs + self.wind_plant = wind_plant + + self.capital_cost_function = capital_cost_function + self.fixed_operating_cost_function = fixed_operating_cost_function + self.variable_operating_cost_function = \ + variable_operating_cost_function + + self.objective_function = objective_function + self.include_mask = include_mask + self.pixel_side_length = pixel_side_length + self.min_spacing = min_spacing + self.wake_loss_multiplier = wake_loss_multiplier + + # internal variables + self.nrows, self.ncols = np.shape(include_mask) + self.x_locations = np.array([]) + self.y_locations = np.array([]) + self.turbine_capacity = \ + np.max(self.wind_plant. 
+ sam_sys_inputs["wind_turbine_powercurve_powerout"]) + self.full_polygons = None + self.packing_polygons = None + self.optimized_design_variables = None + self.safe_polygons = None + + self.ILLEGAL = ('import ', 'os.', 'sys.', '.__', '__.', 'eval', 'exec') + self._preflight(self.objective_function) + self._preflight(self.capital_cost_function) + self._preflight(self.fixed_operating_cost_function) + self._preflight(self.variable_operating_cost_function) + + def _preflight(self, eqn): + """Run preflight checks on the equation string.""" + for substr in self.ILLEGAL: + if substr in str(eqn): + msg = ('Will not evaluate string which contains "{}": {}' + .format(substr, eqn)) + raise ValueError(msg) + +
[docs] def define_exclusions(self): + """From the exclusions data, create a shapely MultiPolygon as + self.safe_polygons that defines where turbines can be placed. + """ + nx, ny = np.shape(self.include_mask) + self.safe_polygons = MultiPolygon() + side_x = np.arange(nx + 1) * self.pixel_side_length + side_y = np.arange(ny + 1, -1, -1) * self.pixel_side_length + floored = np.floor(self.include_mask) + for i in range(nx): + for j in range(ny): + if floored[j, i] == 1: + added_poly = Polygon(((side_x[i], side_y[j]), + (side_x[i + 1], side_y[j]), + (side_x[i + 1], side_y[j + 1]), + (side_x[i], side_y[j + 1]))) + self.safe_polygons = self.safe_polygons.union(added_poly) + + if self.safe_polygons.area == 0.0: + self.full_polygons = MultiPolygon([]) + self.packing_polygons = MultiPolygon([]) + else: + self.full_polygons = self.safe_polygons.buffer(0) + + # add extra setback to cell boundary + minx = 0.0 + miny = 0.0 + maxx = nx * self.pixel_side_length + maxy = ny * self.pixel_side_length + minx += self.min_spacing / 2.0 + miny += self.min_spacing / 2.0 + maxx -= self.min_spacing / 2.0 + maxy -= self.min_spacing / 2.0 + + boundary_poly = \ + Polygon(((minx, miny), (minx, maxy), (maxx, maxy), + (maxx, miny))) + packing_polygons = boundary_poly.intersection(self.full_polygons) + if isinstance(packing_polygons, MultiPolygon): + self.packing_polygons = packing_polygons + elif isinstance(packing_polygons, Polygon): + self.packing_polygons = MultiPolygon([packing_polygons]) + else: + self.packing_polygons = MultiPolygon([])
+ +
[docs] def initialize_packing(self): + """run the turbine packing algorithm (maximizing plant capacity) to + define potential turbine locations that will be used as design + variables in the gentic algorithm. + """ + packing = PackTurbines(self.min_spacing, self.packing_polygons) + nturbs = 1E6 + mult = 1.0 + iters = 0 + while nturbs > 300: + iters += 1 + if iters > 10000: + msg = ('Too many attempts within initialize packing') + raise WhileLoopPackingError(msg) + packing.clear() + packing.min_spacing = self.min_spacing * mult + packing.pack_turbines_poly() + nturbs = len(packing.turbine_x) + mult *= 1.1 + self.x_locations = packing.turbine_x + self.y_locations = packing.turbine_y
+ + # pylint: disable=W0641,W0123 +
[docs] def optimization_objective(self, x): + """The optimization objective used in the bespoke optimization + """ + x = [bool(y) for y in x] + if len(x) > 0: + n_turbines = np.sum(x) + self.wind_plant["wind_farm_xCoordinates"] = self.x_locations[x] + self.wind_plant["wind_farm_yCoordinates"] = self.y_locations[x] + + system_capacity = n_turbines * self.turbine_capacity + self.wind_plant["system_capacity"] = system_capacity + + self.wind_plant.assign_inputs() + self.wind_plant.execute() + aep = self._aep_after_scaled_wake_losses() + else: + n_turbines = system_capacity = aep = 0 + + fixed_charge_rate = self.fixed_charge_rate + capital_cost = eval(self.capital_cost_function, + globals(), locals()) + fixed_operating_cost = eval(self.fixed_operating_cost_function, + globals(), locals()) + variable_operating_cost = eval(self.variable_operating_cost_function, + globals(), locals()) + + capital_cost *= self.wind_plant.sam_sys_inputs.get( + 'capital_cost_multiplier', 1) + fixed_operating_cost *= self.wind_plant.sam_sys_inputs.get( + 'fixed_operating_cost_multiplier', 1) + variable_operating_cost *= self.wind_plant.sam_sys_inputs.get( + 'variable_operating_cost_multiplier', 1) + + objective = eval(self.objective_function, globals(), locals()) + + return objective
+ + def _aep_after_scaled_wake_losses(self): + """AEP after scaling the energy lost due to wake.""" + wake_loss_pct = self.wind_plant['wake_losses'] + aep = self.wind_plant['annual_energy'] + agep = self.wind_plant['annual_gross_energy'] + + energy_lost_due_to_wake = wake_loss_pct / 100 * agep + aep_after_wake_losses = agep - energy_lost_due_to_wake + other_losses_multiplier = 1 - aep / aep_after_wake_losses + + scaled_wake_losses = (self.wake_loss_multiplier + * energy_lost_due_to_wake) + aep_after_scaled_wake_losses = max(0, agep - scaled_wake_losses) + return aep_after_scaled_wake_losses * (1 - other_losses_multiplier) + +
[docs] def optimize(self, **kwargs): + """Optimize wind farm layout. + + Use a genetic algorithm to optimize wind plant layout for the + user-defined objective function. + + Parameters + ---------- + **kwargs + Keyword arguments to pass to GA initialization. + + See Also + -------- + :class:`~reV.bespoke.gradient_free.GeneticAlgorithm` : GA Algorithm. + """ + nlocs = len(self.x_locations) + bits = np.ones(nlocs, dtype=int) + bounds = np.zeros((nlocs, 2), dtype=int) + bounds[:, 1] = 2 + variable_type = np.array([]) + for _ in range(nlocs): + variable_type = np.append(variable_type, "int") + + ga_kwargs = { + 'max_generation': 10000, + 'population_size': 25, + 'crossover_rate': 0.2, + 'mutation_rate': 0.01, + 'tol': 1E-6, + 'convergence_iters': 10000, + 'max_time': 3600 + } + + ga_kwargs.update(kwargs) + + ga = GeneticAlgorithm(bits, bounds, variable_type, + self.optimization_objective, + **ga_kwargs) + + ga.optimize_ga() + + optimized_design_variables = ga.optimized_design_variables + self.optimized_design_variables = \ + [bool(y) for y in optimized_design_variables] + + self.wind_plant["wind_farm_xCoordinates"] = self.turbine_x + self.wind_plant["wind_farm_yCoordinates"] = self.turbine_y + self.wind_plant["system_capacity"] = self.capacity
+ +
[docs] def place_turbines(self, **kwargs): + """Define bespoke wind plant turbine layouts. + + Run all functions to define bespoke wind plant turbine layouts. + + Parameters + ---------- + **kwargs + Keyword arguments to pass to GA initialization. + + See Also + -------- + :class:`~reV.bespoke.gradient_free.GeneticAlgorithm` : GA Algorithm. + """ + self.define_exclusions() + self.initialize_packing() + self.optimize(**kwargs)
+ +
[docs] def capital_cost_per_kw(self, capacity_mw): + """Capital cost function ($ per kW) evaluated for a given capacity. + + The capacity will be adjusted to be an exact multiple of the + turbine rating in order to yield an integer number of + turbines. + + Parameters + ---------- + capacity_mw : float + The desired capacity (MW) to sample the cost curve at. Note + as mentioned above, the capacity will be adjusted to be an + exact multiple of the turbine rating in order to yield an + integer number of turbines. For best results, set this + value to be an integer multiple of the turbine rating. + + Returns + ------- + capital_cost : float + Capital cost ($ per kW) for the (adjusted) plant capacity. + """ + + fixed_charge_rate = self.fixed_charge_rate + n_turbines = int(round(capacity_mw * 1e3 / self.turbine_capacity)) + system_capacity = n_turbines * self.turbine_capacity + mult = self.wind_plant.sam_sys_inputs.get( + 'capital_cost_multiplier', 1) / system_capacity + return eval(self.capital_cost_function, globals(), locals()) * mult
+ + @property + def fixed_charge_rate(self): + """Fixed charge rate if input to the SAM WindPowerPD object, None if + not found in inputs.""" + return self.wind_plant.sam_sys_inputs.get('fixed_charge_rate', None) + + @property + @none_until_optimized + def turbine_x(self): + """This is the final optimized turbine x locations (m)""" + return self.x_locations[self.optimized_design_variables] + + @property + @none_until_optimized + def turbine_y(self): + """This is the final optimized turbine y locations (m)""" + return self.y_locations[self.optimized_design_variables] + + @property + @none_until_optimized + def nturbs(self): + """This is the final optimized number of turbines""" + return np.sum(self.optimized_design_variables) + + @property + @none_until_optimized + def capacity(self): + """This is the final optimized plant nameplate capacity (kW)""" + return self.turbine_capacity * self.nturbs + + @property + @none_until_optimized + def convex_hull(self): + """This is the convex hull of the turbine locations""" + turbines = MultiPoint([Point(x, y) + for x,y in zip(self.turbine_x, self.turbine_y)]) + return turbines.convex_hull + + @property + @none_until_optimized + def area(self): + """This is the area available for wind turbine placement (km^2)""" + return self.full_polygons.area / 1e6 + + @property + @none_until_optimized + def convex_hull_area(self): + """This is the area of the convex hull of the turbines (km^2)""" + return self.convex_hull.area / 1e6 + + @property + @none_until_optimized + def full_cell_area(self): + """This is the full non-excluded area available for wind turbine + placement (km^2)""" + nx, ny = np.shape(self.include_mask) + side_x = nx * self.pixel_side_length + side_y = ny * self.pixel_side_length + return side_x * side_y / 1e6 + + @property + @none_until_optimized + def capacity_density(self): + """This is the optimized capacity density of the wind plant + defined with the area available after removing the exclusions + (MW/km2)""" + if 
self.full_polygons is None or self.capacity is None: + return + + if self.area != 0.0: + return self.capacity / self.area / 1E3 + + return 0.0 + + @property + @none_until_optimized + def convex_hull_capacity_density(self): + """This is the optimized capacity density of the wind plant + defined with the convex hull area of the turbine layout (MW/km2)""" + if self.convex_hull_area != 0.0: + return self.capacity / self.convex_hull_area / 1E3 + return 0.0 + + @property + @none_until_optimized + def full_cell_capacity_density(self): + """This is the optimized capacity density of the wind plant + defined with the full non-excluded area of the turbine layout (MW/km2) + """ + if self.full_cell_area != 0.0: + return self.capacity / self.full_cell_area / 1E3 + return 0.0 + + @property + @none_until_optimized + def aep(self): + """This is the annual energy production of the optimized plant (kWh)""" + if self.nturbs <= 0: + return 0 + + self.wind_plant["wind_farm_xCoordinates"] = self.turbine_x + self.wind_plant["wind_farm_yCoordinates"] = self.turbine_y + self.wind_plant["system_capacity"] = self.capacity + self.wind_plant.assign_inputs() + self.wind_plant.execute() + return self.wind_plant.annual_energy() + + # pylint: disable=W0641,W0123 + @property + @none_until_optimized + def capital_cost(self): + """This is the capital cost of the optimized plant ($)""" + fixed_charge_rate = self.fixed_charge_rate + n_turbines = self.nturbs + system_capacity = self.capacity + aep = self.aep + mult = self.wind_plant.sam_sys_inputs.get( + 'capital_cost_multiplier', 1) + return eval(self.capital_cost_function, globals(), locals()) * mult + + # pylint: disable=W0641,W0123 + @property + @none_until_optimized + def fixed_operating_cost(self): + """This is the annual fixed operating cost of the + optimized plant ($/year)""" + fixed_charge_rate = self.fixed_charge_rate + n_turbines = self.nturbs + system_capacity = self.capacity + aep = self.aep + mult = self.wind_plant.sam_sys_inputs.get( + 
'fixed_operating_cost_multiplier', 1) + return eval(self.fixed_operating_cost_function, + globals(), locals()) * mult + + # pylint: disable=W0641,W0123 + @property + @none_until_optimized + def variable_operating_cost(self): + """This is the annual variable operating cost of the + optimized plant ($/kWh)""" + fixed_charge_rate = self.fixed_charge_rate + n_turbines = self.nturbs + system_capacity = self.capacity + aep = self.aep + mult = self.wind_plant.sam_sys_inputs.get( + 'variable_operating_cost_multiplier', 1) + return eval(self.variable_operating_cost_function, + globals(), locals()) * mult + + # pylint: disable=W0641,W0123 + @property + @none_until_optimized + def objective(self): + """This is the optimized objective function value""" + fixed_charge_rate = self.fixed_charge_rate + n_turbines = self.nturbs + system_capacity = self.capacity + aep = self.aep + capital_cost = self.capital_cost + fixed_operating_cost = self.fixed_operating_cost + variable_operating_cost = self.variable_operating_cost + return eval(self.objective_function, globals(), locals())
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/bespoke/plotting_functions.html b/_modules/reV/bespoke/plotting_functions.html new file mode 100644 index 000000000..33f5bc0b3 --- /dev/null +++ b/_modules/reV/bespoke/plotting_functions.html @@ -0,0 +1,799 @@ + + + + + + reV.bespoke.plotting_functions — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for reV.bespoke.plotting_functions

+# -*- coding: utf-8 -*-
+"""
+functions to plot turbine layouts and boundary polygons
+"""
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+
[docs]def get_xy(A): + """separate polygon exterior coordinates to x and y + + Parameters + ---------- + A : Polygon.exteroir.coords + Exterior coordinates from a shapely Polygon + + Outputs + ---------- + x, y : array + Boundary polygon x and y coordinates + """ + x = np.zeros(len(A)) + y = np.zeros(len(A)) + for i, _ in enumerate(A): + x[i] = A[i][0] + y[i] = A[i][1] + return x, y
+ + +
[docs]def plot_poly(geom, ax=None, color="black", linestyle="--", linewidth=0.5): + """plot the wind plant boundaries + + Parameters + ---------- + geom : Polygon | MultiPolygon + The shapely.Polygon or shapely.MultiPolygon that define the wind + plant boundary(ies). + ax : :py:class:`matplotlib.pyplot.axes`, optional + The figure axes on which the wind rose is plotted. + Defaults to :obj:`None`. + color : string, optional + The color for the wind plant boundaries + linestyle : string, optional + Style to plot the boundary lines + linewidth : float, optional + The width of the boundary lines + """ + if ax is None: + _, ax = plt.subplots() + + if geom.type == 'Polygon': + exterior_coords = geom.exterior.coords[:] + x, y = get_xy(exterior_coords) + ax.fill(x, y, color="C0", alpha=0.25) + ax.plot(x, y, color=color, linestyle=linestyle, linewidth=linewidth) + + for interior in geom.interiors: + interior_coords = interior.coords[:] + x, y = get_xy(interior_coords) + ax.fill(x, y, color="white", alpha=1.0) + ax.plot(x, y, "--k", linewidth=0.5) + + elif geom.type == 'MultiPolygon': + + for part in geom: + exterior_coords = part.exterior.coords[:] + x, y = get_xy(exterior_coords) + ax.fill(x, y, color="C0", alpha=0.25) + ax.plot(x, y, color=color, linestyle=linestyle, + linewidth=linewidth) + + for interior in part.interiors: + interior_coords = interior.coords[:] + x, y = get_xy(interior_coords) + ax.fill(x, y, color="white", alpha=1.0) + ax.plot(x, y, "--k", linewidth=0.5) + return ax
+ + +
[docs]def plot_turbines(x, y, r, ax=None, color="C0", nums=False): + """plot wind turbine locations + + Parameters + ---------- + x, y : array + Wind turbine x and y locations + r : float + Wind turbine radius + ax :py:class:`matplotlib.pyplot.axes`, optional + The figure axes on which the wind rose is plotted. + Defaults to :obj:`None`. + color : string, optional + The color for the wind plant boundaries + nums : bool, optional + Option to show the turbine numbers next to each turbine + """ + # Set up figure + if ax is None: + _, ax = plt.subplots() + + n = len(x) + for i in range(n): + t = plt.Circle((x[i], y[i]), r, color=color) + ax.add_patch(t) + if nums is True: + ax.text(x[i], y[i], "%s" % (i + 1)) + + return ax
+ + +
[docs]def plot_windrose(wind_directions, wind_speeds, wind_frequencies, ax=None, + colors=None): + """plot windrose + + Parameters + ---------- + wind_directions : 1D array + Wind direction samples + wind_speeds : 1D array + Wind speed samples + wind_frequencies : 2D array + Frequency of wind direction and speed samples + ax :py:class:`matplotlib.pyplot.axes`, optional + The figure axes on which the wind rose is plotted. + Defaults to :obj:`None`. + color : array, optional + The color for the different wind speed bins + """ + if ax is None: + _, ax = plt.subplots(subplot_kw=dict(polar=True)) + + ndirs = len(wind_directions) + nspeeds = len(wind_speeds) + + if colors is None: + colors = [] + for i in range(nspeeds): + colors = np.append(colors, "C%s" % i) + + for i in range(ndirs): + wind_directions[i] = np.deg2rad(90.0 - wind_directions[i]) + + width = 0.8 * 2 * np.pi / len(wind_directions) + + for i in range(ndirs): + bottom = 0.0 + for j in range(nspeeds): + if i == 0: + if j < nspeeds - 1: + ax.bar(wind_directions[i], wind_frequencies[j, i], + bottom=bottom, width=width, edgecolor="black", + color=[colors[j]], + label="%s-%s m/s" % (int(wind_speeds[j]), + int(wind_speeds[j + 1])) + ) + else: + ax.bar(wind_directions[i], wind_frequencies[j, i], + bottom=bottom, width=width, edgecolor="black", + color=[colors[j]], + label="%s+ m/s" % int(wind_speeds[j]) + ) + else: + ax.bar(wind_directions[i], wind_frequencies[j, i], + bottom=bottom, width=width, edgecolor="black", + color=[colors[j]]) + bottom = bottom + wind_frequencies[j, i] + + ax.legend(bbox_to_anchor=(1.3, 1), fontsize=10) + pi = np.pi + ax.set_xticks((0, pi / 4, pi / 2, 3 * pi / 4, pi, 5 * pi / 4, + 3 * pi / 2, 7 * pi / 4)) + ax.set_xticklabels(("E", "NE", "N", "NW", "W", "SW", "S", "SE"), + fontsize=10) + plt.yticks(fontsize=10) + + plt.subplots_adjust(left=0.0, right=1.0, top=0.9, bottom=0.1) + + return ax
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/config/base_analysis_config.html b/_modules/reV/config/base_analysis_config.html new file mode 100644 index 000000000..f5f5e1c32 --- /dev/null +++ b/_modules/reV/config/base_analysis_config.html @@ -0,0 +1,779 @@ + + + + + + reV.config.base_analysis_config — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for reV.config.base_analysis_config

+# -*- coding: utf-8 -*-
+"""
+reV Base analysis Configuration Frameworks
+"""
+import os
+import logging
+from warnings import warn
+
+from reV.config.base_config import BaseConfig
+from reV.config.execution import (BaseExecutionConfig, SlurmConfig)
+from reV.utilities.exceptions import (ConfigError, ConfigWarning,
+                                      reVDeprecationWarning)
+from reV.utilities import ModuleName
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class AnalysisConfig(BaseConfig): + """Base analysis config (generation, lcoe, etc...).""" + + NAME = None + + def __init__(self, config, run_preflight=True, check_keys=True): + """ + Parameters + ---------- + config : str | dict + File path to config json (str), serialized json object (str), + or dictionary with pre-extracted config. + run_preflight : bool, optional + Flag to run or disable preflight checks, by default True + check_keys : bool, optional + Flag to check config keys against Class properties, by default True + """ + super().__init__(config, check_keys=check_keys) + + self._analysis_years = None + self._ec = None + self.dirout = self.config_dir + self.__config_fn = config + + self._preflight() + + if run_preflight: + self._analysis_config_preflight() + + def _analysis_config_preflight(self): + """Check for required config blocks""" + + if 'execution_control' not in self: + e = 'reV config must have "execution_control" block!' + logger.error(e) + raise ConfigError(e) + + @property + def analysis_years(self): + """Get the analysis years. + + Returns + ------- + analysis_years : list + List of years to analyze. If this is a single year run, this return + value is a single entry list. If no analysis_years are specified, + the code will look anticipate a year in the input files. + """ + + if self._analysis_years is None: + self._analysis_years = self.get('analysis_years', [None]) + if not isinstance(self._analysis_years, list): + self._analysis_years = [self._analysis_years] + + if self._analysis_years[0] is None: + warn('Years may not have been specified, may default ' + 'to available years in inputs files.', ConfigWarning) + + return self._analysis_years + + @property + def log_directory(self): + """Get the logging directory, look for key "log_directory" in the + config. + Returns + ------- + log_directory : str + Target path for reV log files. 
+ """ + return self.get('log_directory', './logs/') + + @property + def execution_control(self): + """Get the execution control object. + + Returns + ------- + _ec : BaseExecutionConfig | EagleConfig + reV execution config object specific to the execution_control + option. + """ + if self._ec is None: + ec = self['execution_control'] + # static map of avail execution options with corresponding classes + ec_config_types = {'local': BaseExecutionConfig, + 'slurm': SlurmConfig, + 'eagle': SlurmConfig, + 'kestrel': SlurmConfig, + } + if 'option' in ec: + try: + # Try setting the attribute to the appropriate exec option + self._ec = ec_config_types[ec['option'].lower()](ec) + except KeyError as exc: + # Option not found + msg = ('Execution control option not ' + 'recognized: "{}". ' + 'Available options are: {}.' + .format(ec['option'].lower(), + list(ec_config_types.keys()))) + raise ConfigError(msg) from exc + else: + # option not specified, default to a base execution (local) + warn('Execution control option not specified. ' + 'Defaulting to a local run.') + self._ec = BaseExecutionConfig(ec) + return self._ec + + @property + def name(self): + """Get the job name, defaults to the output directory name. + Returns + ------- + _name : str + reV job name. + """ + + if self._name is None: + + # name defaults to base directory name + self._name = os.path.basename(os.path.normpath(self.dirout)) + + # collect name is simple, will be added to what is being collected + if self.NAME == ModuleName.COLLECT: + self._name = self.NAME + + # Analysis job name tag (helps ensure unique job name) + elif self.NAME is not None: + self._name += '_{}'.format(self.NAME) + + # Throw warning if user still has 'name' key in config + if self.get('name') is not None: + msg = ("Specifying a job name using config key 'name' is " + "deprecated. Job names are now inferred from the run " + "directory name. 
To silence this warning, remove " + "the 'name' key from the following config file: {!r}'" + .format(self.__config_fn)) + logger.warning(msg) + warn(reVDeprecationWarning(msg)) + + return self._name
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/config/base_config.html b/_modules/reV/config/base_config.html new file mode 100644 index 000000000..008a9c1af --- /dev/null +++ b/_modules/reV/config/base_config.html @@ -0,0 +1,938 @@ + + + + + + reV.config.base_config — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.config.base_config

+# -*- coding: utf-8 -*-
+"""
+reV Base Configuration Framework
+"""
+import json
+import logging
+import os
+from pathlib import Path
+
+from rex.utilities.utilities import get_class_properties, unstupify_path
+from gaps.config import load_config
+
+from reV.utilities.exceptions import ConfigError
+
+logger = logging.getLogger(__name__)
+REVDIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+TESTDATADIR = os.path.join(os.path.dirname(REVDIR), 'tests', 'data')
+
+
+
[docs]class BaseConfig(dict): + """Base class for configuration frameworks.""" + + REQUIREMENTS = () + """Required keys for config""" + + STR_REP = {'REVDIR': REVDIR, + 'TESTDATADIR': TESTDATADIR} + """Mapping of config inputs (keys) to desired replacements (values) in + addition to relative file paths as demarcated by ./ and ../""" + + def __init__(self, config, check_keys=True, perform_str_rep=True): + """ + Parameters + ---------- + config : str | dict + File path to config json (str), serialized json object (str), + or dictionary with pre-extracted config. + check_keys : bool, optional + Flag to check config keys against Class properties, by default True + perform_str_rep : bool + Flag to perform string replacement for REVDIR, TESTDATADIR, and ./ + """ + + # str_rep is a mapping of config strings to replace with real values + self._perform_str_rep = perform_str_rep + self._name = None + self._config_dir = None + self._log_level = None + self._parse_config(config) + + self._preflight() + + self._keys = self._get_properties() + if check_keys: + self._check_keys() + + @property + def config_dir(self): + """Get the directory that the config file is in. + + Returns + ------- + config_dir : str + Directory path that the config file is in. + """ + return self._config_dir + + @property + def config_keys(self): + """ + List of valid config keys + + Returns + ------- + list + """ + return self._keys + + @property + def log_level(self): + """Get user-specified "log_level" (DEBUG, INFO, WARNING, etc...). + + Returns + ------- + log_level : int + Python logging module level (integer format) corresponding to the + config-specified log level string. 
+ """ + + if self._log_level is None: + levels = {'DEBUG': logging.DEBUG, + 'INFO': logging.INFO, + 'WARNING': logging.WARNING, + 'ERROR': logging.ERROR, + 'CRITICAL': logging.CRITICAL, + } + + x = str(self.get('log_level', 'INFO')) + self._log_level = levels[x.upper()] + + return self._log_level + + @property + def name(self): + """Get the job name, defaults to 'rev'. + + Returns + ------- + name : str + reV job name. + """ + return self._name or 'rev' + + def _preflight(self): + """Run a preflight check on the config.""" + if 'project_control' in self: + msg = ('config "project_control" block is no ' + 'longer used. All project control keys should be placed at ' + 'the top config level.') + logger.error(msg) + raise ConfigError(msg) + + missing = [] + for req in self.REQUIREMENTS: + if req not in self: + missing.append(req) + + if any(missing): + e = ('{} missing the following keys: {}' + .format(self.__class__.__name__, missing)) + logger.error(e) + raise ConfigError(e) + + @classmethod + def _get_properties(cls): + """ + Get all class properties + Used to check against config keys + + Returns + ------- + properties : list + List of class properties, each of which should represent a valid + config key/entry + """ + return get_class_properties(cls) + + def _check_keys(self): + """ + Check on config keys to ensure they match available + properties + """ + for key in self.keys(): + if isinstance(key, str) and key not in self._keys: + msg = ('{} is not a valid config entry for {}! Must be one of:' + '\n{}'.format(key, self.__class__.__name__, self._keys)) + logger.error(msg) + raise ConfigError(msg) + +
[docs] def check_overwrite_keys(self, primary_key, *overwrite_keys): + """ + Check for overwrite keys and raise a ConfigError if present + + Parameters + ---------- + primary_key : str + Primary key that overwrites overwrite_keys, used for error message + overwrite_keys : str + Key(s) to overwrite + """ + overwrite = [] + for key in overwrite_keys: + if key in self: + overwrite.append(key) + + if overwrite: + msg = ('A value for "{}" was provided which overwrites the ' + ' following key: "{}", please remove them from the config' + .format(primary_key, ', '.join(overwrite))) + logger.error(msg) + raise ConfigError(msg)
+ + def _parse_config(self, config): + """Parse a config input and set appropriate instance attributes. + + Parameters + ---------- + config : str | dict + File path to config json (str), serialized json object (str), + or dictionary with pre-extracted config. + """ + + # str is either json file path or serialized json object + if isinstance(config, str): + try: + # attempt to deserialize JSON-style string + config = json.loads(config) + except json.JSONDecodeError: + self._config_dir = os.path.dirname(unstupify_path(config)) + self._config_dir += '/' + self._config_dir = self._config_dir.replace('\\', '/') + config = load_config(config) + + # Perform string replacement, save config to self instance + if self._perform_str_rep: + config = self.str_replace_and_resolve(config, self.STR_REP) + + self.set_self_dict(config) + +
[docs] @staticmethod + def check_files(flist): + """Make sure all files in the input file list exist. + + Parameters + ---------- + flist : list + List of files (with paths) to check existance of. + """ + for f in flist: + # ignore files that are to be specified using pipeline utils + if 'PIPELINE' not in os.path.basename(f): + if os.path.exists(f) is False: + raise IOError('File does not exist: {}'.format(f))
+ +
[docs] def str_replace_and_resolve(self, d, str_rep): + """Perform a deep string replacement and path resolve in d. + + Parameters + ---------- + d : dict + Config dictionary potentially containing strings to replace + and/or paths to resolve. + str_rep : dict + Replacement mapping where keys are strings to search for and + values are the new values. + + Returns + ------- + d : dict + Config dictionary with updated strings. + """ + + if isinstance(d, dict): + # go through dict keys and values + for key, val in d.items(): + d[key] = self.str_replace_and_resolve(val, str_rep) + + elif isinstance(d, list): + # if the value is also a list, iterate through + for i, entry in enumerate(d): + d[i] = self.str_replace_and_resolve(entry, str_rep) + + elif isinstance(d, str): + # if val is a str, check to see if str replacements apply + for old_str, new in str_rep.items(): + # old_str is in the value, replace with new value + d = d.replace(old_str, new) + + # `resolve_path` is safe to call on any string, + # even if it is not a path + d = self.resolve_path(d) + + # return updated + return d
+ +
[docs] def set_self_dict(self, dictlike): + """Save a dict-like variable as object instance dictionary items. + + Parameters + ---------- + dictlike : dict + Python namespace object to set to this dictionary-emulating class. + """ + for key, val in dictlike.items(): + self[key] = val
+ +
[docs] def resolve_path(self, path): + """Resolve a file path represented by the input string. + + This function resolves the input string if it resembles a path. + Specifically, the string will be resolved if it starts with + "``./``" or "``..``", or it if it contains either "``./``" or + "``..``" somewhere in the string body. Otherwise, the string + is returned unchanged, so this function *is* safe to call on any + string, even ones that do not resemble a path. + + This method delegates the "resolving" logic to + :meth:`pathlib.Path.resolve`. This means the path is made + absolute, symlinks are resolved, and "``..``" components are + eliminated. If the ``path`` input starts with "``./``" or + "``..``", it is assumed to be w.r.t the config directory, *not* + the run directory. + + Parameters + ---------- + path : str + Input file path. + + Returns + ------- + str + The resolved path. + """ + + if path.startswith('./'): + path = (self.config_dir / Path(path[2:])) + elif path.startswith('..'): + path = (self.config_dir / Path(path)) + elif './' in path: # this covers both './' and '../' + path = Path(path) + + try: + path = path.resolve().as_posix() + except AttributeError: # `path` is still a `str` + pass + + return path
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/config/curtailment.html b/_modules/reV/config/curtailment.html new file mode 100644 index 000000000..4f1e2d6e4 --- /dev/null +++ b/_modules/reV/config/curtailment.html @@ -0,0 +1,801 @@ + + + + + + reV.config.curtailment — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.config.curtailment

+# -*- coding: utf-8 -*-
+"""
+reV config for curtailment inputs.
+
+Created on Mon Jan 28 11:43:27 2019
+
+@author: gbuster
+"""
+import logging
+from rex.utilities import check_eval_str
+from gaps.config import load_config
+from reV.config.base_config import BaseConfig
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class Curtailment(BaseConfig): + """Config for generation curtailment.""" + + def __init__(self, curtailment_parameters): + """ + Parameters + ---------- + curtailment_parameters : str | dict + Configuration json file (with path) containing curtailment + information. Could also be a pre-extracted curtailment config + dictionary (the contents of the curtailment json). + """ + + if isinstance(curtailment_parameters, str): + # received json, extract to dictionary + curtailment_parameters = load_config(curtailment_parameters) + + # intialize config object with curtailment parameters + super().__init__(curtailment_parameters) + + @property + def wind_speed(self): + """Get the wind speed threshold below which curtailment is possible. + + Returns + ------- + _wind_speed : float | None + Wind speed threshold below which curtailment is possible. + """ + return self.get('wind_speed', None) + + @property + def dawn_dusk(self): + """Get the solar zenith angle that signifies dawn and dusk. + + Returns + ------- + _dawn_dusk : float + Solar zenith angle at dawn and dusk. Default is nautical, 12 + degrees below the horizon (sza=102). + """ + + # preset commonly used dawn/dusk values in solar zenith angles. + presets = {'nautical': 102.0, + 'astronomical': 108.0, + 'civil': 96.0} + + # set a default value + dd = presets['nautical'] + + if 'dawn_dusk' in self: + if isinstance(self['dawn_dusk'], str): + # Use a pre-set dawn/dusk + dd = presets[self['dawn_dusk']] + + if isinstance(self['dawn_dusk'], (int, float)): + # Use an explicit solar zenith angle + dd = float(self['dawn_dusk']) + + return dd + + @property + def months(self): + """Get the months during which curtailment is possible (inclusive). + This can be overridden by the date_range input. + + Returns + ------- + months : tuple | None + Tuple of month integers. These are the months during which + curtailment could be in effect. Default is None. 
+ """ + m = self.get('months', None) + if isinstance(m, list): + m = tuple(m) + return m + + @property + def date_range(self): + """Get the date range tuple (start, end) over which curtailment is + possible (inclusive, exclusive) ("MMDD", "MMDD"). This overrides the + months input. + + Returns + ------- + date_range : tuple + Two-entry tuple of the starting date (inclusive) and ending date + (exclusive) over which curtailment is possible. Input format is a + zero-padded string: "MMDD". + """ + dr = self.get('date_range', None) + if dr is not None: + msg = 'date_range input needs to be a tuple!' + assert isinstance(dr, (list, tuple)), msg + msg = 'date_range input needs to have two entries!' + assert len(dr) == 2, msg + dr = (str(int(dr[0])).zfill(4), str(int(dr[1])).zfill(4)) + + return dr + + @property + def temperature(self): + """Get the temperature (C) over which curtailment is possible. + + Returns + ------- + temperature : float | NoneType + Temperature over which curtailment is possible. Defaults to None. + """ + return self.get('temperature', None) + + @property + def precipitation(self): + """Get the precip rate (mm/hour) under which curtailment is possible. + + Returns + ------- + precipitation : float | NoneType + Precipitation rate under which curtailment is possible. This is + compared to the WTK resource dataset "precipitationrate_0m" in + mm/hour. Defaults to None. + """ + return self.get('precipitation', None) + + @property + def equation(self): + """Get an equation-based curtailment scenario. + + Returns + ------- + equation : str + A python equation based on other curtailment variables (wind_speed, + temperature, precipitation_rate, solar_zenith_angle) that returns + a True or False output to signal curtailment. + """ + eq = self.get('equation', None) + if isinstance(eq, str): + check_eval_str(eq) + return eq + + @property + def probability(self): + """Get the probability that curtailment is in-effect if all other + screening criteria are met. 
+ + Returns + ------- + probability : float + Fractional probability that curtailment is in-effect if all other + screening criteria are met. Defaults to 1 (curtailment is always + in effect if all other criteria are met). + """ + return float(self.get('probability', 1.0)) + + @property + def random_seed(self): + """ + Random seed to use for curtailment probability + + Returns + ------- + int + """ + return int(self.get('random_seed', 0))
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/config/execution.html b/_modules/reV/config/execution.html new file mode 100644 index 000000000..decb475be --- /dev/null +++ b/_modules/reV/config/execution.html @@ -0,0 +1,813 @@ + + + + + + reV.config.execution — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.config.execution

+# -*- coding: utf-8 -*-
+"""
+reV Configuration for Execution Options
+"""
+import logging
+
+from reV.config.base_config import BaseConfig
+from reV.utilities.exceptions import ConfigError
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class BaseExecutionConfig(BaseConfig): + """Base class to handle execution configuration""" + + def __init__(self, config_dict): + """ + Parameters + ---------- + config : str | dict + File path to config json (str), serialized json object (str), + or dictionary with pre-extracted config. + """ + + self._default_option = 'local' + self._default_nodes = 1 + self._default_mem_util_lim = 0.4 + super().__init__(config_dict) + + @property + def option(self): + """Get the hardware run option. + + Returns + ------- + option : str + Execution control option, e.g. local, peregrine, eagle... + """ + return str(self.get('option', self._default_option)).lower() + + @property + def nodes(self): + """Get the number of nodes property. + + Returns + ------- + nodes : int + Number of available nodes. Default is 1 node. + """ + return int(self.get('nodes', self._default_nodes)) + + @property + def max_workers(self): + """Get the max_workers property (1 runs in serial, None is all workers) + + Returns + ------- + max_workers : int | None + Processes per node. Default is None max_workers (all available). + """ + return self.get('max_workers', None) + + @property + def sites_per_worker(self): + """Get the number of sites to run per worker. + + Returns + ------- + sites_per_worker : int | None + Number of sites to run per worker in a parallel scheme. + """ + return self.get('sites_per_worker', None) + + @property + def memory_utilization_limit(self): + """Get the node memory utilization limit property. Key in the config + json is "memory_utilization_limit". + + Returns + ------- + mem_util_lim : float + Memory utilization limit (fractional). Key in the config json is + "memory_utilization_limit". + """ + mem_util_lim = self.get('memory_utilization_limit', + self._default_mem_util_lim) + + return mem_util_lim + + @property + def sh_script(self): + """Get the "sh_script" entry which is a string that contains extra + shell script commands to run before the reV commands. 
+ + Returns + ------- + str + """ + return self.get('sh_script', '')
+ + +
[docs]class HPCConfig(BaseExecutionConfig): + """Class to handle HPC configuration inputs.""" + + @property + def allocation(self): + """Get the HPC allocation property. + + Returns + ------- + hpc_alloc : str + Name of the HPC allocation account for the specified job. + """ + + return self.get('allocation', None) + + @property + def feature(self): + """Get feature request str. + + Returns + ------- + feature : str | NoneType + Feature request string. For EAGLE, a full additional flag. + Config should look like: + ``"feature": "--depend=[state:job_id]"`` + """ + return self.get('feature', None) + + @property + def module(self): + """ + Get module to load if given + + Returns + ------- + module : str + Module to load on node + """ + return self.get('module', None) + + @property + def conda_env(self): + """ + Get conda environment to activate + + Returns + ------- + conda_env : str + Conda environment to activate + """ + return self.get('conda_env', None)
+ + +
[docs]class SlurmConfig(HPCConfig): + """Class to handle SLURM (Eagle) configuration inputs.""" + + def _preflight(self): + """Run a preflight check on the config.""" + if self.option in {'eagle', 'kestrel', 'slurm'}: + if self.allocation is None: + msg = 'HPC execution config must have an "allocation" input' + logger.error(msg) + raise ConfigError(msg) + if self.walltime is None: + msg = 'HPC execution config must have a "walltime" input' + logger.error(msg) + raise ConfigError(msg) + + super()._preflight() + + @property + def memory(self): + """Get the requested Eagle node "memory" value in GB or can be None. + + Returns + ------- + _hpc_node_mem : int | None + Requested node memory in GB. + """ + return self.get('memory', None) + + @property + def walltime(self): + """Get the requested Eagle node "walltime" value. + + Returns + ------- + _hpc_walltime : int + Requested single node job time in hours. + """ + return self.get('walltime', None)
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/config/output_request.html b/_modules/reV/config/output_request.html new file mode 100644 index 000000000..9e856890b --- /dev/null +++ b/_modules/reV/config/output_request.html @@ -0,0 +1,725 @@ + + + + + + reV.config.output_request — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.config.output_request

+# -*- coding: utf-8 -*-
+"""Output request config to handle user output requests.
+
+This module will allow for aliases and fix some typos.
+
+Created on Mon Jul  8 09:37:23 2019
+
+@author: gbuster
+"""
+import logging
+from warnings import warn
+from reV.utilities.exceptions import ConfigWarning
+
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class OutputRequest(list): + """Base output request list framework with request key correction logic.""" + + # map of commonly expected typos. + # keys are typos, values are correct var names + # all available output variables should be in the values + CORRECTIONS = {} + + def __init__(self, inp): + """ + Parameters + ---------- + inp : list | tuple | str + List of requested reV output variables. + """ + + if isinstance(inp, str): + inp = [inp] + + for request in inp: + if request in self.CORRECTIONS.values(): + self.append(request) + elif request in self.CORRECTIONS.keys(): + self.append(self.CORRECTIONS[request]) + msg = ('Correcting output request "{}" to "{}".' + .format(request, self.CORRECTIONS[request])) + logger.warning(msg) + warn(msg, ConfigWarning) + else: + self.append(request) + logger.debug('Did not recognize requested output variable ' + '"{}". Passing forward, but this may cause a ' + 'downstream error. Available known output ' + 'variables are: {}' + .format(request, + list(set(self.CORRECTIONS.values()))))
+ + +
[docs]class SAMOutputRequest(OutputRequest): + """SAM output request framework.""" + + # map of commonly expected typos. + # keys are typos, values are correct SAM var names + # all available SAM output variables should be in the values + CORRECTIONS = {'cf_means': 'cf_mean', + 'cf': 'cf_mean', + 'capacity_factor': 'cf_mean', + 'capacityfactor': 'cf_mean', + 'cf_profiles': 'cf_profile', + 'profiles': 'cf_profile', + 'profile': 'cf_profile', + 'dni_means': 'dni_mean', + 'ghi_means': 'ghi_mean', + 'ws_means': 'ws_mean', + 'generation': 'annual_energy', + 'yield': 'energy_yield', + 'generation_profile': 'gen_profile', + 'generation_profiles': 'gen_profile', + 'plane_of_array': 'poa', + 'plane_of_array_irradiance': 'poa', + 'gen_profiles': 'gen_profile', + 'lcoe': 'lcoe_fcr', + 'foc': 'fixed_operating_cost', + 'voc': 'variable_operating_cost', + 'fcr': 'fixed_charge_rate', + 'cc': 'capital_cost', + 'lcoe_nominal': 'lcoe_nom', + 'real_lcoe': 'lcoe_real', + 'net_present_value': 'project_return_aftertax_npv', + 'npv': 'project_return_aftertax_npv', + 'ppa': 'ppa_price', + 'single_owner': 'ppa_price', + 'singleowner': 'ppa_price', + 'actual_irr': 'flip_actual_irr', + 'irr': 'flip_actual_irr', + 'cf_total_revenue': 'gross_revenue', + 'total_cost': 'total_installed_cost', + 'turbine': 'turbine_cost', + 'sales_tax': 'sales_tax_cost', + 'bos': 'bos_cost', + 'albedo': 'surface_albedo', + 'ac_power': 'ac', + 'dc_power': 'dc', + 'clipping': 'clipped_power', + 'clipped': 'clipped_power', + 'clip': 'clipped_power' + }
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/config/project_points.html b/_modules/reV/config/project_points.html new file mode 100644 index 000000000..e2fb6f80a --- /dev/null +++ b/_modules/reV/config/project_points.html @@ -0,0 +1,1641 @@ + + + + + + reV.config.project_points — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.config.project_points

+# -*- coding: utf-8 -*-
+"""
+reV Project Points Configuration
+"""
+import copy
+import logging
+import numpy as np
+import os
+import pandas as pd
+from warnings import warn
+
+from reV.config.curtailment import Curtailment
+from reV.config.sam_config import SAMConfig
+from reV.utilities.exceptions import ConfigError, ConfigWarning
+
+from rex.resource import Resource
+from rex.multi_file_resource import MultiFileResource
+from rex.resource_extraction.resource_extraction import (ResourceX,
+                                                         MultiFileResourceX)
+from rex.utilities import check_res_file, parse_table
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class PointsControl: + """Class to manage and split ProjectPoints.""" + def __init__(self, project_points, sites_per_split=100): + """ + Parameters + ---------- + project_points : reV.config.ProjectPoints + ProjectPoints instance to be split between execution workers. + sites_per_split : int + Sites per project points split instance returned in the __next__ + iterator function. + """ + + self._project_points = project_points + self._sites_per_split = sites_per_split + self._split_range = [] + self._i = 0 + self._iter_list = [] + + def __iter__(self): + """Initialize the iterator by pre-splitting into a list attribute.""" + last_site = 0 + ilim = len(self.project_points) + + logger.debug('PointsControl iterator initializing with sites ' + '{} through {}'.format(self.project_points.sites[0], + self.project_points.sites[-1])) + + # pre-initialize all iter objects + while True: + i0 = last_site + i1 = np.min([i0 + self.sites_per_split, ilim]) + if i0 == i1: + break + + last_site = i1 + + new = self.split(i0, i1, self.project_points, + sites_per_split=self.sites_per_split) + new._split_range = [i0, i1] + self._iter_list.append(new) + + logger.debug('PointsControl stopped iteration at attempted ' + 'index of {}. Length of iterator is: {}' + .format(i1, len(self))) + return self + + def __next__(self): + """Iterate through and return next site resource data. + + Returns + ------- + next_pc : config.PointsControl + Split instance of this class with a subset of project points based + on the number of sites per split. 
+ """ + if self._i < self.N: + # Get next PointsControl from the iter list + next_pc = self._iter_list[self._i] + else: + # No more points controllers left in initialized list + raise StopIteration + + logger.debug('PointsControl passing site project points ' + 'with indices {} to {} on iteration #{} ' + .format(next_pc.split_range[0], + next_pc.split_range[1], self._i)) + self._i += 1 + return next_pc + + def __repr__(self): + msg = ("{} with {} sites from gid {} through {}" + .format(self.__class__.__name__, len(self.project_points), + self.sites[0], self.sites[-1])) + return msg + + def __len__(self): + """Len is the number of possible iterations aka splits.""" + return int(np.ceil(len(self.project_points) / self.sites_per_split)) + + @property + def N(self): + """ + Length of current iterator list + + Returns + ------- + N : int + Number of iterators in list + """ + return len(self._iter_list) + + @property + def sites_per_split(self): + """Get the iterator increment: number of sites per split. + + Returns + ------- + _sites_per_split : int + Sites per split iter object. + """ + return self._sites_per_split + + @property + def project_points(self): + """Get the project points property. + + Returns + ------- + _project_points : reV.config.project_points.ProjectPoints + ProjectPoints instance corresponding to this PointsControl + instance. + """ + return self._project_points + + @property + def sites(self): + """Get the project points sites for this instance. + + Returns + ------- + sites : list + List of sites belonging to the _project_points attribute. + """ + return self._project_points.sites + + @property + def split_range(self): + """Get the current split range property. + + Returns + ------- + _split_range : list + Two-entry list that indicates the starting and finishing + (inclusive, exclusive, respectively) indices of a split instance + of the PointsControl object. This is set in the iterator dunder + methods of PointsControl. 
+ """ + return self._split_range + +
[docs] @classmethod + def split(cls, i0, i1, project_points, sites_per_split=100): + """Split this execution by splitting the project points attribute. + + Parameters + ---------- + i0/i1 : int + Beginning/end (inclusive/exclusive, respectively) index split + parameters for ProjectPoints.split() method. + project_points : reV.config.ProjectPoints + Project points instance that will be split. + sites_per_split : int + Sites per project points split instance returned in the __next__ + iterator function. + + Returns + ------- + sub : PointsControl + New instance of PointsControl with a subset of the original + project points. + """ + i0 = int(i0) + i1 = int(i1) + new_points = ProjectPoints.split(i0, i1, project_points) + sub = cls(new_points, sites_per_split=sites_per_split) + return sub
+ + +
[docs]class ProjectPoints: + """Class to manage site and SAM input configuration requests. + + Examples + -------- + >>> import os + >>> from reV import TESTDATADIR + >>> from reV.config.project_points import ProjectPoints + >>> + >>> points = slice(0, 100) + >>> sam_file = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13.json') + >>> pp = ProjectPoints(points, sam_file) + >>> + >>> config_id_site0, SAM_config_dict_site0 = pp[0] + >>> site_list_or_slice = pp.sites + >>> site_list_or_slice = pp.get_sites_from_config(config_id) + >>> ProjectPoints_sub = pp.split(0, 10, project_points) + >>> h_list = pp.h + """ + + def __init__(self, points, sam_configs, tech=None, res_file=None, + curtailment=None): + """ + Parameters + ---------- + points : int | slice | list | tuple | str | pd.DataFrame | dict + Slice specifying project points, string pointing to a project + points csv, or a dataframe containing the effective csv contents. + Can also be a single integer site value. + sam_configs : dict | str | SAMConfig + SAM input configuration ID(s) and file path(s). Keys are the SAM + config ID(s) which map to the config column in the project points + CSV. Values are either a JSON SAM config file or dictionary of SAM + config inputs. Can also be a single config file path or a + pre loaded SAMConfig object. + tech : str, optional + SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, + solarwaterheat, troughphysicalheat, lineardirectsteam) + The string should be lower-cased with spaces and _ removed, + by default None + res_file : str | NoneType + Optional resource file to find maximum length of project points if + points slice stop is None. + curtailment : NoneType | dict | str | config.curtailment.Curtailment + Inputs for curtailment parameters. If not None, curtailment inputs + are expected. 
Can be: + + - Explicit namespace of curtailment variables (dict) + - Pointer to curtailment config json file with path (str) + - Instance of curtailment config object + (config.curtailment.Curtailment) + + """ + # set protected attributes + self._df = self._parse_points(points, res_file=res_file) + self._sam_config_obj = self._parse_sam_config(sam_configs) + self._check_points_config_mapping() + self._tech = str(tech) + self._h = self._d = None + self._curtailment = self._parse_curtailment(curtailment) + + def __getitem__(self, site): + """Get the SAM config ID and dictionary for the requested site. + + Parameters + ---------- + site : int | str + Site number (gid) of interest (typically the resource gid). + + Returns + ------- + config_id : str + Configuration ID (variable name) specified in the sam_generation + config section. + config : dict + Actual SAM input values in a single level dictionary with variable + names (keys) and values. + """ + + site_bool = (self.df['gid'] == site) + try: + config_id = self.df.loc[site_bool, 'config'].values[0] + except (KeyError, IndexError) as ex: + msg = ('Site {} not found in this instance of ' + 'ProjectPoints. Available sites include: {}' + .format(site, self.sites)) + logger.exception(msg) + raise KeyError(msg) from ex + + return config_id, copy.deepcopy(self.sam_inputs[config_id]) + + def __repr__(self): + msg = ("{} with {} sites from gid {} through {}" + .format(self.__class__.__name__, len(self), + self.sites[0], self.sites[-1])) + return msg + + def __len__(self): + """Length of this object is the number of sites.""" + return len(self.sites) + + @property + def df(self): + """Get the project points dataframe property. + + Returns + ------- + _df : pd.DataFrame + Table of sites and corresponding SAM configuration IDs. + Has columns 'gid' and 'config'. + """ + return self._df + + @property + def sam_config_ids(self): + """Get the SAM configs dictionary property. 
+ + Returns + ------- + dict + Multi-level dictionary containing multiple SAM input config files. + The top level key is the SAM config ID, top level value is the SAM + config file path + """ + return sorted(self._sam_config_obj) + + @property + def sam_config_obj(self): + """Get the SAM config object. + + Returns + ------- + _sam_config_obj : reV.config.sam_config.SAMConfig + SAM configuration object. + """ + return self._sam_config_obj + + @property + def sam_inputs(self): + """Get the SAM configuration inputs dictionary property. + + Returns + ------- + dict + Multi-level dictionary containing multiple SAM input + configurations. The top level key is the SAM config ID, top level + value is the SAM config. Each SAM config is a dictionary with keys + equal to input names, values equal to the actual inputs. + """ + return self.sam_config_obj.inputs + + @property + def all_sam_input_keys(self): + """Get a list of unique input keys from all SAM technology configs. + + Returns + ------- + all_sam_input_keys : list + List of unique strings where each string is a input key for the + SAM technology configs. For example, "gcr" or "losses" for PVWatts + or "wind_turbine_hub_ht" for windpower. + """ + keys = [] + for sam_config in self.sam_inputs.values(): + keys += list(sam_config.keys()) + + keys = list(set(keys)) + + return keys + + @property + def gids(self): + """Get the list of gids (resource file index values) belonging to this + instance of ProjectPoints. This is an alias of self.sites. + + Returns + ------- + gids : list + List of integer gids (resource file index values) belonging to this + instance of ProjectPoints. This is an alias of self.sites. + """ + return self.sites + + @property + def sites(self): + """Get the list of sites (resource file gids) belonging to this + instance of ProjectPoints. + + Returns + ------- + sites : list + List of integer sites (resource file gids) belonging to this + instance of ProjectPoints. 
+ """ + return self.df['gid'].values.tolist() + + @property + def sites_as_slice(self): + """Get the sites in slice format. + + Returns + ------- + sites_as_slice : list | slice + Sites slice belonging to this instance of ProjectPoints. + The type is slice if possible. Will be a list only if sites are + non-sequential. + """ + # try_slice is what the sites list would be if it is sequential + if len(self.sites) > 1: + try_step = self.sites[1] - self.sites[0] + else: + try_step = 1 + try_slice = slice(self.sites[0], self.sites[-1] + 1, try_step) + try_list = list(range(*try_slice.indices(try_slice.stop))) + + if self.sites == try_list: + # try_slice is equivelant to the site list + sites_as_slice = try_slice + else: + # cannot be converted to a sequential slice, return list + sites_as_slice = self.sites + + return sites_as_slice + + @property + def tech(self): + """Get the tech property from the config. + + Returns + ------- + _tech : str + SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, + solarwaterheat, troughphysicalheat, lineardirectsteam) + The string should be lower-cased with spaces and _ removed. + """ + return 'windpower' if 'wind' in self._tech.lower() else self._tech + + @property + def h(self): + """Get the hub heights corresponding to the site list. + + Returns + ------- + _h : list | NoneType + Hub heights corresponding to each site, taken from the sam config + for each site. This is None if the technology is not wind. + """ + h_var = 'wind_turbine_hub_ht' + if self._h is None: + if 'wind' in self.tech: + # wind technology, get a list of h values + self._h = [self[site][1][h_var] for site in self.sites] + + return self._h + + @property + def d(self): + """Get the depths (m) corresponding to the site list. + + Returns + ------- + _d : list | NoneType + Resource depths (m) corresponding to each site, taken from + the sam config for each site. This is None if the technology + is not geothermal. 
+ """ + d_var = 'resource_depth' + if self._d is None: + if 'geothermal' in self.tech: + if d_var in self.df: + self._d = list(self.df[d_var]) + else: + self._d = [self[site][1][d_var] for site in self.sites] + + return self._d + + @property + def curtailment(self): + """Get the curtailment config object. + + Returns + ------- + _curtailment : NoneType | reV.config.curtailment.Curtailment + None if no curtailment, reV curtailment config object if + curtailment is being assessed. + """ + return self._curtailment + + @staticmethod + def _parse_csv(fname): + """Import project points from .csv + + Parameters + ---------- + fname : str + Project points .csv file (with path). Must have 'gid' and 'config' + column names. + + Returns + ------- + df : pd.DataFrame + DataFrame mapping sites (gids) to SAM technology (config) + """ + fname = fname.strip() + if fname.endswith('.csv'): + df = pd.read_csv(fname) + else: + raise ValueError('Config project points file must be ' + '.csv, but received: {}' + .format(fname)) + + return df + + @staticmethod + def _parse_sites(points, res_file=None): + """Parse project points from list or slice + + Parameters + ---------- + points : int | str | pd.DataFrame | slice | list + Slice specifying project points, string pointing to a project + points csv, or a dataframe containing the effective csv contents. + Can also be a single integer site value. + res_file : str | NoneType + Optional resource file to find maximum length of project points if + points slice stop is None. 
+ + Returns + ------- + df : pd.DataFrame + DataFrame mapping sites (gids) to SAM technology (config) + """ + df = pd.DataFrame(columns=['gid', 'config']) + if isinstance(points, int): + points = [points] + if isinstance(points, (list, tuple, np.ndarray)): + # explicit site list, set directly + if any(isinstance(i, (list, tuple, np.ndarray)) for i in points): + msg = "Provided project points is not flat: {}!".format(points) + logger.error(msg) + raise RuntimeError(msg) + + df['gid'] = points + elif isinstance(points, slice): + stop = points.stop + if stop is None: + if res_file is None: + raise ValueError('Must supply a resource file if ' + 'points is a slice of type ' + ' slice(*, None, *)') + + multi_h5_res, _ = check_res_file(res_file) + if multi_h5_res: + stop = MultiFileResource(res_file).shape[1] + else: + stop = Resource(res_file).shape[1] + + df['gid'] = list(range(*points.indices(stop))) + else: + raise TypeError('Project Points sites needs to be set as a list, ' + 'tuple, or slice, but was set as: {}' + .format(type(points))) + + df['config'] = None + + return df + + @classmethod + def _parse_points(cls, points, res_file=None): + """Generate the project points df from inputs + + Parameters + ---------- + points : int | str | pd.DataFrame | slice | list | dict + Slice specifying project points, string pointing to a project + points csv, or a dataframe containing the effective csv contents. + Can also be a single integer site value. + res_file : str | NoneType + Optional resource file to find maximum length of project points if + points slice stop is None. 
+ + Returns + ------- + df : pd.DataFrame + DataFrame mapping sites (gids) to SAM technology (config) + """ + if isinstance(points, str): + df = cls._parse_csv(points) + elif isinstance(points, dict): + df = pd.DataFrame(points) + elif isinstance(points, (int, slice, list, tuple, np.ndarray)): + df = cls._parse_sites(points, res_file=res_file) + elif isinstance(points, pd.DataFrame): + df = points + else: + raise ValueError('Cannot parse Project points data from {}' + .format(type(points))) + + if 'gid' not in df.columns: + raise KeyError('Project points data must contain "gid" column.') + + # pylint: disable=no-member + if 'config' not in df.columns: + df = cls._parse_sites(points["gid"].values, res_file=res_file) + + gids = df['gid'].values + if not np.array_equal(np.sort(gids), gids): + msg = ('WARNING: points are not in sequential order and will be ' + 'sorted! The original order is being preserved under ' + 'column "points_order"') + logger.warning(msg) + warn(msg) + df['points_order'] = df.index.values + df = df.sort_values('gid').reset_index(drop=True) + + return df + + @staticmethod + def _parse_sam_config(sam_config): + """ + Create SAM files dictionary. + + Parameters + ---------- + sam_config : dict | str | SAMConfig + SAM input configuration ID(s) and file path(s) or SAM config + dict(s). Keys are the SAM config ID(s). Can also be a single + config file str. Can also be a pre loaded SAMConfig object. + + Returns + ------- + _sam_config_obj : reV.config.sam_config.SAMConfig + SAM configuration object. + """ + + if isinstance(sam_config, SAMConfig): + return sam_config + + else: + if isinstance(sam_config, dict): + config_dict = sam_config + elif isinstance(sam_config, str): + config_dict = {sam_config: sam_config} + else: + raise ValueError('Cannot parse SAM configs from {}' + .format(type(sam_config))) + + return SAMConfig(config_dict) + + @staticmethod + def _parse_curtailment(curtailment_input): + """Parse curtailment config object. 
+ + Parameters + ---------- + curtailment_input : None | dict | str | config.curtailment.Curtailment + Inputs for curtailment parameters. If not None, curtailment inputs + are expected. Can be: + - Explicit namespace of curtailment variables (dict) + - Pointer to curtailment config json file with path (str) + - Instance of curtailment config object + (config.curtailment.Curtailment) + + Returns + ------- + curtailments : NoneType | reV.config.curtailment.Curtailment + None if no curtailment, reV curtailment config object if + curtailment is being assessed. + """ + if isinstance(curtailment_input, (str, dict)): + # pointer to config file or explicit input namespace, + # instantiate curtailment config object + curtailment = Curtailment(curtailment_input) + + elif isinstance(curtailment_input, (Curtailment, type(None))): + # pre-initialized curtailment object or no curtailment (None) + curtailment = curtailment_input + + else: + curtailment = None + warn('Curtailment inputs not recognized. Received curtailment ' + 'input of type: "{}". Expected None, dict, str, or ' + 'Curtailment object. Defaulting to no curtailment.', + ConfigWarning) + + return curtailment + +
[docs] def index(self, gid): + """Get the index location (iloc not loc) for a resource gid found in + the project points. + + Parameters + ---------- + gid : int + Resource GID found in the project points gid column. + + Returns + ------- + ind : int + Row index of gid in the project points dataframe. + """ + if gid not in self._df['gid'].values: + e = ('Requested resource gid {} is not present in the project ' + 'points dataframe. Cannot return row index.'.format(gid)) + logger.error(e) + raise ConfigError(e) + + ind = np.where(self._df['gid'] == gid)[0][0] + + return ind
+ + def _check_points_config_mapping(self): + """ + Check to ensure the project points (df) and SAM configs + (sam_config_obj) are compatible. Update as necessary or break + """ + # Extract unique config refences from project_points DataFrame + df_configs = self.df['config'].unique() + sam_configs = self.sam_inputs + + # Checks to make sure that the same number of SAM config files + # as references in project_points DataFrame + if len(df_configs) > len(sam_configs): + msg = ('Points references {} configs while only ' + '{} SAM configs were provided!' + .format(len(df_configs), len(sam_configs))) + logger.error(msg) + raise ConfigError(msg) + + if len(df_configs) == 1 and df_configs[0] is None: + self._df['config'] = list(sam_configs)[0] + df_configs = self.df['config'].unique() + + # Check to see if config references in project_points DataFrame + # are valid file paths, if compare with SAM configs + # and update as needed + configs = {} + for config in df_configs: + if os.path.isfile(config): + configs[config] = config + elif config in sam_configs: + configs[config] = sam_configs[config] + else: + msg = ('{} does not map to a valid configuration file' + .format(config)) + logger.error(msg) + raise ConfigError(msg) + + # If configs has any keys that are not in sam_configs then + # something really weird happened so raise an error. + if any(set(configs) - set(sam_configs)): + msg = ('A wild config has appeared! Requested config keys for ' + 'ProjectPoints are {} and previous config keys are {}' + .format(list(configs), list(sam_configs))) + logger.error(msg) + raise ConfigError(msg) + +
[docs] def join_df(self, df2, key='gid'): + """Join new df2 to the _df attribute using the _df's gid as pkey. + + This can be used to add site-specific data to the project_points, + taking advantage of the points_control iterator/split functions such + that only the relevant site data is passed to the analysis functions. + + Parameters + ---------- + df2 : pd.DataFrame + Dataframe to be joined to the self._df attribute (this instance + of project points dataframe). This likely contains + site-specific inputs that are to be passed to parallel workers. + key : str + Primary key of df2 to be joined to the _df attribute (this + instance of the project points dataframe). Primary key + of the self._df attribute is fixed as the gid column. + """ + # ensure df2 doesnt have any duplicate columns for suffix reasons. + df2_cols = [c for c in df2.columns if c not in self._df or c == key] + self._df = pd.merge(self._df, df2[df2_cols], how='left', left_on='gid', + right_on=key, copy=False, validate='1:1')
+ +
[docs] def get_sites_from_config(self, config): + """Get a site list that corresponds to a config key. + + Parameters + ---------- + config : str + SAM configuration ID associated with sites. + + Returns + ------- + sites : list + List of sites associated with the requested configuration ID. If + the configuration ID is not recognized, an empty list is returned. + """ + sites = self.df.loc[(self.df['config'] == config), 'gid'].values + + return list(sites)
+ +
[docs] @classmethod + def split(cls, i0, i1, project_points): + """Return split instance of a ProjectPoints instance w/ site subset. + + Parameters + ---------- + i0 : int + Starting INDEX (not resource gid) (inclusive) of the site property + attribute to include in the split instance. This is not necessarily + the same as the starting site number, for instance if ProjectPoints + is sites 20:100, i0=0 i1=10 will result in sites 20:30. + i1 : int + Ending INDEX (not resource gid) (exclusive) of the site property + attribute to include in the split instance. This is not necessarily + the same as the final site number, for instance if ProjectPoints is + sites 20:100, i0=0 i1=10 will result in sites 20:30. + project_points: ProjectPoints + Instance of project points to split. + + Returns + ------- + sub : ProjectPoints + New instance of ProjectPoints with a subset of the following + attributes: sites, project points df, and the self dictionary data + struct. + """ + # Extract DF subset with only index values between i0 and i1 + n = len(project_points) + if i0 > n or i1 > n: + raise ValueError('{} and {} must be within the range of ' + 'project_points (0 - {})'.format(i0, i1, n - 1)) + + points_df = project_points.df.iloc[i0:i1] + + # make a new instance of ProjectPoints with subset DF + sub = cls(points_df, + project_points.sam_config_obj, + project_points.tech, + curtailment=project_points.curtailment) + + return sub
+ + @staticmethod + def _parse_lat_lons(lat_lons): + msg = ('Expecting a pair or multiple pairs of latitude and ' + 'longitude coordinates!') + if isinstance(lat_lons, str): + lat_lons = parse_table(lat_lons) + cols = [c for c in lat_lons + if c.lower().startswith(('lat', 'lon'))] + lat_lons = lat_lons[sorted(cols)].values + elif isinstance(lat_lons, (list, tuple)): + lat_lons = np.array(lat_lons) + elif isinstance(lat_lons, (int, float)): + msg += ' Recieved a single coordinate value!' + logger.error(msg) + raise ValueError(msg) + + if len(lat_lons.shape) == 1: + lat_lons = np.expand_dims(lat_lons, axis=0) + + if lat_lons.shape[1] != 2: + msg += ' Received {} coordinate values!'.format(lat_lons.shape[1]) + logger.error(msg) + raise ValueError(msg) + + return lat_lons + +
[docs] @classmethod + def lat_lon_coords(cls, lat_lons, res_file, sam_configs, tech=None, + curtailment=None): + """ + Generate ProjectPoints for gids nearest to given latitude longitudes + + Parameters + ---------- + lat_lons : str | tuple | list | ndarray + Pair or pairs of latitude longitude coordinates + res_file : str + Resource file, needed to fine nearest neighbors + sam_configs : dict | str | SAMConfig + SAM input configuration ID(s) and file path(s). Keys are the SAM + config ID(s) which map to the config column in the project points + CSV. Values are either a JSON SAM config file or dictionary of SAM + config inputs. Can also be a single config file path or a + pre loaded SAMConfig object. + tech : str, optional + SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, + solarwaterheat, troughphysicalheat, lineardirectsteam) + The string should be lower-cased with spaces and _ removed, + by default None + curtailment : NoneType | dict | str | config.curtailment.Curtailment + Inputs for curtailment parameters. If not None, curtailment inputs + are expected. 
Can be: + + - Explicit namespace of curtailment variables (dict) + - Pointer to curtailment config json file with path (str) + - Instance of curtailment config object + (config.curtailment.Curtailment) + + + Returns + ------- + pp : ProjectPoints + Initialized ProjectPoints object for points nearest to given + lat_lons + """ + lat_lons = cls._parse_lat_lons(lat_lons) + + multi_h5_res, hsds = check_res_file(res_file) + if multi_h5_res: + res_cls = MultiFileResourceX + res_kwargs = {} + else: + res_cls = ResourceX + res_kwargs = {'hsds': hsds} + + logger.info('Converting latitude longitude coordinates into nearest ' + 'ProjectPoints') + logger.debug('- (lat, lon) pairs:\n{}'.format(lat_lons)) + with res_cls(res_file, **res_kwargs) as f: + gids = f.lat_lon_gid(lat_lons) # pylint: disable=no-member + + if isinstance(gids, int): + gids = [gids] + else: + if len(gids) != len(np.unique(gids)): + uniques, pos, counts = np.unique(gids, return_counts=True, + return_inverse=True) + duplicates = {} + for idx in np.where(counts > 1)[0]: + duplicate_lat_lons = lat_lons[np.where(pos == idx)[0]] + duplicates[uniques[idx]] = duplicate_lat_lons + + msg = ('reV Cannot currently handle duplicate Resource gids! ' + 'The given latitude and longitudes map to the same ' + 'gids:\n{}'.format(duplicates)) + logger.error(msg) + raise RuntimeError(msg) + + gids = gids.tolist() + + logger.debug('- Resource gids:\n{}'.format(gids)) + + pp = cls(gids, sam_configs, tech=tech, res_file=res_file, + curtailment=curtailment) + + if 'points_order' in pp.df: + lat_lons = lat_lons[pp.df['points_order'].values] + + pp._df['latitude'] = lat_lons[:, 0] + pp._df['longitude'] = lat_lons[:, 1] + + return pp
+ +
[docs] @classmethod + def regions(cls, regions, res_file, sam_configs, tech=None, + curtailment=None): + """ + Generate ProjectPoints for gids nearest to given latitude longitudes + + Parameters + ---------- + regions : dict + Dictionary of regions to extract points for in the form: + {'region': 'region_column'} + res_file : str + Resource file, needed to fine nearest neighbors + sam_configs : dict | str | SAMConfig + SAM input configuration ID(s) and file path(s). Keys are the SAM + config ID(s) which map to the config column in the project points + CSV. Values are either a JSON SAM config file or dictionary of SAM + config inputs. Can also be a single config file path or a + pre loaded SAMConfig object. + tech : str, optional + SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, + solarwaterheat, troughphysicalheat, lineardirectsteam) + The string should be lower-cased with spaces and _ removed, + by default None + curtailment : NoneType | dict | str | config.curtailment.Curtailment + Inputs for curtailment parameters. If not None, curtailment inputs + are expected. 
Can be: + + - Explicit namespace of curtailment variables (dict) + - Pointer to curtailment config json file with path (str) + - Instance of curtailment config object + (config.curtailment.Curtailment) + + + Returns + ------- + pp : ProjectPoints + Initialized ProjectPoints object for points nearest to given + lat_lons + """ + multi_h5_res, hsds = check_res_file(res_file) + if multi_h5_res: + res_cls = MultiFileResourceX + else: + res_cls = ResourceX + + logger.info('Extracting ProjectPoints for desired regions') + points = [] + with res_cls(res_file, hsds=hsds) as f: + meta = f.meta + for region, region_col in regions.items(): + logger.debug('- {}: {}'.format(region_col, region)) + # pylint: disable=no-member + gids = f.region_gids(region, region_col=region_col) + logger.debug('- Resource gids:\n{}'.format(gids)) + if points: + duplicates = np.intersect1d(gids, points).tolist() + if duplicates: + msg = ('reV Cannot currently handle duplicate ' + 'Resource gids! The given regions containg the ' + 'same gids:\n{}'.format(duplicates)) + logger.error(msg) + raise RuntimeError(msg) + + points.extend(gids.tolist()) + + pp = cls(points, sam_configs, tech=tech, res_file=res_file, + curtailment=curtailment) + + meta = meta.loc[pp.sites] + cols = list(set(regions.values())) + for c in cols: + pp._df[c] = meta[c].values + + return pp
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/config/sam_config.html b/_modules/reV/config/sam_config.html new file mode 100644 index 000000000..d01e48697 --- /dev/null +++ b/_modules/reV/config/sam_config.html @@ -0,0 +1,858 @@ + + + + + + reV.config.sam_config — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.config.sam_config

+# -*- coding: utf-8 -*-
+"""
+reV configuration framework for SAM config inputs.
+"""
+import logging
+import os
+from warnings import warn
+
+from gaps.config import load_config
+
+from reV.utilities.exceptions import SAMInputError, SAMInputWarning
+from reV.config.base_config import BaseConfig
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class SAMConfig(BaseConfig): + """Class to handle the SAM section of config input.""" + + def __init__(self, SAM_configs): + """ + Parameters + ---------- + SAM_configs : dict + Keys are config ID's, values are filepaths to the SAM configs. + """ + super().__init__(SAM_configs, check_keys=False) + self._clearsky = None + self._bifacial = None + self._icing = None + self._inputs = None + self._downscale = None + self._time_index_step = None + + @property + def clearsky(self): + """Get a boolean for whether solar resource requires clearsky irrad. + + Returns + ------- + clearsky : bool + Flag set in the SAM config input with key "clearsky" for solar + analysis to process generation for clearsky irradiance. + Defaults to False (normal all-sky irradiance). + """ + if self._clearsky is None: + self._clearsky = False + for v in self.inputs.values(): + self._clearsky = any((self._clearsky, + bool(v.get('clearsky', False)))) + + if self._clearsky: + logger.debug('Solar analysis being performed on clearsky ' + 'irradiance.') + + return self._clearsky + + @property + def bifacial(self): + """Get a boolean for whether bifacial solar analysis is being run. + + Returns + ------- + bifacial : bool + Flag set in the SAM config input with key "bifaciality" for solar + analysis to analyze bifacial PV panels. Will require albedo input. + Defaults to False (no bifacial panels is default). + """ + if self._bifacial is None: + self._bifacial = False + for v in self.inputs.values(): + bi_flags = ('bifaciality', 'spe_is_bifacial', + 'cec_is_bifacial', '6par_is_bifacial') + bi_bools = [bool(v.get(flag, 0)) for flag in bi_flags] + self._bifacial = any(bi_bools + [self._bifacial]) + + return self._bifacial + + @property + def icing(self): + """Get a boolean for whether wind generation is considering icing. + + Returns + ------- + _icing : bool + Flag for whether wind generation is considering icing effects. + Based on whether SAM input json has "en_icing_cutoff" == 1. 
+ """ + if self._icing is None: + self._icing = False + for v in self.inputs.values(): + self._icing = any((self._icing, + bool(v.get('en_icing_cutoff', False)))) + + if self._icing: + logger.debug('Icing analysis active for wind gen.') + + return self._icing + + @property + def time_index_step(self): + """ + Step size for time_index for SAM profile output resolution + + Returns + ------- + int | None + Step size for time_index, used to reduce temporal resolution + """ + if self._time_index_step is None: + time_index_step = [] + for v in self.inputs.values(): + time_index_step.append(v.get('time_index_step', None)) + + self._time_index_step = list(set(time_index_step)) + + if len(self._time_index_step) > 1: + msg = ('Expecting a single unique value for "time_index_step" but ' + 'received: {}'.format(self._time_index_step)) + logger.error(msg) + raise SAMInputError(msg) + + return self._time_index_step[0] + + @property + def downscale(self): + """ + Resolution to downscale NSRDB resource to. + + Returns + ------- + dict | None + Option for NSRDB resource downscaling to higher temporal + resolution. The config expects a str entry in the Pandas + frequency format, e.g. '5min' or a dict of downscaling kwargs + such as {'frequency': '5min', 'variability_kwargs': + {'var_frac': 0.05, 'distribution': 'uniform'}}. + A str entry will be converted to a kwarg dict for the output + of this property e.g. 
'5min' -> {'frequency': '5min'} + """ + if self._downscale is None: + ds_list = [] + for v in self.inputs.values(): + ds_list.append(v.get('downscale', None)) + + self._downscale = ds_list[0] + ds_list = list({str(x) for x in ds_list}) + + if len(ds_list) > 1: + msg = ('Expecting a single unique value for "downscale" but ' + 'received: {}'.format(ds_list)) + logger.error(msg) + raise SAMInputError(msg) + + if isinstance(self._downscale, str): + self._downscale = {'frequency': self._downscale} + + return self._downscale + + @property + def inputs(self): + """Get the SAM input file(s) (JSON/JSON5/YAML/TOML) and return + as a dictionary. + + Parameters + ---------- + _inputs : dict + The keys of this dictionary are the "configuration ID's". + The values are the imported json SAM input dictionaries. + """ + if self._inputs is None: + self._inputs = {} + for key, config in self.items(): + # key is ID (i.e. sam_param_0) that matches project points json + # fname is the actual SAM config file name (with path) + if isinstance(config, str): + if not os.path.exists(config): + raise IOError('SAM config file does not exist: "{}"' + .format(config)) + else: + config = load_config(config) + + if not isinstance(config, dict): + raise RuntimeError('SAM config must be a file or a ' + 'pre-extracted dictionary, but got: {}' + .format(config)) + + SAMInputsChecker.check(config) + self._inputs[key] = config + + return self._inputs
+ + +
[docs]class SAMInputsChecker: + """Class to check SAM input jsons and warn against bad inputs.""" + + # Keys that are used to identify a technology config + KEYS_PV = ('tilt', 'azimuth', 'module_type', 'array_type') + + def __init__(self, config): + """ + Parameters + ---------- + config : dict + Extracted SAM technology input config in dict form. + """ + if isinstance(config, dict): + self._config = config + else: + raise TypeError('Bad SAM tech config type: {}' + .format(type(config))) + +
[docs] def check_pv(self): + """Run input checks for a pv input config.""" + if self._config['array_type'] >= 2 and self._config['tilt'] != 0: + w = ('SAM input for PV has array type {} (tracking) and tilt ' + 'of {}. This is uncommon!' + .format(self._config['array_type'], self._config['tilt'])) + logger.warning(w) + warn(w, SAMInputWarning)
+ + def _run_checks(self): + """Infer config type and run applicable checks.""" + if all(c in self._config for c in self.KEYS_PV): + self.check_pv() + +
[docs] @classmethod + def check(cls, config): + """Run checks on a SAM input json config. + + Parameters + ---------- + config : dict + Extracted SAM technology input config in dict form. + """ + c = cls(config) + c._run_checks()
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/econ/econ.html b/_modules/reV/econ/econ.html new file mode 100644 index 000000000..234d9c780 --- /dev/null +++ b/_modules/reV/econ/econ.html @@ -0,0 +1,1166 @@ + + + + + + reV.econ.econ — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.econ.econ

+# -*- coding: utf-8 -*-
+"""
+reV econ module (lcoe-fcr, single owner, etc...)
+"""
+import logging
+import numpy as np
+import os
+import pandas as pd
+import pprint
+from warnings import warn
+
+from reV.config.project_points import PointsControl
+from reV.generation.base import BaseGen
+from reV.handlers.outputs import Outputs
+from reV.SAM.econ import LCOE as SAM_LCOE
+from reV.SAM.econ import SingleOwner
+from reV.SAM.windbos import WindBos
+from reV.utilities.exceptions import (ExecutionError, OffshoreWindInputWarning)
+from reV.utilities import ModuleName
+
+from rex.resource import Resource
+from rex.multi_file_resource import MultiFileResource
+from rex.utilities.utilities import check_res_file
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class Econ(BaseGen): + """Econ""" + + # Mapping of reV econ output strings to SAM econ modules + OPTIONS = {'lcoe_fcr': SAM_LCOE, + 'ppa_price': SingleOwner, + 'project_return_aftertax_npv': SingleOwner, + 'lcoe_real': SingleOwner, + 'lcoe_nom': SingleOwner, + 'flip_actual_irr': SingleOwner, + 'gross_revenue': SingleOwner, + 'total_installed_cost': WindBos, + 'turbine_cost': WindBos, + 'sales_tax_cost': WindBos, + 'bos_cost': WindBos, + 'fixed_charge_rate': SAM_LCOE, + 'capital_cost': SAM_LCOE, + 'fixed_operating_cost': SAM_LCOE, + 'variable_operating_cost': SAM_LCOE, + } + """Available ``reV`` econ `output_request` options""" + + # Mapping of reV econ outputs to scale factors and units. + # Type is scalar or array and corresponds to the SAM single-site output + OUT_ATTRS = BaseGen.ECON_ATTRS + + def __init__(self, project_points, sam_files, cf_file, site_data=None, + output_request=('lcoe_fcr',), sites_per_worker=100, + memory_utilization_limit=0.4, append=False): + """reV econ analysis class. + + ``reV`` econ analysis runs SAM econ calculations, typically to + compute LCOE (using :py:class:`PySAM.Lcoefcr.Lcoefcr`), though + :py:class:`PySAM.Singleowner.Singleowner` or + :py:class:`PySAM.Windbos.Windbos` calculations can also be + performed simply by requesting outputs from those computation + modules. See the keys of + :attr:`Econ.OPTIONS <reV.econ.econ.Econ.OPTIONS>` for all + available econ outputs. Econ computations rely on an input a + generation (i.e. capacity factor) profile. You can request + ``reV`` to run the analysis for one or more "sites", which + correspond to the meta indices in the generation data. + + Parameters + ---------- + project_points : int | list | tuple | str | dict | pd.DataFrame | slice + Input specifying which sites to process. A single integer + representing the GID of a site may be specified to evaluate + reV at a single location. 
A list or tuple of integers + (or slice) representing the GIDs of multiple sites can be + specified to evaluate reV at multiple specific locations. + A string pointing to a project points CSV file may also be + specified. Typically, the CSV contains two columns: + + - ``gid``: Integer specifying the GID of each site. + - ``config``: Key in the `sam_files` input dictionary + (see below) corresponding to the SAM configuration to + use for each particular site. This value can also be + ``None`` (or left out completely) if you specify only + a single SAM configuration file as the `sam_files` + input. + + The CSV file may also contain site-specific inputs by + including a column named after a config keyword (e.g. a + column called ``capital_cost`` may be included to specify a + site-specific capital cost value for each location). Columns + that do not correspond to a config key may also be included, + but they will be ignored. A DataFrame following the same + guidelines as the CSV input (or a dictionary that can be + used to initialize such a DataFrame) may be used for this + input as well. + sam_files : dict | str + A dictionary mapping SAM input configuration ID(s) to SAM + configuration(s). Keys are the SAM config ID(s) which + correspond to the ``config`` column in the project points + CSV. Values for each key are either a path to a + corresponding SAM config file or a full dictionary + of SAM config inputs. For example:: + + sam_files = { + "default": "/path/to/default/sam.json", + "onshore": "/path/to/onshore/sam_config.yaml", + "offshore": { + "sam_key_1": "sam_value_1", + "sam_key_2": "sam_value_2", + ... + }, + ... + } + + This input can also be a string pointing to a single SAM + config file. In this case, the ``config`` column of the + CSV points input should be set to ``None`` or left out + completely. See the documentation for the ``reV`` SAM class + (e.g. 
:class:`reV.SAM.generation.WindPower`, + :class:`reV.SAM.generation.PvWattsv8`, + :class:`reV.SAM.generation.Geothermal`, etc.) for + documentation on the allowed and/or required SAM config file + inputs. + cf_file : str + Path to reV output generation file containing a capacity + factor output. + + .. Note:: If executing ``reV`` from the command line, this + path can contain brackets ``{}`` that will be filled in + by the `analysis_years` input. Alternatively, this input + can be set to ``"PIPELINE"`` to parse the output of the + previous step (``reV`` generation) and use it as input to + this call. However, note that duplicate executions of + ``reV`` generation within the pipeline may invalidate this + parsing, meaning the `cf_file` input will have to be + specified manually. + + site_data : str | pd.DataFrame, optional + Site-specific input data for SAM calculation. If this input + is a string, it should be a path that points to a CSV file. + Otherwise, this input should be a DataFrame with + pre-extracted site data. Rows in this table should match + the input sites via a ``gid`` column. The rest of the + columns should match configuration input keys that will take + site-specific values. Note that some or all site-specific + inputs can be specified via the `project_points` input + table instead. If ``None``, no site-specific data is + considered. By default, ``None``. + output_request : list | tuple, optional + List of output variables requested from SAM. Can be any + of the parameters in the "Outputs" group of the PySAM module + (e.g. :py:class:`PySAM.Windpower.Windpower.Outputs`, + :py:class:`PySAM.Pvwattsv8.Pvwattsv8.Outputs`, + :py:class:`PySAM.Geothermal.Geothermal.Outputs`, etc.) being + executed. This list can also include a select number of SAM + config/resource parameters to include in the output: + any key in any of the + `output attribute JSON files <https://tinyurl.com/4bmrpe3j/>`_ + may be requested. 
Time-series profiles requested via this + input are output in UTC. By default, ``('lcoe_fcr',)``. + sites_per_worker : int, optional + Number of sites to run in series on a worker. ``None`` + defaults to the resource file chunk size. + By default, ``None``. + memory_utilization_limit : float, optional + Memory utilization limit (fractional). Must be a value + between 0 and 1. This input sets how many site results will + be stored in-memory at any given time before flushing to + disk. By default, ``0.4``. + append : bool + Option to append econ datasets to source `cf_file`. + By default, ``False``. + """ + + # get a points control instance + pc = self.get_pc(points=project_points, points_range=None, + sam_configs=sam_files, cf_file=cf_file, + sites_per_worker=sites_per_worker, append=append) + + super().__init__(pc, output_request, site_data=site_data, + memory_utilization_limit=memory_utilization_limit) + + self._cf_file = cf_file + self._append = append + self._run_attrs['cf_file'] = cf_file + self._run_attrs['sam_module'] = self._sam_module.MODULE + + @property + def cf_file(self): + """Get the capacity factor output filename and path. + + Returns + ------- + cf_file : str + reV generation capacity factor output file with path. + """ + return self._cf_file + + @property + def meta(self): + """Get meta data from the source capacity factors file. + + Returns + ------- + _meta : pd.DataFrame + Meta data from capacity factor outputs file. + """ + if self._meta is None and self.cf_file is not None: + with Outputs(self.cf_file) as cfh: + # only take meta that belongs to this project's site list + self._meta = cfh.meta[ + cfh.meta['gid'].isin(self.points_control.sites)] + + if 'offshore' in self._meta: + if self._meta['offshore'].sum() > 1: + w = ('Found offshore sites in econ meta data. ' + 'This functionality has been deprecated. 
' + 'Please run the reV offshore module to ' + 'calculate offshore wind lcoe.') + warn(w, OffshoreWindInputWarning) + logger.warning(w) + + elif self._meta is None and self.cf_file is None: + self._meta = pd.DataFrame({'gid': self.points_control.sites}) + + return self._meta + + @property + def time_index(self): + """Get the generation resource time index data.""" + if self._time_index is None and self.cf_file is not None: + with Outputs(self.cf_file) as cfh: + if 'time_index' in cfh.datasets: + self._time_index = cfh.time_index + + return self._time_index + + @staticmethod + def _econ_append_pc(pp, cf_file, sites_per_worker=None): + """ + Generate ProjectControls for econ append + + Parameters + ---------- + pp : reV.config.project_points.ProjectPoints + ProjectPoints to adjust gids for + cf_file : str + reV generation capacity factor output file with path. + sites_per_worker : int + Number of sites to run in series on a worker. None defaults to the + resource file chunk size. + + Returns + ------- + pc : reV.config.project_points.PointsControl + PointsControl object instance. + """ + multi_h5_res, hsds = check_res_file(cf_file) + if multi_h5_res: + res_cls = MultiFileResource + res_kwargs = {} + else: + res_cls = Resource + res_kwargs = {'hsds': hsds} + + with res_cls(cf_file, **res_kwargs) as f: + gid0 = f.meta['gid'].values[0] + gid1 = f.meta['gid'].values[-1] + + i0 = pp.index(gid0) + i1 = pp.index(gid1) + 1 + pc = PointsControl.split(i0, i1, pp, sites_per_split=sites_per_worker) + + return pc + +
[docs] @classmethod + def get_pc(cls, points, points_range, sam_configs, cf_file, + sites_per_worker=None, append=False): + """ + Get a PointsControl instance. + + Parameters + ---------- + points : slice | list | str | reV.config.project_points.PointsControl + Slice specifying project points, or string pointing to a project + points csv, or a fully instantiated PointsControl object. + points_range : list | None + Optional two-entry list specifying the index range of the sites to + analyze. To be taken from the reV.config.PointsControl.split_range + property. + sam_configs : dict | str | SAMConfig + SAM input configuration ID(s) and file path(s). Keys are the SAM + config ID(s) which map to the config column in the project points + CSV. Values are either a JSON SAM config file or dictionary of SAM + config inputs. Can also be a single config file path or a + pre loaded SAMConfig object. + cf_file : str + reV generation capacity factor output file with path. + sites_per_worker : int + Number of sites to run in series on a worker. None defaults to the + resource file chunk size. + append : bool + Flag to append econ datasets to source cf_file. This has priority + over the out_fpath input. + + Returns + ------- + pc : reV.config.project_points.PointsControl + PointsControl object instance. + """ + pc = super().get_pc(points, points_range, sam_configs, ModuleName.ECON, + sites_per_worker=sites_per_worker, + res_file=cf_file) + + if append: + pc = cls._econ_append_pc(pc.project_points, cf_file, + sites_per_worker=sites_per_worker) + + return pc
+ + @staticmethod + def _run_single_worker(pc, econ_fun, output_request, **kwargs): + """Run the SAM econ calculation. + + Parameters + ---------- + pc : reV.config.project_points.PointsControl + Iterable points control object from reV config module. + Must have project_points with df property with all relevant + site-specific inputs and a 'gid' column. By passing site-specific + inputs in this dataframe, which was split using points_control, + only the data relevant to the current sites is passed. + econ_fun : method + reV_run() method from one of the econ modules (SingleOwner, + SAM_LCOE, WindBos). + output_request : str | list | tuple + Economic output variable(s) requested from SAM. + kwargs : dict + Additional input parameters for the SAM run module. + + Returns + ------- + out : dict + Output dictionary from the SAM reV_run function. Data is scaled + within this function to the datatype specified in Econ.OUT_ATTRS. + """ + + # make sure output request is a list + if isinstance(output_request, str): + output_request = [output_request] + + # Extract the site df from the project points df. + site_df = pc.project_points.df + site_df = site_df.set_index('gid', drop=True) + + # SAM execute econ analysis based on output request + try: + out = econ_fun(pc, site_df, output_request=output_request, + **kwargs) + except Exception as e: + out = {} + logger.exception('Worker failed for PC: {}'.format(pc)) + raise e + + return out + + def _parse_output_request(self, req): + """Set the output variables requested from generation. + + Parameters + ---------- + req : str| list | tuple + Output variables requested from SAM. + + Returns + ------- + output_request : list + Output variables requested from SAM. + """ + + output_request = self._output_request_type_check(req) + + for request in output_request: + if request not in self.OUT_ATTRS: + msg = ('User output request "{}" not recognized. 
' + 'Will attempt to extract from PySAM.'.format(request)) + logger.debug(msg) + + modules = [] + for request in output_request: + if request in self.OPTIONS: + modules.append(self.OPTIONS[request]) + + if not any(modules): + msg = ('None of the user output requests were recognized. ' + 'Cannot run reV econ. ' + 'At least one of the following must be requested: {}' + .format(list(self.OPTIONS.keys()))) + logger.exception(msg) + raise ExecutionError(msg) + + b1 = [m == modules[0] for m in modules] + b2 = np.array([m == WindBos for m in modules]) + b3 = np.array([m == SingleOwner for m in modules]) + + if all(b1): + self._sam_module = modules[0] + self._fun = modules[0].reV_run + elif all(b2 | b3): + self._sam_module = SingleOwner + self._fun = SingleOwner.reV_run + else: + msg = ('Econ outputs requested from different SAM modules not ' + 'currently supported. Output request variables require ' + 'SAM methods: {}'.format(modules)) + raise ValueError(msg) + + return list(set(output_request)) + + def _get_data_shape(self, dset, n_sites): + """Get the output array shape based on OUT_ATTRS or PySAM.Outputs. + + This Econ get data shape method will also first check for the dset in + the site_data table. If not found in site_data, the dataset will be + looked for in OUT_ATTRS and PySAM.Outputs as it would for Generation. + + Parameters + ---------- + dset : str + Variable name to get shape for. + n_sites : int + Number of sites for this data shape. + + Returns + ------- + shape : tuple + 1D or 2D shape tuple for dset. + """ + + if dset in self.site_data: + data_shape = (n_sites, ) + data = self.site_data[dset].values[0] + + if isinstance(data, (list, tuple, np.ndarray, str)): + msg = ('Cannot pass through non-scalar site_data ' + 'input key "{}" as an output_request!'.format(dset)) + logger.error(msg) + raise ExecutionError(msg) + + else: + data_shape = super()._get_data_shape(dset, n_sites) + + return data_shape + +
[docs] def run(self, out_fpath=None, max_workers=1, timeout=1800, + pool_size=None): + """Execute a parallel reV econ run with smart data flushing. + + Parameters + ---------- + out_fpath : str, optional + Path to output file. If this class was initialized with + ``append=True``, this input has no effect. If ``None``, no + output file will be written. If the filepath is specified + but the module name (econ) and/or resource data year is not + included, the module name and/or resource data year will get + added to the output file name. By default, ``None``. + max_workers : int, optional + Number of local workers to run on. By default, ``1``. + timeout : int, optional + Number of seconds to wait for parallel run iteration to + complete before returning zeros. By default, ``1800`` + seconds. + pool_size : int, optional + Number of futures to submit to a single process pool for + parallel futures. If ``None``, the pool size is set to + ``os.cpu_count() * 2``. By default, ``None``. + + Returns + ------- + str | None + Path to output HDF5 file, or ``None`` if results were not + written to disk. 
+ """ + if pool_size is None: + pool_size = os.cpu_count() * 2 + + # initialize output file or append econ data to gen file + if self._append: + self._out_fpath = self._cf_file + else: + self._init_fpath(out_fpath, ModuleName.ECON) + + self._init_h5(mode='a' if self._append else 'w') + self._init_out_arrays() + + diff = list(set(self.points_control.sites) + - set(self.meta['gid'].values)) + if diff: + raise Exception('The following analysis sites were requested ' + 'through project points for econ but are not ' + 'found in the CF file ("{}"): {}' + .format(self.cf_file, diff)) + + # make a kwarg dict + kwargs = {'output_request': self.output_request, + 'cf_file': self.cf_file, + 'year': self.year} + + logger.info('Running econ with smart data flushing ' + 'for: {}'.format(self.points_control)) + logger.debug('The following project points were specified: "{}"' + .format(self.project_points)) + logger.debug('The following SAM configs are available to this run:\n{}' + .format(pprint.pformat(self.sam_configs, indent=4))) + logger.debug('The SAM output variables have been requested:\n{}' + .format(self.output_request)) + + try: + kwargs['econ_fun'] = self._fun + if max_workers == 1: + logger.debug('Running serial econ for: {}' + .format(self.points_control)) + for i, pc_sub in enumerate(self.points_control): + self.out = self._run_single_worker(pc_sub, **kwargs) + logger.info('Finished reV econ serial compute for: {} ' + '(iteration {} out of {})' + .format(pc_sub, i + 1, + len(self.points_control))) + self.flush() + else: + logger.debug('Running parallel econ for: {}' + .format(self.points_control)) + self._parallel_run(max_workers=max_workers, + pool_size=pool_size, timeout=timeout, + **kwargs) + + except Exception as e: + logger.exception('SmartParallelJob.execute() failed for econ.') + raise e + + return self._out_fpath
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/econ/economies_of_scale.html b/_modules/reV/econ/economies_of_scale.html new file mode 100644 index 000000000..3868c9266 --- /dev/null +++ b/_modules/reV/econ/economies_of_scale.html @@ -0,0 +1,925 @@ + + + + + + reV.econ.economies_of_scale — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.econ.economies_of_scale

+# -*- coding: utf-8 -*-
+"""
+reV module for calculating economies of scale where larger power plants will
+have reduced capital cost.
+"""
+import logging
+import copy
+import re
+import numpy as np  # pylint: disable=unused-import
+import pandas as pd
+
+from reV.econ.utilities import lcoe_fcr
+from rex.utilities.utilities import check_eval_str
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class EconomiesOfScale: + """Class to calculate economies of scale where power plant capital cost is + reduced for larger power plants. + + Units + ----- + capacity_factor : unitless + capacity : kW + annual_energy_production : kWh + fixed_charge_rate : unitless + fixed_operating_cost : $ (per year) + variable_operating_cost : $/kWh + lcoe : $/MWh + """ + + def __init__(self, eqn, data): + """ + Parameters + ---------- + eqn : str + LCOE scaling equation to implement "economies of scale". + Equation must be in python string format and return a scalar + value to multiply the capital cost by. Independent variables in + the equation should match the keys in the data input arg. This + equation may use numpy functions with the package prefix "np". + data : dict | pd.DataFrame + Namespace of econ data to use to calculate economies of scale. Keys + in dict or column labels in dataframe should match the Independent + variables in the eqn input. Should also include variables required + to calculate LCOE. + """ + self._eqn = eqn + self._data = data + self._preflight() + + def _preflight(self): + """Run checks to validate EconomiesOfScale equation and input data.""" + + if self._eqn is not None: + check_eval_str(str(self._eqn)) + + if isinstance(self._data, pd.DataFrame): + self._data = {k: self._data[k].values.flatten() + for k in self._data.columns} + + if not isinstance(self._data, dict): + e = ('Cannot evaluate EconomiesOfScale with data input of type: {}' + .format(type(self._data))) + logger.error(e) + raise TypeError(e) + + missing = [] + for name in self.vars: + if name not in self._data: + missing.append(name) + + if any(missing): + e = ('Cannot evaluate EconomiesOfScale, missing data for variables' + ': {} for equation: {}'.format(missing, self._eqn)) + logger.error(e) + raise KeyError(e) + +
[docs] @staticmethod + def is_num(s): + """Check if a string is a number""" + try: + float(s) + except ValueError: + return False + else: + return True
+ +
[docs] @staticmethod + def is_method(s): + """Check if a string is a numpy/pandas or python builtin method""" + return bool(s.startswith(('np.', 'pd.')) or s in dir(__builtins__))
+ + @property + def vars(self): + """Get a list of variable names that the EconomiesOfScale equation + uses as input. + + Returns + ------- + vars : list + List of strings representing variable names that were parsed from + the equation string. This will return an empty list if the equation + has no variables. + """ + var_names = [] + if self._eqn is not None: + delimiters = ('*', '/', '+', '-', ' ', '(', ')', '[', ']', ',') + regex_pattern = '|'.join(map(re.escape, delimiters)) + var_names = [] + for sub in re.split(regex_pattern, str(self._eqn)): + if sub: + if not self.is_num(sub) and not self.is_method(sub): + var_names.append(sub) + var_names = sorted(list(set(var_names))) + + return var_names + + def _evaluate(self): + """Evaluate the EconomiesOfScale equation with Independent variables + parsed into a kwargs dictionary input. + + Returns + ------- + out : float | np.ndarray + Evaluated output of the EconomiesOfScale equation. Should be + numeric scalars to apply directly to the capital cost. + """ + out = 1 + if self._eqn is not None: + kwargs = {k: self._data[k] for k in self.vars} + # pylint: disable=eval-used + out = eval(str(self._eqn), globals(), kwargs) + + return out + + @staticmethod + def _get_prioritized_keys(input_dict, key_list): + """Get data from an input dictionary based on an ordered (prioritized) + list of retrieval keys. If no keys are found in the input_dict, an + error will be raised. + + Parameters + ---------- + input_dict : dict + Dictionary of data + key_list : list | tuple + Ordered (prioritized) list of retrieval keys. + + Returns + ------- + out : object + Data retrieved from input_dict using the first key in key_list + found in the input_dict. 
+ """ + + out = None + for key in key_list: + if key in input_dict: + out = input_dict[key] + break + + if out is None: + e = ('Could not find requested key list ({}) in the input ' + 'dictionary keys: {}' + .format(key_list, list(input_dict.keys()))) + logger.error(e) + raise KeyError(e) + + return out + + @property + def capital_cost_scalar(self): + """Evaluated output of the EconomiesOfScale equation. Should be + numeric scalars to apply directly to the capital cost. + + Returns + ------- + out : float | np.ndarray + Evaluated output of the EconomiesOfScale equation. Should be + numeric scalars to apply directly to the capital cost. + """ + return self._evaluate() + + @property + def raw_capital_cost(self): + """Unscaled (raw) capital cost found in the data input arg. + + Returns + ------- + out : float | np.ndarray + Unscaled (raw) capital_cost found in the data input arg. + """ + key_list = ['capital_cost', 'mean_capital_cost'] + return self._get_prioritized_keys(self._data, key_list) + + @property + def scaled_capital_cost(self): + """Capital cost found in the data input arg scaled by the evaluated + EconomiesOfScale input equation. + + Returns + ------- + out : float | np.ndarray + Capital cost found in the data input arg scaled by the evaluated + EconomiesOfScale equation. + """ + cc = copy.deepcopy(self.raw_capital_cost) + cc *= self.capital_cost_scalar + return cc + + @property + def system_capacity(self): + """Get the system capacity in kW (SAM input, not the reV supply + curve capacity). 
+ + Returns + ------- + out : float | np.ndarray + """ + key_list = ['system_capacity', 'mean_system_capacity'] + return self._get_prioritized_keys(self._data, key_list) + + @property + def fcr(self): + """Fixed charge rate from input data arg + + Returns + ------- + out : float | np.ndarray + Fixed charge rate from input data arg + """ + key_list = ['fixed_charge_rate', 'mean_fixed_charge_rate', + 'fcr', 'mean_fcr'] + return self._get_prioritized_keys(self._data, key_list) + + @property + def foc(self): + """Fixed operating cost from input data arg + + Returns + ------- + out : float | np.ndarray + Fixed operating cost from input data arg + """ + key_list = ['fixed_operating_cost', 'mean_fixed_operating_cost', + 'foc', 'mean_foc'] + return self._get_prioritized_keys(self._data, key_list) + + @property + def voc(self): + """Variable operating cost from input data arg + + Returns + ------- + out : float | np.ndarray + Variable operating cost from input data arg + """ + key_list = ['variable_operating_cost', 'mean_variable_operating_cost', + 'voc', 'mean_voc'] + return self._get_prioritized_keys(self._data, key_list) + + @property + def aep(self): + """Annual energy production back-calculated from the raw LCOE: + + AEP = (fcr * raw_cap_cost + foc) / raw_lcoe + + Returns + ------- + out : float | np.ndarray + """ + + aep = (self.fcr * self.raw_capital_cost + self.foc) / self.raw_lcoe + aep *= 1000 # convert MWh to KWh + return aep + + @property + def raw_lcoe(self): + """Raw LCOE taken from the input data + + Returns + ------- + lcoe : float | np.ndarray + """ + key_list = ['raw_lcoe', 'mean_lcoe'] + return copy.deepcopy(self._get_prioritized_keys(self._data, key_list)) + + @property + def scaled_lcoe(self): + """LCOE calculated with the scaled capital cost based on the + EconomiesOfScale input equation. 
+ + LCOE = (FCR * scaled_capital_cost + FOC) / AEP + VOC + + Returns + ------- + lcoe : float | np.ndarray + LCOE calculated with the scaled capital cost based on the + EconomiesOfScale input equation. + """ + return lcoe_fcr(self.fcr, self.scaled_capital_cost, self.foc, + self.aep, self.voc)
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/econ/utilities.html b/_modules/reV/econ/utilities.html new file mode 100644 index 000000000..ba54b8b08 --- /dev/null +++ b/_modules/reV/econ/utilities.html @@ -0,0 +1,658 @@ + + + + + + reV.econ.utilities — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.econ.utilities

+# -*- coding: utf-8 -*-
+"""
+reV Econ utilities
+"""
+
+
+
[docs]def lcoe_fcr(fixed_charge_rate, capital_cost, fixed_operating_cost, + annual_energy_production, variable_operating_cost): + """Calculate the Levelized Cost of Electricity (LCOE) using the + fixed-charge-rate method: + + LCOE = ((fixed_charge_rate * capital_cost + fixed_operating_cost) + / annual_energy_production + variable_operating_cost) + + Parameters + ---------- + fixed_charge_rate : float | np.ndarray + Fixed charge rage (unitless) + capital_cost : float | np.ndarray + Capital cost (aka Capital Expenditures) ($) + fixed_operating_cost : float | np.ndarray + Fixed annual operating cost ($/year) + annual_energy_production : float | np.ndarray + Annual energy production (kWh for year) + (can be calculated as capacity * cf * 8760) + variable_operating_cost : float | np.ndarray + Variable operating cost ($/kWh) + + Returns + ------- + lcoe : float | np.ndarray + LCOE in $/MWh + """ + lcoe = ((fixed_charge_rate * capital_cost + fixed_operating_cost) + / annual_energy_production + variable_operating_cost) + lcoe *= 1000 # convert $/kWh to $/MWh + return lcoe
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/generation/base.html b/_modules/reV/generation/base.html new file mode 100644 index 000000000..b18484d37 --- /dev/null +++ b/_modules/reV/generation/base.html @@ -0,0 +1,1862 @@ + + + + + + reV.generation.base — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.generation.base

+# -*- coding: utf-8 -*-
+"""
+reV base gen and econ module.
+"""
+from abc import ABC, abstractmethod
+import copy
+from concurrent.futures import TimeoutError
+import logging
+import pandas as pd
+import numpy as np
+import os
+import psutil
+import json
+import sys
+from warnings import warn
+
+from reV.config.output_request import SAMOutputRequest
+from reV.config.project_points import ProjectPoints, PointsControl
+from reV.handlers.outputs import Outputs
+from reV.SAM.version_checker import PySamVersionChecker
+from reV.utilities.exceptions import (OutputWarning, ExecutionError,
+                                      ParallelExecutionWarning,
+                                      OffshoreWindInputWarning)
+from reV.utilities import log_versions, ModuleName
+
+from rex.resource import Resource
+from rex.utilities.execution import SpawnProcessPool
+
+logger = logging.getLogger(__name__)
+
+
+ATTR_DIR = os.path.dirname(os.path.realpath(__file__))
+ATTR_DIR = os.path.join(ATTR_DIR, 'output_attributes')
+with open(os.path.join(ATTR_DIR, 'other.json'), 'r') as f:
+    OTHER_ATTRS = json.load(f)
+with open(os.path.join(ATTR_DIR, 'lcoe_fcr.json'), 'r') as f:
+    LCOE_ATTRS = json.load(f)
+with open(os.path.join(ATTR_DIR, 'single_owner.json'), 'r') as f:
+    SO_ATTRS = json.load(f)
+with open(os.path.join(ATTR_DIR, 'windbos.json'), 'r') as f:
+    BOS_ATTRS = json.load(f)
+with open(os.path.join(ATTR_DIR, 'lcoe_fcr_inputs.json'), 'r') as f:
+    LCOE_IN_ATTRS = json.load(f)
+
+
+
[docs]class BaseGen(ABC): + """Base class for reV gen and econ classes to run SAM simulations.""" + + # Mapping of reV requests to SAM objects that should be used for simulation + OPTIONS = {} + + # Mapping of reV generation / econ outputs to scale factors and units. + OUT_ATTRS = copy.deepcopy(OTHER_ATTRS) + + # Mapping of reV econ outputs to scale factors and units. + # Type is scalar or array and corresponds to the SAM single-site output + # This is the OUT_ATTRS class attr for Econ but should also be accessible + # to rev generation + ECON_ATTRS = copy.deepcopy(OTHER_ATTRS) + ECON_ATTRS.update(LCOE_ATTRS) + ECON_ATTRS.update(SO_ATTRS) + ECON_ATTRS.update(BOS_ATTRS) + ECON_ATTRS.update(LCOE_IN_ATTRS) + + # SAM argument names used to calculate LCOE + # Note that system_capacity is not included here because it is never used + # downstream and could be confused with the supply_curve point capacity + LCOE_ARGS = ('fixed_charge_rate', 'capital_cost', 'fixed_operating_cost', + 'variable_operating_cost') + + def __init__(self, points_control, output_request, site_data=None, + drop_leap=False, memory_utilization_limit=0.4, + scale_outputs=True): + """ + Parameters + ---------- + points_control : reV.config.project_points.PointsControl + Project points control instance for site and SAM config spec. + output_request : list | tuple + Output variables requested from SAM. + site_data : str | pd.DataFrame | None + Site-specific input data for SAM calculation. String should be a + filepath that points to a csv, DataFrame is pre-extracted data. + Rows match sites, columns are input keys. Need a "gid" column. + Input as None if no site-specific data. + drop_leap : bool + Drop leap day instead of final day of year during leap years. + memory_utilization_limit : float + Memory utilization limit (fractional). This sets how many site + results will be stored in-memory at any given time before flushing + to disk. 
+ scale_outputs : bool + Flag to scale outputs in-place immediately upon Gen returning data. + """ + log_versions(logger) + self._points_control = points_control + self._year = None + self._site_limit = None + self._site_mem = None + self._out_fpath = None + self._meta = None + self._time_index = None + self._sam_module = None + self._sam_obj_default = None + self._drop_leap = drop_leap + self.mem_util_lim = memory_utilization_limit + self.scale_outputs = scale_outputs + + self._run_attrs = {'points_control': str(points_control), + 'output_request': output_request, + 'site_data': str(site_data), + 'drop_leap': str(drop_leap), + 'memory_utilization_limit': self.mem_util_lim} + + self._site_data = self._parse_site_data(site_data) + self.add_site_data_to_pp(self._site_data) + output_request = SAMOutputRequest(output_request) + self._output_request = self._parse_output_request(output_request) + + # pre-initialize output arrays to store results when available. + self._out = {} + self._finished_sites = [] + self._out_n_sites = 0 + self._out_chunk = () + self._check_sam_version_inputs() + + @property + def output_request(self): + """Get the output variables requested from the user. + + Returns + ------- + output_request : list + Output variables requested from SAM. + """ + return self._output_request + + @property + def out_chunk(self): + """Get the current output chunk index range (INCLUSIVE). + + Returns + ------- + _out_chunk : tuple + Two entry tuple (start, end) indicies (inclusive) for where the + current data in-memory belongs in the final output. + """ + return self._out_chunk + + @property + def site_data(self): + """Get the site-specific inputs in dataframe format. + + Returns + ------- + _site_data : pd.DataFrame + Site-specific input data for gen or econ calculation. Rows match + sites, columns are variables. 
+ """ + return self._site_data + + @property + def site_limit(self): + """Get the number of sites results that can be stored in memory at once + + Returns + ------- + _site_limit : int + Number of site result sets that can be stored in memory at once + without violating memory limits. + """ + + if self._site_limit is None: + tot_mem = psutil.virtual_memory().total / 1e6 + avail_mem = self.mem_util_lim * tot_mem + self._site_limit = int(np.floor(avail_mem / self.site_mem)) + logger.info('Limited to storing {0} sites in memory ' + '({1:.1f} GB total hardware, {2:.1f} GB available ' + 'with {3:.1f}% utilization).' + .format(self._site_limit, tot_mem / 1e3, + avail_mem / 1e3, self.mem_util_lim * 100)) + + return self._site_limit + + @property + def site_mem(self): + """Get the memory (MB) required to store all results for a single site. + + Returns + ------- + _site_mem : float + Memory (MB) required to store all results in requested in + output_request for a single site. + """ + + if self._site_mem is None: + # average the memory usage over n sites + # (for better understanding of array overhead) + n = 100 + self._site_mem = 0 + for request in self.output_request: + dtype = 'float32' + if request in self.OUT_ATTRS: + dtype = self.OUT_ATTRS[request].get('dtype', 'float32') + + shape = self._get_data_shape(request, n) + self._site_mem += sys.getsizeof(np.ones(shape, dtype=dtype)) + + self._site_mem = self._site_mem / 1e6 / n + logger.info('Output results from a single site are calculated to ' + 'use {0:.1f} KB of memory.' + .format(self._site_mem / 1000)) + + return self._site_mem + + @property + def points_control(self): + """Get project points controller. + + Returns + ------- + points_control : reV.config.project_points.PointsControl + Project points control instance for site and SAM config spec. 
+ """ + return self._points_control + + @property + def project_points(self): + """Get project points + + Returns + ------- + project_points : reV.config.project_points.ProjectPoints + Project points from the points control instance. + """ + return self._points_control.project_points + + @property + def sam_configs(self): + """Get the sam config dictionary. + + Returns + ------- + sam_configs : dict + SAM config from the project points instance. + """ + return self.project_points.sam_inputs + + @property + def sam_metas(self): + """ + SAM configurations including runtime module + + Returns + ------- + sam_metas : dict + Nested dictionary of SAM configuration files with module used + at runtime + """ + sam_metas = self.sam_configs.copy() + for v in sam_metas.values(): + v.update({'module': self._sam_module.MODULE}) + + return sam_metas + + @property + def sam_module(self): + """Get the SAM module class to be used for SAM simulations. + + Returns + ------- + sam_module : object + SAM object like PySAM.Pvwattsv7 or PySAM.Lcoefcr + """ + return self._sam_module + + @property + def meta(self): + """Get resource meta for all sites in project points. + + Returns + ------- + meta : pd.DataFrame + Meta data df for sites in project points. Column names are meta + data variables, rows are different sites. The row index + does not indicate the site number if the project points are + non-sequential or do not start from 0, so a 'gid' column is added. + """ + return self._meta + + @property + def time_index(self): + """Get the resource time index data. + + Returns + ------- + _time_index : pandas.DatetimeIndex + Time-series datetime index + """ + return self._time_index + + @property + def run_attrs(self): + """ + Run time attributes (__init__ args and kwargs) + + Returns + ------- + run_attrs : dict + Dictionary of runtime args and kwargs + """ + return self._run_attrs + + @property + def year(self): + """Get the resource year. 
+ + Returns + ------- + _year : int + Year of the time-series datetime index. + """ + + if self._year is None and self.time_index is not None: + self._year = int(self.time_index.year[0]) + + return self._year + + @property + def tech(self): + """Get the reV technology string. + + Returns + ------- + tech : str + SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, + solarwaterheat, troughphysicalheat, lineardirectsteam, econ) + The string should be lower-cased with spaces and _ removed. + """ + return self.project_points.tech + + @property + def out(self): + """Get the reV gen or econ output results. + + Returns + ------- + out : dict + Dictionary of gen or econ results from SAM. + """ + out = {} + for k, v in self._out.items(): + if k in self.OUT_ATTRS: + scale_factor = self.OUT_ATTRS[k].get('scale_factor', 1) + else: + scale_factor = 1 + + if scale_factor != 1 and self.scale_outputs: + v = v.astype('float32') + v /= scale_factor + + out[k] = v + + return out + + @out.setter + def out(self, result): + """Set the output attribute, unpack futures, clear output from mem. + + Parameters + ---------- + result : list | dict | None + Gen or Econ results to set to output dictionary. Use cases: + - List input is interpreted as a futures list, which is unpacked + before setting to the output dict. + - Dictionary input is interpreted as an already unpacked result. + - None is interpreted as a signal to clear the output dictionary. 
+ """ + if isinstance(result, list): + # unpack futures list to dictionary first + result = self.unpack_futures(result) + + if isinstance(result, dict): + + # iterate through dict where sites are keys and values are + # corresponding results + for site_gid, site_output in result.items(): + + # check that the sites are stored sequentially then add to + # the finished site list + if self._finished_sites: + if int(site_gid) < np.max(self._finished_sites): + raise Exception('Site results are non sequential!') + + # unpack site output object + self.unpack_output(site_gid, site_output) + + # add site gid to the finished list after outputs are unpacked + self._finished_sites.append(site_gid) + + elif isinstance(result, type(None)): + self._out.clear() + self._finished_sites.clear() + else: + raise TypeError('Did not recognize the type of output. ' + 'Tried to set output type "{}", but requires ' + 'list, dict or None.'.format(type(result))) + + @staticmethod + def _output_request_type_check(req): + """Output request type check and ensure list for manipulation. + + Parameters + ---------- + req : list | tuple | str + Output request of variable type. + + Returns + ------- + output_request : list + Output request. + """ + + if isinstance(req, list): + output_request = req + elif isinstance(req, tuple): + output_request = list(req) + elif isinstance(req, str): + output_request = [req] + else: + raise TypeError('Output request must be str, list, or tuple but ' + 'received: {}'.format(type(req))) + + return output_request + +
[docs] @staticmethod + def handle_leap_ti(ti, drop_leap=False): + """Handle a time index for a leap year by dropping a day. + + Parameters + ---------- + ti : pandas.DatetimeIndex + Time-series datetime index with or without a leap day. + drop_leap : bool + Option to drop leap day (if True) or drop the last day of the year + (if False). + + Returns + ------- + ti : pandas.DatetimeIndex + Time-series datetime index with length a multiple of 365. + """ + + # drop leap day or last day + leap_day = ((ti.month == 2) & (ti.day == 29)) + last_day = ((ti.month == 12) & (ti.day == 31)) + if drop_leap: + # preference is to drop leap day if exists + ti = ti.drop(ti[leap_day]) + elif any(leap_day): + # leap day exists but preference is to drop last day of year + ti = ti.drop(ti[last_day]) + + if len(ti) % 365 != 0: + raise ValueError('Bad time index with length not a multiple of ' + '365: {}'.format(ti)) + + return ti
+ + @staticmethod + def _pp_to_pc(points, points_range, sam_configs, tech, + sites_per_worker=None, res_file=None, curtailment=None): + """ + Create ProjectControl from ProjectPoints + + Parameters + ---------- + points : int | slice | list | str | pandas.DataFrame + | reV.config.project_points.PointsControl + Single site integer, + or slice or list specifying project points, + or string pointing to a project points csv, + or a pre-loaded project points DataFrame, + or a fully instantiated PointsControl object. + points_range : list | None + Optional two-entry list specifying the index range of the sites to + analyze. To be taken from the reV.config.PointsControl.split_range + property. + sam_configs : dict | str | SAMConfig + SAM input configuration ID(s) and file path(s). Keys are the SAM + config ID(s) which map to the config column in the project points + CSV. Values are either a JSON SAM config file or dictionary of SAM + config inputs. Can also be a single config file path or a + pre loaded SAMConfig object. + tech : str + SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, + solarwaterheat, troughphysicalheat, lineardirectsteam) + The string should be lower-cased with spaces and _ removed. + sites_per_worker : int + Number of sites to run in series on a worker. None defaults to the + resource file chunk size. + res_file : str + Filepath to single resource file, multi-h5 directory, + or /h5_dir/prefix*suffix + curtailment : NoneType | dict | str | config.curtailment.Curtailment + Inputs for curtailment parameters. If not None, curtailment inputs + are expected. Can be: + - Explicit namespace of curtailment variables (dict) + - Pointer to curtailment config json file with path (str) + - Instance of curtailment config object + (config.curtailment.Curtailment) + + Returns + ------- + pc : reV.config.project_points.PointsControl + PointsControl object instance. 
+ """ + if hasattr(points, "df"): + points = points.df + + pp = ProjectPoints(points, sam_configs, tech=tech, res_file=res_file, + curtailment=curtailment) + + # make Points Control instance + if points_range is not None: + # PointsControl is for just a subset of the project points... + # this is the case if generation is being initialized on one + # of many HPC nodes in a large project + pc = PointsControl.split(points_range[0], points_range[1], pp, + sites_per_split=sites_per_worker) + else: + # PointsControl is for all of the project points + pc = PointsControl(pp, sites_per_split=sites_per_worker) + + return pc + +
[docs] @classmethod + def get_pc(cls, points, points_range, sam_configs, tech, + sites_per_worker=None, res_file=None, curtailment=None): + """Get a PointsControl instance. + + Parameters + ---------- + points : int | slice | list | str | pandas.DataFrame | PointsControl + Single site integer, + or slice or list specifying project points, + or string pointing to a project points csv, + or a pre-loaded project points DataFrame, + or a fully instantiated PointsControl object. + points_range : list | None + Optional two-entry list specifying the index range of the sites to + analyze. To be taken from the reV.config.PointsControl.split_range + property. + sam_configs : dict | str | SAMConfig + SAM input configuration ID(s) and file path(s). Keys are the SAM + config ID(s) which map to the config column in the project points + CSV. Values are either a JSON SAM config file or dictionary of SAM + config inputs. Can also be a single config file path or a + pre loaded SAMConfig object. + tech : str + SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, + solarwaterheat, troughphysicalheat, lineardirectsteam) + The string should be lower-cased with spaces and _ removed. + sites_per_worker : int + Number of sites to run in series on a worker. None defaults to the + resource file chunk size. + res_file : str + Filepath to single resource file, multi-h5 directory, + or /h5_dir/prefix*suffix + curtailment : NoneType | dict | str | config.curtailment.Curtailment + Inputs for curtailment parameters. If not None, curtailment inputs + are expected. Can be: + + - Explicit namespace of curtailment variables (dict) + - Pointer to curtailment config json file with path (str) + - Instance of curtailment config object + (config.curtailment.Curtailment) + + + Returns + ------- + pc : reV.config.project_points.PointsControl + PointsControl object instance. 
+ """ + + if tech not in cls.OPTIONS and tech.lower() != ModuleName.ECON: + msg = ('Did not recognize reV-SAM technology string "{}". ' + 'Technology string options are: {}' + .format(tech, list(cls.OPTIONS.keys()))) + logger.error(msg) + raise KeyError(msg) + + if sites_per_worker is None: + # get the optimal sites per split based on res file chunk size + sites_per_worker = cls.get_sites_per_worker(res_file) + + logger.debug('Sites per worker being set to {} for ' + 'PointsControl.'.format(sites_per_worker)) + + if isinstance(points, PointsControl): + # received a pre-intialized instance of pointscontrol + pc = points + else: + pc = cls._pp_to_pc(points, points_range, sam_configs, tech, + sites_per_worker=sites_per_worker, + res_file=res_file, curtailment=curtailment) + + return pc
+ +
[docs] @staticmethod + def get_sites_per_worker(res_file, default=100): + """Get the nominal sites per worker (x-chunk size) for a given file. + + This is based on the concept that it is most efficient for one core to + perform one read on one chunk of resource data, such that chunks will + not have to be read into memory twice and no sites will be read + redundantly. + + Parameters + ---------- + res_file : str + Filepath to single resource file, multi-h5 directory, + or /h5_dir/prefix*suffix + default : int + Sites to be analyzed on a single core if the chunk size cannot be + determined from res_file. + + Returns + ------- + sites_per_worker : int + Nominal sites to be analyzed per worker. This is set to the x-axis + chunk size for windspeed and dni datasets for the WTK and NSRDB + data, respectively. + """ + if not res_file or not os.path.isfile(res_file): + return default + + with Resource(res_file) as res: + if 'wtk' in res_file.lower(): + for dset in res.datasets: + if 'speed' in dset: + # take nominal WTK chunks from windspeed + _, _, chunks = res.get_dset_properties(dset) + break + elif 'nsrdb' in res_file.lower(): + # take nominal NSRDB chunks from dni + _, _, chunks = res.get_dset_properties('dni') + else: + warn('Could not infer dataset chunk size as the resource type ' + 'could not be determined from the filename: {}' + .format(res_file)) + chunks = None + + if chunks is None: + # if chunks not set, go to default + sites_per_worker = default + logger.debug('Sites per worker being set to {} (default) based on ' + 'no set chunk size in {}.' + .format(sites_per_worker, res_file)) + else: + sites_per_worker = chunks[1] + logger.debug('Sites per worker being set to {} based on chunk ' + 'size of {}.'.format(sites_per_worker, res_file)) + + return sites_per_worker
+ +
[docs] @staticmethod + def unpack_futures(futures): + """Combine list of futures results into their native dict format/type. + + Parameters + ---------- + futures : list + List of dictionary futures results. + + Returns + ------- + out : dict + Compiled results of the native future results type (dict). + """ + + out = {} + for x in futures: + out.update(x) + + return out
+ + @staticmethod + @abstractmethod + def _run_single_worker(points_control, tech=None, res_file=None, + output_request=None, scale_outputs=True): + """Run a reV-SAM analysis based on the points_control iterator. + + Parameters + ---------- + points_control : reV.config.PointsControl + A PointsControl instance dictating what sites and configs are run. + tech : str + SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, + solarwaterheat, troughphysicalheat, lineardirectsteam) + The string should be lower-cased with spaces and _ removed. + res_file : str + Filepath to single resource file, multi-h5 directory, + or /h5_dir/prefix*suffix + output_request : list | tuple + Output variables requested from SAM. + scale_outputs : bool + Flag to scale outputs in-place immediately upon returning data. + + Returns + ------- + out : dict + Output dictionary from the SAM reV_run function. Data is scaled + within this function to the datatype specified in cls.OUT_ATTRS. + """ + + def _parse_site_data(self, inp): + """Parse site-specific data from input arg + + Parameters + ---------- + inp : str | pd.DataFrame | None + Site data in .csv or pre-extracted dataframe format. None signifies + that there is no extra site-specific data and that everything is + fully defined in the input h5 and SAM json configs. + + Returns + ------- + site_data : pd.DataFrame + Site-specific data for econ calculation. Rows correspond to sites, + columns are variables. + """ + + if inp is None or inp is False: + # no input, just initialize dataframe with site gids as index + site_data = pd.DataFrame(index=self.project_points.sites) + site_data.index.name = 'gid' + else: + # explicit input, initialize df + if isinstance(inp, str): + if inp.endswith('.csv'): + site_data = pd.read_csv(inp) + elif isinstance(inp, pd.DataFrame): + site_data = inp + else: + # site data was not able to be set. Raise error. 
+ raise Exception('Site data input must be .csv or ' + 'dataframe, but received: {}'.format(inp)) + + if 'gid' not in site_data and site_data.index.name != 'gid': + # require gid as column label or index + raise KeyError('Site data input must have "gid" column ' + 'to match reV site gid.') + + # pylint: disable=no-member + if site_data.index.name != 'gid': + # make gid the dataframe index if not already + site_data = site_data.set_index('gid', drop=True) + + if 'offshore' in site_data: + if site_data['offshore'].sum() > 1: + w = ('Found offshore sites in econ site data input. ' + 'This functionality has been deprecated. ' + 'Please run the reV offshore module to ' + 'calculate offshore wind lcoe.') + warn(w, OffshoreWindInputWarning) + logger.warning(w) + + return site_data + +
[docs] def add_site_data_to_pp(self, site_data): + """Add the site df (site-specific inputs) to project points dataframe. + + This ensures that only the relevant site's data will be passed through + to parallel workers when points_control is iterated and split. + + Parameters + ---------- + site_data : pd.DataFrame + Site-specific data for econ calculation. Rows correspond to sites, + columns are variables. + """ + self.project_points.join_df(site_data, key=self.site_data.index.name)
+ + @abstractmethod + def _parse_output_request(self, req): + """Set the output variables requested from the user. + + Parameters + ---------- + req : list | tuple + Output variables requested from SAM. + + Returns + ------- + output_request : list + Output variables requested from SAM. + """ + + def _get_data_shape(self, dset, n_sites): + """Get the output array shape based on OUT_ATTRS or PySAM.Outputs. + + Parameters + ---------- + dset : str + Variable name to get shape for. + n_sites : int + Number of sites for this data shape. + + Returns + ------- + shape : tuple + 1D or 2D shape tuple for dset. + """ + + if dset in self.OUT_ATTRS: + return self._get_data_shape_from_out_attrs(dset, n_sites) + + if dset in self.project_points.all_sam_input_keys: + return self._get_data_shape_from_sam_config(dset, n_sites) + + return self._get_data_shape_from_pysam(dset, n_sites) + + def _get_data_shape_from_out_attrs(self, dset, n_sites): + """Get data shape from ``OUT_ATTRS`` variable""" + if self.OUT_ATTRS[dset]['type'] == 'array': + return (len(self.time_index), n_sites) + return (n_sites,) + + def _get_data_shape_from_sam_config(self, dset, n_sites): + """Get data shape from SAM input config """ + data = list(self.project_points.sam_inputs.values())[0][dset] + if isinstance(data, (list, tuple, np.ndarray)): + return (*np.array(data).shape, n_sites) + + if isinstance(data, str): + msg = ('Cannot pass through non-scalar SAM input key "{}" ' + 'as an output_request!'.format(dset)) + logger.error(msg) + raise ExecutionError(msg) + + return (n_sites, ) + + def _get_data_shape_from_pysam(self, dset, n_sites): + """Get data shape from PySAM output object""" + if self._sam_obj_default is None: + self._sam_obj_default = self.sam_module.default() + + try: + out_data = getattr(self._sam_obj_default.Outputs, dset) + except AttributeError as e: + msg = ('Could not get data shape for dset "{}" ' + 'from object "{}". 
' + 'Received the following error: "{}"' + .format(dset, self._sam_obj_default, e)) + logger.error(msg) + raise ExecutionError(msg) from e + + if isinstance(out_data, (int, float, str)): + return (n_sites,) + + if len(out_data) % len(self.time_index) == 0: + return (len(self.time_index), n_sites) + + return (len(out_data), n_sites) + + def _init_fpath(self, out_fpath, module): + """Combine directory and filename, ensure .h5 ext., make out dirs.""" + if out_fpath is None: + return + + project_dir, out_fn = os.path.split(out_fpath) + + # ensure output file is an h5 + if not out_fn.endswith('.h5'): + out_fn += '.h5' + + if module not in out_fn: + extension_with_module = "_{}.h5".format(module) + out_fn = out_fn.replace(".h5", extension_with_module) + + # ensure year is in out_fpath + if self.year is not None and str(self.year) not in out_fn: + extension_with_year = "_{}.h5".format(self.year) + out_fn = out_fn.replace(".h5", extension_with_year) + + # create and use optional output dir + if project_dir and not os.path.exists(project_dir): + os.makedirs(project_dir, exist_ok=True) + + self._out_fpath = os.path.join(project_dir, out_fn) + self._run_attrs['out_fpath'] = out_fpath + + def _init_h5(self, mode='w'): + """Initialize the single h5 output file with all output requests. 
+ + Parameters + ---------- + mode : str + Mode to instantiate h5py.File instance + """ + + if self._out_fpath is None: + return + + if 'w' in mode: + logger.info('Initializing full output file: "{}" with mode: {}' + .format(self._out_fpath, mode)) + elif 'a' in mode: + logger.info('Appending data to output file: "{}" with mode: {}' + .format(self._out_fpath, mode)) + + attrs = {d: {} for d in self.output_request} + chunks = {} + dtypes = {} + shapes = {} + + # flag to write time index if profiles are being output + write_ti = False + + for dset in self.output_request: + + tmp = 'other' + if dset in self.OUT_ATTRS: + tmp = dset + + attrs[dset]['units'] = self.OUT_ATTRS[tmp].get('units', + 'unknown') + attrs[dset]['scale_factor'] = \ + self.OUT_ATTRS[tmp].get('scale_factor', 1) + chunks[dset] = self.OUT_ATTRS[tmp].get('chunks', None) + dtypes[dset] = self.OUT_ATTRS[tmp].get('dtype', 'float32') + shapes[dset] = self._get_data_shape(dset, len(self.meta)) + if len(shapes[dset]) > 1: + write_ti = True + + # only write time index if profiles were found in output request + if write_ti: + ti = self.time_index + else: + ti = None + + Outputs.init_h5(self._out_fpath, self.output_request, shapes, + attrs, chunks, dtypes, self.meta, time_index=ti, + configs=self.sam_metas, run_attrs=self.run_attrs, + mode=mode) + + def _init_out_arrays(self, index_0=0): + """Initialize output arrays based on the number of sites that can be + stored in memory safely. + + Parameters + ---------- + index_0 : int + This is the site list index (not gid) for the first site in the + output data. If a node cannot process all sites in-memory at once, + this is used to segment the sites in the current output chunk. 
+ """ + + self._out = {} + self._finished_sites = [] + + # Output chunk is the index range (inclusive) of this set of site outs + self._out_chunk = (index_0, np.min((index_0 + self.site_limit, + len(self.project_points) - 1))) + self._out_n_sites = int(self.out_chunk[1] - self.out_chunk[0]) + 1 + + logger.info('Initializing in-memory outputs for {} sites with gids ' + '{} through {} inclusive (site list index {} through {})' + .format(self._out_n_sites, + self.project_points.sites[self.out_chunk[0]], + self.project_points.sites[self.out_chunk[1]], + self.out_chunk[0], self.out_chunk[1])) + + for request in self.output_request: + dtype = 'float32' + if request in self.OUT_ATTRS and self.scale_outputs: + dtype = self.OUT_ATTRS[request].get('dtype', 'float32') + + shape = self._get_data_shape(request, self._out_n_sites) + # initialize the output request as an array of zeros + self._out[request] = np.zeros(shape, dtype=dtype) + + def _check_sam_version_inputs(self): + """Check the PySAM version and input keys. Fix where necessary.""" + for key, parameters in self.project_points.sam_inputs.items(): + updated = PySamVersionChecker.run(self.tech, parameters) + sam_obj = self._points_control._project_points._sam_config_obj + sam_obj._inputs[key] = updated + +
[docs] def unpack_output(self, site_gid, site_output): + """Unpack a SAM SiteOutput object to the output attribute. + + Parameters + ---------- + site_gid : int + Resource-native site gid (index). + site_output : dict + SAM site output object. + """ + + # iterate through the site results + for var, value in site_output.items(): + if var not in self._out: + raise KeyError('Tried to collect output variable "{}", but it ' + 'was not yet initialized in the output ' + 'dictionary.') + + # get the index in the output array for the current site + i = self.site_index(site_gid, out_index=True) + + # check to see if we have exceeded the current output chunk. + # If so, flush data to disk and reset the output initialization + if i + 1 > self._out_n_sites: + self.flush() + global_site_index = self.site_index(site_gid) + self._init_out_arrays(index_0=global_site_index) + i = self.site_index(site_gid, out_index=True) + + if isinstance(value, (list, tuple, np.ndarray)): + if not isinstance(value, np.ndarray): + value = np.array(value) + + self._out[var][:, i] = value.T + + elif value != 0: + self._out[var][i] = value
+ +
[docs] def site_index(self, site_gid, out_index=False): + """Get the index corresponding to the site gid. + + Parameters + ---------- + site_gid : int + Resource-native site index (gid). + out_index : bool + Option to get output index (if true) which is the column index in + the current in-memory output array, or (if false) the global site + index from the project points site list. + + Returns + ------- + index : int + Global site index if out_index=False, otherwise column index in + the current in-memory output array. + """ + + # get the index for site_gid in the (global) project points site list. + global_site_index = self.project_points.sites.index(site_gid) + + if not out_index: + output_index = global_site_index + else: + output_index = global_site_index - self.out_chunk[0] + if output_index < 0: + raise ValueError('Attempting to set output data for site with ' + 'gid {} to global site index {}, which was ' + 'already set based on the current output ' + 'index chunk of {}' + .format(site_gid, global_site_index, + self.out_chunk)) + + return output_index
+ +
[docs] def flush(self): + """Flush the output data in self.out attribute to disk in .h5 format. + + The data to be flushed is accessed from the instance attribute + "self.out". The disk target is based on the instance attributes + "self._out_fpath". Data is not flushed if _fpath is None or if .out is + empty. + """ + + # handle output file request if file is specified and .out is not empty + if isinstance(self._out_fpath, str) and self._out: + logger.info('Flushing outputs to disk, target file: "{}"' + .format(self._out_fpath)) + + # get the slice of indices to write outputs to + islice = slice(self.out_chunk[0], self.out_chunk[1] + 1) + + # open output file in append mode to add output results to + with Outputs(self._out_fpath, mode='a') as f: + + # iterate through all output requests writing each as a dataset + for dset, arr in self._out.items(): + if len(arr.shape) == 1: + # write array of scalars + f[dset, islice] = arr + else: + # write 2D array of profiles + f[dset, :, islice] = arr + + logger.debug('Flushed output successfully to disk.')
+ + def _pre_split_pc(self, pool_size=None): + """Pre-split project control iterator into sub chunks to further + split the parallelization. + + Parameters + ---------- + pool_size : int + Number of futures to submit to a single process pool for + parallel futures. If ``None``, the pool size is set to + ``os.cpu_count() * 2``. By default, ``None``. + + Returns + ------- + N : int + Total number of points control split instances. + pc_chunks : list + List of lists of points control split instances. + """ + N = 0 + pc_chunks = [] + i_chunk = [] + if pool_size is None: + pool_size = os.cpu_count() * 2 + + for i, split in enumerate(self.points_control): + N += 1 + i_chunk.append(split) + if (i + 1) % pool_size == 0: + pc_chunks.append(i_chunk) + i_chunk = [] + + if i_chunk: + pc_chunks.append(i_chunk) + + logger.debug('Pre-splitting points control into {} chunks with the ' + 'following chunk sizes: {}' + .format(len(pc_chunks), [len(x) for x in pc_chunks])) + return N, pc_chunks + + def _parallel_run(self, max_workers=None, pool_size=None, timeout=1800, + **kwargs): + """Execute parallel compute. + + Parameters + ---------- + max_workers : None | int + Number of workers. None will default to cpu count. + pool_size : int + Number of futures to submit to a single process pool for + parallel futures. If ``None``, the pool size is set to + ``os.cpu_count() * 2``. By default, ``None``. + timeout : int | float + Number of seconds to wait for parallel run iteration to complete + before returning zeros. + kwargs : dict + Keyword arguments to self._run_single_worker(). 
+ """ + if pool_size is None: + pool_size = os.cpu_count() * 2 + if max_workers is None: + max_workers = os.cpu_count() + logger.info('Running parallel execution with max_workers={}' + .format(max_workers)) + i = 0 + N, pc_chunks = self._pre_split_pc(pool_size=pool_size) + for j, pc_chunk in enumerate(pc_chunks): + logger.debug('Starting process pool for points control ' + 'iteration {} out of {}' + .format(j + 1, len(pc_chunks))) + + failed_futures = False + chunks = {} + futures = [] + loggers = [__name__, 'reV.gen', 'reV.econ', 'reV'] + with SpawnProcessPool(max_workers=max_workers, + loggers=loggers) as exe: + for pc in pc_chunk: + future = exe.submit(self._run_single_worker, pc, **kwargs) + futures.append(future) + chunks[future] = pc + + for future in futures: + i += 1 + try: + result = future.result(timeout=timeout) + except TimeoutError: + failed_futures = True + sites = chunks[future].project_points.sites + result = self._handle_failed_future(future, i, sites, + timeout) + + self.out = result + + mem = psutil.virtual_memory() + m = ('Parallel run at iteration {0} out of {1}. ' + 'Memory utilization is {2:.3f} GB out of {3:.3f} GB ' + 'total ({4:.1f}% used, intended limit of {5:.1f}%)' + .format(i, N, mem.used / 1e9, mem.total / 1e9, + 100 * mem.used / mem.total, + 100 * self.mem_util_lim)) + logger.info(m) + + if failed_futures: + logger.info('Forcing pool shutdown after failed futures.') + exe.shutdown(wait=False) + logger.info('Forced pool shutdown complete.') + + self.flush() + + def _handle_failed_future(self, future, i, sites, timeout): + """Handle a failed future and return zeros. + + Parameters + ---------- + future : concurrent.futures.Future + Failed future to cancel. + i : int + Iteration number for logging + sites : list + List of site gids belonging to this failed future. + timeout : int + Number of seconds to wait for parallel run iteration to complete + before returning zeros. + """ + + w = ('Iteration {} hit the timeout limit of {} seconds! 
Passing zeros.' + .format(i, timeout)) + logger.warning(w) + warn(w, OutputWarning) + + site_out = {k: 0 for k in self.output_request} + result = {site: site_out for site in sites} + + try: + cancelled = future.cancel() + except Exception as e: + w = 'Could not cancel future! Received exception: {}'.format(e) + logger.warning(w) + warn(w, ParallelExecutionWarning) + + if not cancelled: + w = 'Could not cancel future!' + logger.warning(w) + warn(w, ParallelExecutionWarning) + + return result
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/generation/generation.html b/_modules/reV/generation/generation.html new file mode 100644 index 000000000..78a76981f --- /dev/null +++ b/_modules/reV/generation/generation.html @@ -0,0 +1,1470 @@ + + + + + + reV.generation.generation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.generation.generation

+# -*- coding: utf-8 -*-
+"""
+reV generation module.
+"""
+import os
+import copy
+import json
+import logging
+
+import pprint
+import numpy as np
+import pandas as pd
+
+from reV.generation.base import BaseGen
+from reV.utilities.exceptions import (ProjectPointsValueError, InputError)
+from reV.SAM.generation import (Geothermal,
+                                PvWattsv5,
+                                PvWattsv7,
+                                PvWattsv8,
+                                PvSamv1,
+                                TcsMoltenSalt,
+                                WindPower,
+                                SolarWaterHeat,
+                                TroughPhysicalHeat,
+                                LinearDirectSteam,
+                                MhkWave)
+from reV.utilities import ModuleName
+
+from rex.resource import Resource
+from rex.multi_file_resource import MultiFileResource
+from rex.multi_res_resource import MultiResolutionResource
+from rex.utilities.utilities import check_res_file
+
+logger = logging.getLogger(__name__)
+
+ATTR_DIR = os.path.dirname(os.path.realpath(__file__))
+ATTR_DIR = os.path.join(ATTR_DIR, 'output_attributes')
+with open(os.path.join(ATTR_DIR, 'other.json'), 'r') as f:
+    OTHER_ATTRS = json.load(f)
+with open(os.path.join(ATTR_DIR, 'generation.json'), 'r') as f:
+    GEN_ATTRS = json.load(f)
+with open(os.path.join(ATTR_DIR, 'linear_fresnel.json'), 'r') as f:
+    LIN_ATTRS = json.load(f)
+with open(os.path.join(ATTR_DIR, 'solar_water_heat.json'), 'r') as f:
+    SWH_ATTRS = json.load(f)
+with open(os.path.join(ATTR_DIR, 'trough_heat.json'), 'r') as f:
+    TPPH_ATTRS = json.load(f)
+
+
+
[docs]class Gen(BaseGen): + """Gen""" + + # Mapping of reV technology strings to SAM generation objects + OPTIONS = {'geothermal': Geothermal, + 'pvwattsv5': PvWattsv5, + 'pvwattsv7': PvWattsv7, + 'pvwattsv8': PvWattsv8, + 'pvsamv1': PvSamv1, + 'tcsmoltensalt': TcsMoltenSalt, + 'solarwaterheat': SolarWaterHeat, + 'troughphysicalheat': TroughPhysicalHeat, + 'lineardirectsteam': LinearDirectSteam, + 'windpower': WindPower, + 'mhkwave': MhkWave + } + """reV technology options.""" + + # Mapping of reV generation outputs to scale factors and units. + # Type is scalar or array and corresponds to the SAM single-site output + OUT_ATTRS = copy.deepcopy(OTHER_ATTRS) + OUT_ATTRS.update(GEN_ATTRS) + OUT_ATTRS.update(LIN_ATTRS) + OUT_ATTRS.update(SWH_ATTRS) + OUT_ATTRS.update(TPPH_ATTRS) + OUT_ATTRS.update(BaseGen.ECON_ATTRS) + + def __init__(self, technology, project_points, sam_files, resource_file, + low_res_resource_file=None, output_request=('cf_mean',), + site_data=None, curtailment=None, gid_map=None, + drop_leap=False, sites_per_worker=None, + memory_utilization_limit=0.4, scale_outputs=True, + write_mapped_gids=False, bias_correct=None): + """reV generation analysis class. + + ``reV`` generation analysis runs SAM simulations by piping in + renewable energy resource data (usually from the NSRDB or WTK), + loading the SAM config, and then executing the PySAM compute + module for a given technology. See the documentation for the + ``reV`` SAM class (e.g. :class:`reV.SAM.generation.WindPower`, + :class:`reV.SAM.generation.PvWattsv8`, + :class:`reV.SAM.generation.Geothermal`, etc.) for info on the + allowed and/or required SAM config file inputs. If economic + parameters are supplied in the SAM config, then you can bundle a + "follow-on" econ calculation by just adding the desired econ + output keys to the `output_request`. 
You can request ``reV`` to ' + run the analysis for one or more "sites", which correspond to + the meta indices in the resource data (also commonly called the + ``gid's``). + + Examples + -------- + The following is an example of the most simple way to run reV + generation. Note that the ``TESTDATADIR`` refers to the local cloned + repository and will need to be replaced with a valid path if you + installed ``reV`` via a simple pip install. + + >>> import os + >>> from reV import Gen, TESTDATADIR + >>> + >>> sam_tech = 'pvwattsv7' + >>> sites = 0 + >>> fp_sam = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13.json') + >>> fp_res = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2013.h5') + >>> + >>> gen = Gen(sam_tech, sites, fp_sam, fp_res) + >>> gen.run() + >>> + >>> gen.out + {'cf_mean': array([0.16966143], dtype=float32)} + >>> + >>> sites = [3, 4, 7, 9] + >>> req = ('cf_mean', 'cf_profile', 'lcoe_fcr') + >>> gen = Gen(sam_tech, sites, fp_sam, fp_res, output_request=req) + >>> gen.run() + >>> + >>> gen.out + {'lcoe_fcr': array([131.39166, 131.31221, 127.54539, 125.49656]), + 'cf_mean': array([0.17713654, 0.17724372, 0.1824783 , 0.1854574 ]), + 'cf_profile': array([[0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.], + ..., + [0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.]])} + + Parameters + ---------- + technology : str + String indicating which SAM technology to analyze. Must be + one of the keys of + :attr:`~reV.generation.generation.Gen.OPTIONS`. The string + should be lower-cased with spaces and underscores removed. + project_points : int | list | tuple | str | dict | pd.DataFrame | slice + Input specifying which sites to process. A single integer + representing the generation GID of a site may be specified + to evaluate reV at a single location. A list or tuple of + integers (or slice) representing the generation GIDs of + multiple sites can be specified to evaluate reV at multiple + specific locations. 
A string pointing to a project points + CSV file may also be specified. Typically, the CSV contains + two columns: + + - ``gid``: Integer specifying the generation GID of each + site. + - ``config``: Key in the `sam_files` input dictionary + (see below) corresponding to the SAM configuration to + use for each particular site. This value can also be + ``None`` (or left out completely) if you specify only + a single SAM configuration file as the `sam_files` + input. + + The CSV file may also contain site-specific inputs by + including a column named after a config keyword (e.g. a + column called ``capital_cost`` may be included to specify a + site-specific capital cost value for each location). Columns + that do not correspond to a config key may also be included, + but they will be ignored. A DataFrame following the same + guidelines as the CSV input (or a dictionary that can be + used to initialize such a DataFrame) may be used for this + input as well. + + .. Note:: By default, the generation GID of each site is + assumed to match the resource GID to be evaluated for that + site. However, unique generation GID's can be mapped to + non-unique resource GID's via the `gid_map` input (see the + documentation for `gid_map` for more details). + + sam_files : dict | str + A dictionary mapping SAM input configuration ID(s) to SAM + configuration(s). Keys are the SAM config ID(s) which + correspond to the ``config`` column in the project points + CSV. Values for each key are either a path to a + corresponding SAM config file or a full dictionary + of SAM config inputs. For example:: + + sam_files = { + "default": "/path/to/default/sam.json", + "onshore": "/path/to/onshore/sam_config.yaml", + "offshore": { + "sam_key_1": "sam_value_1", + "sam_key_2": "sam_value_2", + ... + }, + ... + } + + This input can also be a string pointing to a single SAM + config file. In this case, the ``config`` column of the + CSV points input should be set to ``None`` or left out + completely. 
See the documentation for the ``reV`` SAM class + (e.g. :class:`reV.SAM.generation.WindPower`, + :class:`reV.SAM.generation.PvWattsv8`, + :class:`reV.SAM.generation.Geothermal`, etc.) for + info on the allowed and/or required SAM config file inputs. + resource_file : str + Filepath to resource data. This input can be path to a + single resource HDF5 file, a path to a directory containing + data spread across multiple HDF5 files, or a path including + a wildcard input like ``/h5_dir/prefix*suffix``. In all + cases, the resource data must be readable by + :py:class:`rex.resource.Resource` + or :py:class:`rex.multi_file_resource.MultiFileResource`. + (i.e. the resource data conform to the + `rex data format <https://tinyurl.com/3fy7v5kx>`_). This + means the data file(s) must contain a 1D ``time_index`` + dataset indicating the UTC time of observation, a 1D + ``meta`` dataset represented by a DataFrame with + site-specific columns, and 2D resource datasets that match + the dimensions of (``time_index``, ``meta``). The time index + must start at 00:00 of January 1st of the year under + consideration, and its shape must be a multiple of 8760. + + .. Note:: If executing ``reV`` from the command line, this + path can contain brackets ``{}`` that will be filled in by + the `analysis_years` input. + + .. Important:: If you are using custom resource data (i.e. + not NSRDB/WTK/Sup3rCC, etc.), ensure the following: + + - The data conforms to the + `rex data format <https://tinyurl.com/3fy7v5kx>`_. + - The ``meta`` DataFrame is organized such that every + row is a pixel and at least the columns + ``latitude``, ``longitude``, ``timezone``, and + ``elevation`` are given for each location. + - The time index and associated temporal data is in + UTC. + - The latitude is between -90 and 90 and longitude is + between -180 and 180. + - For solar data, ensure the DNI/DHI are not zero. You + can calculate one of these these inputs from the + other using the relationship + + .. 
math:: GHI = DNI * cos(SZA) + DHI + + low_res_resource_file : str, optional + Optional low resolution resource file that will be + dynamically mapped+interpolated to the nominal-resolution + `resource_file`. This needs to be of the same format as + `resource_file` - both files need to be handled by the + same ``rex Resource`` handler (e.g. ``WindResource``). All + of the requirements from the `resource_file` apply to this + input as well. If ``None``, no dynamic mapping to higher + resolutions is performed. By default, ``None``. + output_request : list | tuple, optional + List of output variables requested from SAM. Can be any + of the parameters in the "Outputs" group of the PySAM module + (e.g. :py:class:`PySAM.Windpower.Windpower.Outputs`, + :py:class:`PySAM.Pvwattsv8.Pvwattsv8.Outputs`, + :py:class:`PySAM.Geothermal.Geothermal.Outputs`, etc.) being + executed. This list can also include a select number of SAM + config/resource parameters to include in the output: + any key in any of the + `output attribute JSON files <https://tinyurl.com/4bmrpe3j/>`_ + may be requested. If ``cf_mean`` is not included in this + list, it will automatically be added. Time-series profiles + requested via this input are output in UTC. + + .. Note:: If you are performing ``reV`` solar runs using + ``PVWatts`` and would like ``reV`` to include AC capacity + values in your aggregation/supply curves, then you must + include the ``"dc_ac_ratio"`` time series as an output in + `output_request` when running ``reV`` generation. The AC + capacity outputs will automatically be added during the + aggregation/supply curve step if the ``"dc_ac_ratio"`` + dataset is detected in the generation file. + + By default, ``('cf_mean',)``. + site_data : str | pd.DataFrame, optional + Site-specific input data for SAM calculation. If this input + is a string, it should be a path that points to a CSV file. + Otherwise, this input should be a DataFrame with + pre-extracted site data. 
Rows in this table should match + the input sites via a ``gid`` column. The rest of the + columns should match configuration input keys that will take + site-specific values. Note that some or all site-specific + inputs can be specified via the `project_points` input + table instead. If ``None``, no site-specific data is + considered. By default, ``None``. + curtailment : dict | str, optional + Inputs for curtailment parameters, which can be: + + - Explicit namespace of curtailment variables (dict) + - Pointer to curtailment config file with path (str) + + The allowed key-value input pairs in the curtailment + configuration are documented as properties of the + :class:`reV.config.curtailment.Curtailment` class. If + ``None``, no curtailment is modeled. By default, ``None``. + gid_map : dict | str, optional + Mapping of unique integer generation gids (keys) to single + integer resource gids (values). This enables unique + generation gids in the project points to map to non-unique + resource gids, which can be useful when evaluating multiple + resource datasets in ``reV`` (e.g., forecasted ECMWF + resource data to complement historical WTK meteorology). + This input can be a pre-extracted dictionary or a path to a + JSON or CSV file. If this input points to a CSV file, the + file must have the columns ``gid`` (which matches the + project points) and ``gid_map`` (gids to extract from the + resource input). If ``None``, the GID values in the project + points are assumed to match the resource GID values. + By default, ``None``. + drop_leap : bool, optional + Drop leap day instead of final day of year when handling + leap years. By default, ``False``. + sites_per_worker : int, optional + Number of sites to run in series on a worker. ``None`` + defaults to the resource file chunk size. + By default, ``None``. + memory_utilization_limit : float, optional + Memory utilization limit (fractional). Must be a value + between 0 and 1. 
This input sets how many site results will + be stored in-memory at any given time before flushing to + disk. By default, ``0.4``. + scale_outputs : bool, optional + Flag to scale outputs in-place immediately upon ``Gen`` + returning data. By default, ``True``. + write_mapped_gids : bool, optional + Option to write mapped gids to output meta instead of + resource gids. By default, ``False``. + bias_correct : str | pd.DataFrame, optional + Optional DataFrame or CSV filepath to a wind or solar + resource bias correction table. This has columns: + + - ``gid``: GID of site (can be index name) + - ``adder``: Value to add to resource at each site + - ``scalar``: Value to scale resource at each site by + + The ``gid`` field should match the true resource ``gid`` + regardless of the optional ``gid_map`` input. If both + ``adder`` and ``scalar`` are present, the wind or solar + resource is corrected by :math:`(res*scalar)+adder`. If + *either* is missing, ``scalar`` defaults to 1 and + ``adder`` to 0. Only `windspeed` **or** `GHI` + `DNI` are + corrected, depending on the technology (wind for the former, + solar for the latter). `GHI` and `DNI` are corrected with + the same correction factors. If ``None``, no corrections are + applied. By default, ``None``. + """ + pc = self.get_pc(points=project_points, points_range=None, + sam_configs=sam_files, tech=technology, + sites_per_worker=sites_per_worker, + res_file=resource_file, + curtailment=curtailment) + + super().__init__(pc, output_request, site_data=site_data, + drop_leap=drop_leap, + memory_utilization_limit=memory_utilization_limit, + scale_outputs=scale_outputs) + + if self.tech not in self.OPTIONS: + msg = ('Requested technology "{}" is not available. 
' + 'reV generation can analyze the following ' + 'SAM technologies: {}' + .format(self.tech, list(self.OPTIONS.keys()))) + logger.error(msg) + raise KeyError(msg) + + self.write_mapped_gids = write_mapped_gids + self._res_file = resource_file + self._lr_res_file = low_res_resource_file + self._sam_module = self.OPTIONS[self.tech] + self._run_attrs['sam_module'] = self._sam_module.MODULE + self._run_attrs['res_file'] = resource_file + + self._multi_h5_res, self._hsds = check_res_file(resource_file) + self._gid_map = self._parse_gid_map(gid_map) + self._nn_map = self._parse_nn_map() + self._bc = self._parse_bc(bias_correct) + + @property + def res_file(self): + """Get the resource filename and path. + + Returns + ------- + res_file : str + Filepath to single resource file, multi-h5 directory, + or /h5_dir/prefix*suffix + """ + return self._res_file + + @property + def lr_res_file(self): + """Get the (optional) low-resolution resource filename and path. + + Returns + ------- + str | None + """ + return self._lr_res_file + + @property + def meta(self): + """Get resource meta for all sites in project points. + + Returns + ------- + meta : pd.DataFrame + Meta data df for sites in project points. Column names are meta + data variables, rows are different sites. The row index + does not indicate the site number if the project points are + non-sequential or do not start from 0, so a 'gid' column is added. 
+ """ + if self._meta is None: + res_cls = Resource + kwargs = {'hsds': self._hsds} + if self._multi_h5_res: + res_cls = MultiFileResource + kwargs = {} + + res_gids = self.project_points.sites + if self._gid_map is not None: + res_gids = [self._gid_map[i] for i in res_gids] + + with res_cls(self.res_file, **kwargs) as res: + meta_len = res.shapes['meta'][0] + + if np.max(res_gids) > meta_len: + msg = ('ProjectPoints has a max site gid of {} which is ' + 'out of bounds for the meta data of len {} from ' + 'resource file: {}' + .format(np.max(res_gids), + meta_len, self.res_file)) + logger.error(msg) + raise ProjectPointsValueError(msg) + + self._meta = res['meta', res_gids] + + self._meta.loc[:, 'gid'] = res_gids + if self.write_mapped_gids: + self._meta.loc[:, 'gid'] = self.project_points.sites + self._meta.index = self.project_points.sites + self._meta.index.name = 'gid' + self._meta.loc[:, 'reV_tech'] = self.project_points.tech + + return self._meta + + @property + def time_index(self): + """Get the generation resource time index data. 
+ + Returns + ------- + _time_index : pandas.DatetimeIndex + Time-series datetime index + """ + if self._time_index is None: + if not self._multi_h5_res: + res_cls = Resource + kwargs = {'hsds': self._hsds} + else: + res_cls = MultiFileResource + kwargs = {} + + with res_cls(self.res_file, **kwargs) as res: + time_index = res.time_index + + downscale = self.project_points.sam_config_obj.downscale + step = self.project_points.sam_config_obj.time_index_step + if downscale is not None: + from rex.utilities.downscale import make_time_index + year = time_index.year[0] + ds_freq = downscale['frequency'] + time_index = make_time_index(year, ds_freq) + logger.info('reV solar generation running with temporal ' + 'downscaling frequency "{}" with final ' + 'time_index length {}' + .format(ds_freq, len(time_index))) + elif step is not None: + time_index = time_index[::step] + + self._time_index = self.handle_leap_ti(time_index, + drop_leap=self._drop_leap) + + return self._time_index + + @classmethod + def _run_single_worker(cls, points_control, tech=None, res_file=None, + lr_res_file=None, output_request=None, + scale_outputs=True, gid_map=None, nn_map=None, + bias_correct=None): + """Run a SAM generation analysis based on the points_control iterator. + + Parameters + ---------- + points_control : reV.config.PointsControl + A PointsControl instance dictating what sites and configs are run. + tech : str + SAM technology to analyze (pvwattsv7, windpower, tcsmoltensalt, + solarwaterheat, troughphysicalheat, lineardirectsteam) + The string should be lower-cased with spaces and _ removed. + res_file : str + Filepath to single resource file, multi-h5 directory, + or /h5_dir/prefix*suffix + lr_res_file : str | None + Optional low resolution resource file that will be dynamically + mapped+interpolated to the nominal-resolution res_file. This + needs to be of the same format as resource_file, e.g. 
they both + need to be handled by the same rex Resource handler such as + WindResource + output_request : list | tuple + Output variables requested from SAM. + scale_outputs : bool + Flag to scale outputs in-place immediately upon Gen returning data. + gid_map : None | dict + Mapping of unique integer generation gids (keys) to single integer + resource gids (values). This enables the user to input unique + generation gids in the project points that map to non-unique + resource gids. This can be None or a pre-extracted dict. + nn_map : np.ndarray + Optional 1D array of nearest neighbor mappings associated with the + res_file to lr_res_file spatial mapping. For details on this + argument, see the rex.MultiResolutionResource docstring. + bias_correct : None | pd.DataFrame + None if not provided or extracted DataFrame with wind or solar + resource bias correction table. This has columns: gid (can be index + name), adder, scalar. If both adder and scalar are present, the + wind or solar resource is corrected by (res*scalar)+adder. If + either adder or scalar is not present, scalar defaults to 1 and + adder to 0. Only windspeed or GHI+DNI are corrected depending on + the technology. GHI and DNI are corrected with the same correction + factors. + + Returns + ------- + out : dict + Output dictionary from the SAM reV_run function. Data is scaled + within this function to the datatype specified in Gen.OUT_ATTRS. + """ + + # Extract the site df from the project points df. 
+ site_df = points_control.project_points.df + site_df = site_df.set_index('gid', drop=True) + + # run generation method for specified technology + try: + out = cls.OPTIONS[tech].reV_run( + points_control, res_file, site_df, + lr_res_file=lr_res_file, + output_request=output_request, + gid_map=gid_map, nn_map=nn_map, + bias_correct=bias_correct) + + except Exception as e: + out = {} + logger.exception('Worker failed for PC: {}'.format(points_control)) + raise e + + if scale_outputs: + # dtype convert in-place so no float data is stored unnecessarily + for site, site_output in out.items(): + for k in site_output.keys(): + # iterate through variable names in each site's output dict + if k in cls.OUT_ATTRS: + # get dtype and scale for output variable name + dtype = cls.OUT_ATTRS[k].get('dtype', 'float32') + scale_factor = cls.OUT_ATTRS[k].get('scale_factor', 1) + + # apply scale factor and dtype + out[site][k] *= scale_factor + + if np.issubdtype(dtype, np.integer): + # round after scaling if integer dtype + out[site][k] = np.round(out[site][k]) + + if isinstance(out[site][k], np.ndarray): + # simple astype for arrays + out[site][k] = out[site][k].astype(dtype) + else: + # use numpy array conversion for scalar values + out[site][k] = np.array([out[site][k]], + dtype=dtype)[0] + + return out + + def _parse_gid_map(self, gid_map): + """ + Parameters + ---------- + gid_map : None | dict | str + This can be None, a pre-extracted dict, or a filepath to json or + csv. If this is a csv, it must have the columns "gid" (which + matches the project points) and "gid_map" (gids to extract from the + resource input) + + Returns + ------- + gid_map : None | dict + Mapping of unique integer generation gids (keys) to single integer + resource gids (values). This enables the user to input unique + generation gids in the project points that map to non-unique + resource gids. 
+ """ + + if isinstance(gid_map, str): + if gid_map.endswith('.csv'): + gid_map = pd.read_csv(gid_map).to_dict() + assert 'gid' in gid_map, 'Need "gid" in gid_map column' + assert 'gid_map' in gid_map, 'Need "gid_map" in gid_map column' + gid_map = {gid_map['gid'][i]: gid_map['gid_map'][i] + for i in gid_map['gid'].keys()} + + elif gid_map.endswith('.json'): + with open(gid_map, 'r') as f: + gid_map = json.load(f) + + if isinstance(gid_map, dict): + if not self._multi_h5_res: + res_cls = Resource + kwargs = {'hsds': self._hsds} + else: + res_cls = MultiFileResource + kwargs = {} + + with res_cls(self.res_file, **kwargs) as res: + for gen_gid, res_gid in gid_map.items(): + msg1 = ('gid_map values must all be int but received ' + '{}: {}'.format(gen_gid, res_gid)) + msg2 = ('Could not find the gen_gid to res_gid mapping ' + '{}: {} in the resource meta data.' + .format(gen_gid, res_gid)) + assert isinstance(gen_gid, int), msg1 + assert isinstance(res_gid, int), msg1 + assert res_gid in res.meta.index.values, msg2 + + for gen_gid in self.project_points.sites: + msg3 = ('Could not find the project points gid {} in the ' + 'gen_gid input of the gid_map.'.format(gen_gid)) + assert gen_gid in gid_map, msg3 + + elif gid_map is not None: + msg = ('Could not parse gid_map, must be None, dict, or path to ' + 'csv or json, but received: {}'.format(gid_map)) + logger.error(msg) + raise InputError(msg) + + return gid_map + + def _parse_nn_map(self): + """Parse a nearest-neighbor spatial mapping array if lr_res_file is + provided (resource data is at two resolutions and the low-resolution + data must be mapped to the nominal-resolution data) + + Returns + ------- + nn_map : np.ndarray + Optional 1D array of nearest neighbor mappings associated with the + res_file to lr_res_file spatial mapping. For details on this + argument, see the rex.MultiResolutionResource docstring. 
+ """ + nn_map = None + if self.lr_res_file is not None: + + handler_class = Resource + if '*' in self.res_file or '*' in self.lr_res_file: + handler_class = MultiFileResource + + with handler_class(self.res_file) as hr_res: + with handler_class(self.lr_res_file) as lr_res: + logger.info('Making nearest neighbor map for multi ' + 'resolution resource data...') + nn_d, nn_map = MultiResolutionResource.make_nn_map(hr_res, + lr_res) + logger.info('Done making nearest neighbor map for multi ' + 'resolution resource data!') + + logger.info('Made nearest neighbor mapping between nominal-' + 'resolution and low-resolution resource files. ' + 'Min / mean / max dist: {:.3f} / {:.3f} / {:.3f}' + .format(nn_d.min(), nn_d.mean(), nn_d.max())) + + return nn_map + + @staticmethod + def _parse_bc(bias_correct): + """Parse the bias correction data. + + Parameters + ---------- + bias_correct : str | pd.DataFrame | None + Optional DataFrame or csv filepath to a wind or solar resource bias + correction table. This has columns: gid (can be index name), adder, + scalar. If both adder and scalar are present, the wind or solar + resource is corrected by (res*scalar)+adder. If either is not + present, scalar defaults to 1 and adder to 0. Only windspeed or + GHI+DNI are corrected depending on the technology. GHI and DNI are + corrected with the same correction factors. + + Returns + ------- + bias_correct : None | pd.DataFrame + None if not provided or extracted DataFrame with wind or solar + resource bias correction table. This has columns: gid (can be index + name), adder, scalar. If both adder and scalar are present, the + wind or solar resource is corrected by (res*scalar)+adder. If + either adder or scalar is not present, scalar defaults to 1 and + adder to 0. Only windspeed or GHI+DNI are corrected depending on + the technology. GHI and DNI are corrected with the same correction + factors. 
+ """ + + if isinstance(bias_correct, type(None)): + return bias_correct + + elif isinstance(bias_correct, str): + bias_correct = pd.read_csv(bias_correct) + + msg = ('Bias correction data must be a filepath to csv or a dataframe ' + 'but received: {}'.format(type(bias_correct))) + assert isinstance(bias_correct, pd.DataFrame), msg + + if 'adder' not in bias_correct: + logger.info('Bias correction table provided, but "adder" not ' + 'found, defaulting to 0.') + bias_correct['adder'] = 0 + + if 'scalar' not in bias_correct: + logger.info('Bias correction table provided, but "scalar" not ' + 'found, defaulting to 1.') + bias_correct['scalar'] = 1 + + msg = ('Bias correction table must have "gid" column but only found: ' + '{}'.format(list(bias_correct.columns))) + assert 'gid' in bias_correct or bias_correct.index.name == 'gid', msg + + if bias_correct.index.name != 'gid': + bias_correct = bias_correct.set_index('gid') + + return bias_correct + + def _parse_output_request(self, req): + """Set the output variables requested from generation. + + Parameters + ---------- + req : list | tuple + Output variables requested from SAM. + + Returns + ------- + output_request : list + Output variables requested from SAM. + """ + + output_request = self._output_request_type_check(req) + + # ensure that cf_mean is requested from output + if 'cf_mean' not in output_request: + output_request.append('cf_mean') + + for request in output_request: + if request not in self.OUT_ATTRS: + msg = ('User output request "{}" not recognized. ' + 'Will attempt to extract from PySAM.'.format(request)) + logger.debug(msg) + + return list(set(output_request)) + +
[docs] def run(self, out_fpath=None, max_workers=1, timeout=1800, + pool_size=None): + """Execute a parallel reV generation run with smart data flushing. + + Parameters + ---------- + out_fpath : str, optional + Path to output file. If ``None``, no output file will + be written. If the filepath is specified but the module name + (generation) and/or resource data year is not included, the + module name and/or resource data year will get added to the + output file name. By default, ``None``. + max_workers : int, optional + Number of local workers to run on. By default, ``1``. + timeout : int, optional + Number of seconds to wait for parallel run iteration to + complete before returning zeros. By default, ``1800`` + seconds. + pool_size : int, optional + Number of futures to submit to a single process pool for + parallel futures. If ``None``, the pool size is set to + ``os.cpu_count() * 2``. By default, ``None``. + + Returns + ------- + str | None + Path to output HDF5 file, or ``None`` if results were not + written to disk. 
+ """ + # initialize output file + self._init_fpath(out_fpath, module=ModuleName.GENERATION) + self._init_h5() + self._init_out_arrays() + if pool_size is None: + pool_size = os.cpu_count() * 2 + + kwargs = {'tech': self.tech, + 'res_file': self.res_file, + 'lr_res_file': self.lr_res_file, + 'output_request': self.output_request, + 'scale_outputs': self.scale_outputs, + 'gid_map': self._gid_map, + 'nn_map': self._nn_map, + 'bias_correct': self._bc} + + logger.info('Running reV generation for: {}' + .format(self.points_control)) + logger.debug('The following project points were specified: "{}"' + .format(self.project_points)) + logger.debug('The following SAM configs are available to this run:\n{}' + .format(pprint.pformat(self.sam_configs, indent=4))) + logger.debug('The SAM output variables have been requested:\n{}' + .format(self.output_request)) + + # use serial or parallel execution control based on max_workers + try: + if max_workers == 1: + logger.debug('Running serial generation for: {}' + .format(self.points_control)) + for i, pc_sub in enumerate(self.points_control): + self.out = self._run_single_worker(pc_sub, **kwargs) + logger.info('Finished reV gen serial compute for: {} ' + '(iteration {} out of {})' + .format(pc_sub, i + 1, + len(self.points_control))) + self.flush() + else: + logger.debug('Running parallel generation for: {}' + .format(self.points_control)) + self._parallel_run(max_workers=max_workers, + pool_size=pool_size, timeout=timeout, + **kwargs) + + except Exception as e: + logger.exception('reV generation failed!') + raise e + + return self._out_fpath
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/handlers/exclusions.html b/_modules/reV/handlers/exclusions.html new file mode 100644 index 000000000..ef6ecd581 --- /dev/null +++ b/_modules/reV/handlers/exclusions.html @@ -0,0 +1,1076 @@ + + + + + + reV.handlers.exclusions — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.handlers.exclusions

+# -*- coding: utf-8 -*-
+"""
+Exclusion layers handler
+"""
+import logging
+import json
+import numpy as np
+
+from reV.utilities.exceptions import HandlerKeyError, MultiFileExclusionError
+
+from rex.utilities.parse_keys import parse_keys
+from rex.resource import Resource
+from rex.multi_file_resource import MultiFileResource
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class ExclusionLayers: + """ + Handler of .h5 file and techmap for Exclusion Layers + """ + + def __init__(self, h5_file, hsds=False): + """ + Parameters + ---------- + h5_file : str | list | tuple + .h5 file containing exclusion layers and techmap, + or a list of h5 files + hsds : bool + Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS + behind HSDS + """ + + self.h5_file = h5_file + + if isinstance(h5_file, str): + self._h5 = Resource(h5_file, hsds=hsds) + elif isinstance(h5_file, (list, tuple)): + self._h5 = MultiFileResource(h5_file, check_files=False) + self._preflight_multi_file() + else: + msg = ('Expected str, list, or tuple for h5_file input but ' + 'received {}'.format(type(h5_file))) + logger.error(msg) + raise TypeError(msg) + + self._iarr = None + + def __repr__(self): + msg = "{} for {}".format(self.__class__.__name__, self.h5_file) + + return msg + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + if type is not None: + raise + + def __len__(self): + return len(self.layers) + + def __getitem__(self, keys): + ds, ds_slice = parse_keys(keys) + + if ds.lower().startswith('lat'): + out = self._get_latitude(*ds_slice) + elif ds.lower().startswith('lon'): + out = self._get_longitude(*ds_slice) + else: + out = self._get_layer(ds, *ds_slice) + + return out + + def __contains__(self, layer): + return layer in self.layers + + def _preflight_multi_file(self): + """Run simple multi-file exclusion checks.""" + lat_shape = self.h5.shapes['latitude'] + lon_shape = self.h5.shapes['longitude'] + for layer in self.layers: + lshape = self.h5.shapes[layer] + lshape = lshape[1:] if len(lshape) > 2 else lshape + if lshape != lon_shape or lshape != lat_shape: + msg = ('Shape of layer "{}" is {} which does not match ' + 'latitude and longitude shapes of {} and {}. 
' + 'Check your exclusion file inputs: {}' + .format(layer, self.h5.shapes[layer], + lat_shape, lon_shape, self.h5._h5_files)) + logger.error(msg) + raise MultiFileExclusionError(msg) + + check_attrs = ('height', 'width', 'crs', 'transform') + base_profile = {} + for fp in self.h5_file: + with ExclusionLayers(fp) as f: + if not base_profile: + base_profile = f.profile + else: + for attr in check_attrs: + if attr not in base_profile or attr not in f.profile: + msg = ('Multi-file exclusion inputs from {} ' + 'dont have profiles with height, width, ' + 'crs, and transform: {} and {}' + .format(self.h5_file, base_profile, + f.profile)) + logger.error(msg) + raise MultiFileExclusionError(msg) + + base_attr = base_profile[attr] + file_attr = f.profile[attr] + attrs_are_str = (isinstance(base_attr, str) + and isinstance(file_attr, str)) + if attr == 'crs' and attrs_are_str: + attrs_match = (set(base_attr.split(' ')) + == set(file_attr.split(' '))) + else: + attrs_match = base_profile[attr] == f.profile[attr] + + if not attrs_match: + msg = ('Multi-file exclusion inputs from {} ' + 'dont have matching "{}": {} and {}' + .format(self.h5_file, attr, + base_profile[attr], + f.profile[attr])) + logger.error(msg) + raise MultiFileExclusionError(msg) + +
[docs] def close(self): + """ + Close h5 instance + """ + self._h5.close()
+ + @property + def h5(self): + """ + Open h5py File instance. + + Returns + ------- + h5 : rex.MultiFileResource | rex.Resource + """ + return self._h5 + + @property + def iarr(self): + """Get an array of 1D index values for the flattened h5 excl extent. + + Returns + ------- + iarr : np.ndarray + Uint array with same shape as exclusion extent, representing the 1D + index values if the geotiff extent was flattened + (with default flatten order 'C') + """ + if self._iarr is None: + N = self.shape[0] * self.shape[1] + self._iarr = np.arange(N, dtype=np.uint32) + self._iarr = self._iarr.reshape(self.shape) + + return self._iarr + + @property + def profile(self): + """ + GeoTiff profile for exclusions + + Returns + ------- + profile : dict + """ + return json.loads(self.h5.global_attrs['profile']) + + @property + def crs(self): + """ + GeoTiff projection crs + + Returns + ------- + str + """ + return self.profile['crs'] + + @property + def pixel_area(self): + """Get pixel area in km2 from the transform profile of the excl file. + + Returns + ------- + area : float + Exclusion pixel area in km2. Will return None if the + appropriate transform attribute is not found. 
+ """ + + area = None + if 'transform' in self.profile: + transform = self.profile['transform'] + area = np.abs(transform[0] * transform[4]) + area /= 1000 ** 2 + + return area + + @property + def layers(self): + """ + Available exclusions layers + + Returns + ------- + layers : list + """ + layers = self.h5.datasets + + return layers + + @property + def shape(self): + """ + Exclusion shape (latitude, longitude) + + Returns + ------- + shape : tuple + """ + shape = self.h5.attrs.get('shape', None) + if shape is None: + shape = self.h5.shapes['latitude'] + + return tuple(shape) + + @property + def chunks(self): + """ + Exclusion layers chunks default chunk size + + Returns + ------- + chunks : tuple | None + Chunk size of exclusion layers + """ + chunks = self.h5.attrs.get('chunks', None) + if chunks is None: + chunks = self.h5.chunks['latitude'] + + return chunks + + @property + def latitude(self): + """ + Latitude coordinates array + + Returns + ------- + ndarray + """ + return self['latitude'] + + @property + def longitude(self): + """ + Longitude coordinates array + + Returns + ------- + ndarray + """ + return self['longitude'] + +
[docs] def get_layer_profile(self, layer): + """ + Get profile for a specific exclusion layer + + Parameters + ---------- + layer : str + Layer to get profile for + + Returns + ------- + profile : dict | None + GeoTiff profile for single exclusion layer + """ + profile = self.h5.get_attrs(dset=layer).get('profile', None) + if profile is not None: + profile = json.loads(profile) + + return profile
+ +
[docs] def get_layer_crs(self, layer): + """ + Get crs for a specific exclusion layer + + Parameters + ---------- + layer : str + Layer to get profile for + + Returns + ------- + crs : str | None + GeoTiff projection crs + """ + profile = self.get_layer_profile(layer) + if profile is not None: + crs = profile['crs'] + else: + crs = None + + return crs
+ +
[docs] def get_layer_values(self, layer): + """ + Get values for given layer in Geotiff format (bands, y, x) + + Parameters + ---------- + layer : str + Layer to get values for + + Returns + ------- + values : ndarray + GeoTiff values for single exclusion layer + """ + values = self.h5[layer] + + return values
+ +
[docs] def get_layer_description(self, layer): + """ + Get description for given layer + + Parameters + ---------- + layer : str + Layer to get description for + + Returns + ------- + description : str + Description of layer + """ + description = self.h5.get_attrs(dset=layer).get('description', None) + + return description
+ +
[docs] def get_nodata_value(self, layer): + """ + Get the nodata value for a given layer + + Parameters + ---------- + layer : str + Layer to get nodata value for + + Returns + ------- + nodata : int | float | None + nodata value for layer or None if not found + """ + profile = self.get_layer_profile(layer) + nodata = profile.get('nodata', None) + + return nodata
+ + def _get_latitude(self, *ds_slice): + """ + Extract latitude coordinates + + Parameters + ---------- + ds_slice : tuple of int | list | slice + Pandas slicing describing which sites and columns to extract + + Returns + ------- + lat : ndarray + Latitude coordinates + """ + if 'latitude' not in self.h5: + msg = ('"latitude" is missing from {}' + .format(self.h5_file)) + logger.error(msg) + raise HandlerKeyError(msg) + + ds_slice = ('latitude', ) + ds_slice + + lat = self.h5[ds_slice] + + return lat + + def _get_longitude(self, *ds_slice): + """ + Extract longitude coordinates + + Parameters + ---------- + ds_slice : tuple of int | list | slice + Pandas slicing describing which sites and columns to extract + + Returns + ------- + lon : ndarray + Longitude coordinates + """ + if 'longitude' not in self.h5: + msg = ('"longitude" is missing from {}' + .format(self.h5_file)) + logger.error(msg) + raise HandlerKeyError(msg) + + ds_slice = ('longitude', ) + ds_slice + + lon = self.h5[ds_slice] + + return lon + + def _get_layer(self, layer_name, *ds_slice): + """ + Extract data from given dataset + + Parameters + ---------- + layer_name : str + Exclusion layer to extract + ds_slice : tuple of int | list | slice + tuple describing slice of layer array to extract + + Returns + ------- + layer_data : ndarray + Array of exclusion data + """ + if layer_name not in self.layers: + msg = ('{} not in available layers: {}' + .format(layer_name, self.layers)) + logger.error(msg) + raise HandlerKeyError(msg) + + shape = self.h5.get_dset_properties(layer_name)[0] + if len(shape) == 3: + ds_slice = (layer_name, 0) + ds_slice + else: + ds_slice = (layer_name, ) + ds_slice + + layer_data = self.h5[ds_slice] + + return layer_data
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/handlers/multi_year.html b/_modules/reV/handlers/multi_year.html new file mode 100644 index 000000000..3611749aa --- /dev/null +++ b/_modules/reV/handlers/multi_year.html @@ -0,0 +1,1449 @@ + + + + + + reV.handlers.multi_year — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.handlers.multi_year

+# -*- coding: utf-8 -*-
+"""
+Classes to collect reV outputs from multiple annual files.
+"""
+import glob
+import time
+import logging
+import numpy as np
+import os
+import pandas as pd
+from warnings import warn
+
+from rex import Resource
+from rex.utilities.utilities import (get_class_properties, parse_year,
+                                     get_lat_lon_cols)
+from gaps.pipeline import parse_previous_status
+
+from reV.handlers.outputs import Outputs
+from reV.config.output_request import SAMOutputRequest
+from reV.utilities.exceptions import HandlerRuntimeError, ConfigError
+from reV.utilities import log_versions, ModuleName
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class MultiYearGroup: + """ + Handle group parameters + """ + + def __init__(self, name, out_dir, source_files=None, + source_dir=None, source_prefix=None, + source_pattern=None, + dsets=('cf_mean',), pass_through_dsets=None): + """ + Parameters + ---------- + name : str + Group name. Can be ``"none"`` for no collection groups. + out_dir : str + Output directory - used for Pipeline handling. + source_files : str | list, optional + Explicit list of source files. Use either this input *OR* + `source_dir` + `source_prefix`. If this input is + ``"PIPELINE"``, the `source_files` input is determined from + the status file of the previous pipeline step. + If ``None``, use `source_dir` and `source_prefix`. + By default, ``None``. + source_dir : str, optional + Directory to extract source files from (must be paired with + `source_prefix`). By default, ``None``. + source_prefix : str, optional + File prefix to search for in source directory (must be + paired with `source_dir`). By default, ``None``. + source_pattern : str, optional + Optional unix-style ``/filepath/pattern*.h5`` to specify the + source files. This takes priority over `source_dir` and + `source_prefix` but is not used if `source_files` are + specified explicitly. By default, ``None``. + dsets : list | tuple, optional + List of datasets to collect. By default, ``('cf_mean',)``. + pass_through_dsets : list | tuple, optional + Optional list of datasets that are identical in the + multi-year files (e.g. input datasets that don't vary from + year to year) that should be copied to the output multi-year + file once without a year suffix or means/stdev calculation. + By default, ``None``. 
+ """ + self._name = name + self._dirout = out_dir + self._source_files = source_files + self._source_dir = source_dir + self._source_prefix = source_prefix + self._source_pattern = source_pattern + self._pass_through_dsets = None + if pass_through_dsets is not None: + self._pass_through_dsets = SAMOutputRequest(pass_through_dsets) + + self._dsets = self._parse_dsets(dsets) + + def _parse_dsets(self, dsets): + """Parse a multi-year dataset collection request. Can handle PIPELINE + argument which will find all datasets from one of the files being + collected ignoring meta, time index, and pass_through_dsets + + Parameters + ---------- + dsets : str | list + One or more datasets to collect, or "PIPELINE" + + Returns + ------- + dsets : SAMOutputRequest + Dataset list object. + """ + if isinstance(dsets, str) and dsets == 'PIPELINE': + files = parse_previous_status(self._dirout, ModuleName.MULTI_YEAR) + with Resource(files[0]) as res: + dsets = [d for d in res + if not d.startswith('time_index') + and d != 'meta' + and d not in self.pass_through_dsets] + + dsets = SAMOutputRequest(dsets) + + return dsets + + @property + def name(self): + """ + Returns + ------- + name : str + Group name + """ + name = self._name if self._name.lower() != "none" else None + return name + + @property + def source_files(self): + """ + Returns + ------- + source_files : list + list of source files to collect from + """ + if self._source_files is not None: + if isinstance(self._source_files, (list, tuple)): + source_files = self._source_files + elif self._source_files == "PIPELINE": + source_files = parse_previous_status(self._dirout, + ModuleName.MULTI_YEAR) + else: + e = "source_files must be a list, tuple, or 'PIPELINE'" + logger.error(e) + raise ConfigError(e) + + elif self._source_pattern: + source_files = glob.glob(self._source_pattern) + if not all(fp.endswith('.h5') for fp in source_files): + msg = ('Source pattern resulted in non-h5 files that cannot ' + 'be collected: {}, pattern: 
{}' + .format(source_files, self._source_pattern)) + logger.error(msg) + raise RuntimeError(msg) + + elif self._source_dir and self._source_prefix: + source_files = [] + for file in os.listdir(self._source_dir): + if (file.startswith(self._source_prefix) + and file.endswith('.h5') and '_node' not in file): + source_files.append(os.path.join(self._source_dir, + file)) + else: + e = ("source_files or both source_dir and " + "source_prefix must be provided") + logger.error(e) + raise ConfigError(e) + + if not any(source_files): + e = ('Could not find any source files for ' + 'multi-year collection group: "{}" in "{}"' + .format(self.name, self._source_dir)) + logger.error(e) + raise FileNotFoundError(e) + + return source_files + + @property + def dsets(self): + """ + Returns + ------- + _dsets :list | tuple + Datasets to collect + """ + return self._dsets + + @property + def pass_through_dsets(self): + """Optional list of datasets that are identical in the multi-year + files (e.g. input datasets that don't vary from year to year) that + should be copied to the output multi-year file once without a + year suffix or means/stdev calculation + + Returns + ------- + list | tuple | None + """ + return self._pass_through_dsets + + def _dict_rep(self): + """Get a dictionary representation of this multi year collection group + + Returns + ------- + dict + """ + props = get_class_properties(self.__class__) + out = {k: getattr(self, k) for k in props} + out['group'] = self.name + return out + + @classmethod + def _factory(cls, out_dir, groups_dict): + """ + Generate dictionary of MultiYearGroup objects for all groups in groups + + Parameters + ---------- + out_dir : str + Output directory, used for Pipeline handling + groups_dict : dict + Dictionary of group parameters, parsed from multi-year config file + + Returns + ------- + groups : dict + Dictionary of MultiYearGroup objects for each group in groups + """ + groups = {} + for name, kwargs in groups_dict.items(): + 
groups[name] = cls(name, out_dir, **kwargs) + + return groups
+ + +
[docs]class MultiYear(Outputs): + """ + Class to handle multiple years of data and: + - collect datasets from multiple years + - compute multi-year means + - compute multi-year standard deviations + - compute multi-year coefficient of variations + + """ + + def __init__(self, h5_file, group=None, unscale=True, mode='r', + str_decode=True): + """ + Parameters + ---------- + h5_file : str + Path to .h5 resource file + group : str + Group to collect datasets into + unscale : bool + Boolean flag to automatically unscale variables on extraction + mode : str + Mode to instantiate h5py.File instance + str_decode : bool + Boolean flag to decode the bytestring meta data into normal + strings. Setting this to False will speed up the meta data read. + """ + log_versions(logger) + super().__init__(h5_file, group=group, unscale=unscale, mode=mode, + str_decode=str_decode) + + @staticmethod + def _create_dset_name(source_h5, dset): + """ + Create output dataset name by parsing year from source_h5 and + appending to source dataset name. + + Parameters + ---------- + source_h5 : str + Path to source .h5 file to copy data from + dset : str + Dataset to copy + + Returns + ------- + dset_out : str + Ouput dataset name + """ + f_name = os.path.basename(source_h5) + year = parse_year(f_name) + dset_out = "{}-{}".format(dset, year) + return dset_out + + def _copy_time_index(self, source_h5): + """ + Copy time_index from source_h5 to time_index-{year} in multiyear .h5 + + Parameters + ---------- + source_h5 : str + Path to source .h5 file to copy data from + """ + dset_out = self._create_dset_name(source_h5, 'time_index') + if dset_out not in self.datasets: + logger.debug("- Collecting time_index from {}" + .format(os.path.basename(source_h5))) + with Outputs(source_h5, mode='r') as f_in: + time_index = f_in.h5['time_index'][...] 
+ + self._create_dset(dset_out, time_index.shape, time_index.dtype, + data=time_index) + + def _copy_dset(self, source_h5, dset, meta=None, pass_through=False): + """ + Copy dset_in from source_h5 to multiyear .h5 + + Parameters + ---------- + source_h5 : str + Path to source .h5 file to copy data from + dset : str + Dataset to copy + meta : pandas.DataFrame + If provided confirm that source meta matches given meta + pass_through : bool + Flag to just pass through dataset without name modifications + (no differences between years, no means or stdevs) + """ + if pass_through: + dset_out = dset + else: + dset_out = self._create_dset_name(source_h5, dset) + + if dset_out not in self.datasets: + logger.debug("- Collecting {} from {}" + .format(dset, os.path.basename(source_h5))) + with Outputs(source_h5, unscale=False, mode='r') as f_in: + if meta is not None: + cols = get_lat_lon_cols(meta) + source_meta = f_in.meta + + if len(meta) != len(source_meta): + msg = ('Meta data has different lengths between ' + 'collection files! Found {} and {}' + .format(len(meta), len(source_meta))) + logger.error(msg) + raise HandlerRuntimeError(msg) + + if not np.allclose(meta[cols], source_meta[cols]): + msg = ('Coordinates do not match between ' + 'collection files!') + logger.warning(msg) + warn(msg) + + _, ds_dtype, ds_chunks = f_in.get_dset_properties(dset) + ds_attrs = f_in.get_attrs(dset=dset) + ds_data = f_in[dset] + + self._create_dset(dset_out, ds_data.shape, ds_dtype, + chunks=ds_chunks, attrs=ds_attrs, data=ds_data) + +
[docs] @staticmethod + def parse_source_files_pattern(source_files): + """Parse a source_files pattern that can be either an explicit list of + source files or a unix-style /filepath/pattern*.h5 and either way + return a list of explicit filepaths. + + Parameters + ---------- + source_files : list | str + List of .h5 files to collect datasets from. This can also be a + unix-style /filepath/pattern*.h5 to find .h5 files to collect, + however all resulting files must be .h5 otherwise an exception will + be raised. NOTE: .h5 file names must indicate the year the data + pertains to + + Returns + ------- + source_files : list + List of .h5 filepaths. + """ + + if isinstance(source_files, str) and '*' in source_files: + source_files = glob.glob(source_files) + elif isinstance(source_files, str): + source_files = [source_files] + elif not isinstance(source_files, (list, tuple)): + msg = ('Cannot recognize source_files type: {} {}' + .format(source_files, type(source_files))) + logger.error(msg) + raise TypeError(msg) + + if not all(fp.endswith('.h5') for fp in source_files): + msg = ('Non-h5 files cannot be collected: {}'.format(source_files)) + logger.error(msg) + raise RuntimeError(msg) + + return source_files
+ +
[docs] def collect(self, source_files, dset, profiles=False, pass_through=False): + """ + Collect dataset dset from given list of h5 files + + Parameters + ---------- + source_files : list | str + List of .h5 files to collect datasets from. This can also be a + unix-style /filepath/pattern*.h5 to find .h5 files to collect, + however all resulting files must be .h5 otherwise an exception will + be raised. NOTE: .h5 file names must indicate the year the data + pertains to + dset : str + Dataset to collect + profiles : bool + Boolean flag to indicate if profiles are being collected + If True also collect time_index + pass_through : bool + Flag to just pass through dataset without name modifications + (no differences between years, no means or stdevs) + """ + source_files = self.parse_source_files_pattern(source_files) + with Outputs(source_files[0], mode='r') as f_in: + meta = f_in.h5['meta'][...] + + if 'meta' not in self.datasets: + logger.debug("Copying meta") + self._create_dset('meta', meta.shape, meta.dtype, + data=meta) + + meta = pd.DataFrame(meta) + for year_h5 in source_files: + if profiles: + self._copy_time_index(year_h5) + + self._copy_dset(year_h5, dset, meta=meta, + pass_through=pass_through)
+ + def _get_source_dsets(self, dset_out): + """ + Extract all available annual datasets associated with dset + + Parameters + ---------- + dset_out : str + Output dataset to find source datasets for + + Returns + ------- + source_dsets : list + List of annual datasets + """ + dset = os.path.basename(dset_out).split("-")[0] + logger.debug('-- source_dset root = {}'.format(dset)) + my_dset = ["{}-{}".format(dset, val) for val in ['means', 'stdev']] + source_dsets = [ds for ds in self.datasets if dset in ds + and ds not in my_dset] + if dset_out in source_dsets: + source_dsets.remove(dset_out) + + return source_dsets + + def _update_dset(self, dset_out, dset_data): + """ + Update dataset, create if needed + + Parameters + ---------- + dset_out : str + Dataset name + dset_data : ndarray + Dataset data to write to disc + """ + if dset_out in self.datasets: + logger.debug("- Updating {}".format(dset_out)) + self[dset_out] = dset_data + else: + logger.debug("- Creating {}".format(dset_out)) + source_dset = self._get_source_dsets(dset_out)[0] + _, ds_dtype, ds_chunks = self.get_dset_properties(source_dset) + ds_attrs = self.get_attrs(dset=source_dset) + self._add_dset(dset_out, dset_data, ds_dtype, + chunks=ds_chunks, attrs=ds_attrs) + + def _compute_means(self, dset_out): + """ + Compute multi-year means for given dataset + + Parameters + ---------- + dset_out : str + Multi-year means dataset name + + Returns + ------- + my_means : ndarray + Array of multi-year means + """ + source_dsets = self._get_source_dsets(dset_out) + logger.debug('\t- Computing {} from {}'.format(dset_out, source_dsets)) + + my_means = np.zeros(len(self), dtype='float32') + for ds in source_dsets: + if self.h5[ds].shape == my_means.shape: + my_means += self[ds] + else: + raise HandlerRuntimeError("{} shape {} should be {}" + .format(ds, self.h5[ds].shape, + my_means.shape)) + my_means /= len(source_dsets) + self._update_dset(dset_out, my_means) + + return my_means + +
[docs] def means(self, dset): + """ + Extract or compute multi-year means for given source dset + + Parameters + ---------- + dset : str + Dataset of interest + + Returns + ------- + my_means : ndarray + Array of multi-year means for dataset of interest + """ + my_dset = "{}-means".format(dset) + if my_dset in self.datasets: + my_means = self[my_dset] + else: + my_means = self._compute_means(my_dset) + + return my_means
+ + def _compute_stdev(self, dset_out, means=None): + """ + Compute multi-year standard deviation for given dataset + + Parameters + ---------- + dset_out : str + Multi-year stdev dataset name + means : ndarray + Array of pre-computed means + + Returns + ------- + my_stdev : ndarray + Array of multi-year standard deviations + """ + if means is None: + means = self._compute_means("{}-means".format(dset_out)) + + source_dsets = self._get_source_dsets(dset_out) + + my_stdev = np.zeros(means.shape, dtype='float32') + for ds in source_dsets: + if self.h5[ds].shape == my_stdev.shape: + my_stdev += (self[ds] - means)**2 + else: + raise HandlerRuntimeError("{} shape {} should be {}" + .format(ds, self.h5[ds].shape, + my_stdev.shape)) + + my_stdev = np.sqrt(my_stdev / len(source_dsets)) + self._update_dset(dset_out, my_stdev) + + return my_stdev + +
[docs] def stdev(self, dset): + """ + Extract or compute multi-year standard deviation for given source dset + + Parameters + ---------- + dset : str + Dataset of interest + + Returns + ------- + my_stdev : ndarray + Array of multi-year standard deviation for dataset of interest + """ + my_dset = "{}-stdev".format(dset) + if my_dset in self.datasets: + my_stdev = self[my_dset] + else: + my_means = self.means(dset) + my_stdev = self._compute_stdev(my_dset, means=my_means) + + return my_stdev
+ +
[docs] def CV(self, dset): + """ + Extract or compute multi-year coefficient of variation for given + source dset + + Parameters + ---------- + dset : str + Dataset of interest + + Returns + ------- + my_cv : ndarray + Array of multi-year coefficient of variation for + dataset of interest + """ + my_cv = self.stdev(dset) / self.means(dset) + return my_cv
+ +
[docs] @classmethod + def is_profile(cls, source_files, dset): + """ + Check dataset in source files to see if it is a profile. + + Parameters + ---------- + source_files : list | str + List of .h5 files to collect datasets from. This can also be a + unix-style /filepath/pattern*.h5 to find .h5 files to collect, + however all resulting files must be .h5 otherwise an exception will + be raised. NOTE: .h5 file names must indicate the year the data + pertains to + dset : str + Dataset to collect + + Returns + ------- + is_profile : bool + True if profile, False if not. + """ + source_files = cls.parse_source_files_pattern(source_files) + with Outputs(source_files[0]) as f: + if dset not in f.datasets: + raise KeyError('Dataset "{}" not found in source file: "{}"' + .format(dset, source_files[0])) + + shape, _, _ = f.get_dset_properties(dset) + + return len(shape) == 2
+ +
[docs] @classmethod + def pass_through(cls, my_file, source_files, dset, group=None): + """ + Pass through a dataset that is identical in all source files to a + dataset of the same name in the output multi-year file. + + Parameters + ---------- + my_file : str + Path to multi-year .h5 file + source_files : list | str + List of .h5 files to collect datasets from. This can also be a + unix-style /filepath/pattern*.h5 to find .h5 files to collect, + however all resulting files must be .h5 otherwise an exception will + be raised. NOTE: .h5 file names must indicate the year the data + pertains to + dset : str + Dataset to pass through (will also be the name of the output + dataset in my_file) + group : str + Group to collect datasets into + """ + source_files = cls.parse_source_files_pattern(source_files) + logger.info('Passing through {} into {}.' + .format(dset, my_file)) + with cls(my_file, mode='a', group=group) as my: + my.collect(source_files, dset, pass_through=True)
+ +
[docs] @classmethod + def collect_means(cls, my_file, source_files, dset, group=None): + """ + Collect and compute multi-year means for given dataset + + Parameters + ---------- + my_file : str + Path to multi-year .h5 file + source_files : list | str + List of .h5 files to collect datasets from. This can also be a + unix-style /filepath/pattern*.h5 to find .h5 files to collect, + however all resulting files must be .h5 otherwise an exception will + be raised. NOTE: .h5 file names must indicate the year the data + pertains to + dset : str + Dataset to collect + group : str + Group to collect datasets into + """ + logger.info('Collecting {} into {} ' + 'and computing multi-year means and standard deviations.' + .format(dset, my_file)) + source_files = cls.parse_source_files_pattern(source_files) + with cls(my_file, mode='a', group=group) as my: + my.collect(source_files, dset) + means = my._compute_means("{}-means".format(dset)) + my._compute_stdev("{}-stdev".format(dset), means=means)
+ +
[docs] @classmethod + def collect_profiles(cls, my_file, source_files, dset, group=None): + """ + Collect multi-year profiles associated with given dataset + + Parameters + ---------- + my_file : str + Path to multi-year .h5 file + source_files : list | str + List of .h5 files to collect datasets from. This can also be a + unix-style /filepath/pattern*.h5 to find .h5 files to collect, + however all resulting files must be .h5 otherwise an exception will + be raised. NOTE: .h5 file names must indicate the year the data + pertains to + dset : str + Profiles dataset to collect + group : str + Group to collect datasets into + """ + logger.info('Collecting {} into {}'.format(dset, my_file)) + source_files = cls.parse_source_files_pattern(source_files) + with cls(my_file, mode='a', group=group) as my: + my.collect(source_files, dset, profiles=True)
+ + +
[docs]def my_collect_groups(out_fpath, groups, clobber=True): + """Collect all groups into a single multi-year HDF5 file. + + ``reV`` multi-year combines ``reV`` generation data from multiple + years (typically stored in separate files) into a single multi-year + file. Each dataset in the multi-year file is labeled with the + corresponding years, and multi-year averages of the yearly datasets + are also computed. + + Parameters + ---------- + out_fpath : str + Path to multi-year HDF5 file to use for multi-year + collection. + groups : dict + Dictionary of collection groups and their parameters. This + should be a dictionary mapping group names (keys) to a set + of key word arguments (values) that can be used to initialize + :class:`~reV.handlers.multi_year.MultiYearGroup` (excluding the + required ``name`` and ``out_dir`` inputs, which are populated + automatically). For example:: + + groups = { + "none": { + "dsets": [ + "cf_profile", + "cf_mean", + "ghi_mean", + "lcoe_fcr", + ], + "source_dir": "./", + "source_prefix": "", + "pass_through_dsets": [ + "capital_cost", + "fixed_operating_cost", + "system_capacity", + "fixed_charge_rate", + "variable_operating_cost", + ] + }, + "solar_group": { + "source_files": "PIPELINE", + "dsets": [ + "cf_profile_ac", + "cf_mean_ac", + "ac", + "dc", + "clipped_power" + ], + "pass_through_dsets": [ + "system_capacity_ac", + "dc_ac_ratio" + ] + }, + ... + } + + The group names will be used as the HDF5 file group name under + which the collected data will be stored. You can have exactly + one group with the name ``"none"`` for a "no group" collection + (this is typically what you want and all you need to specify). + clobber : bool, optional + Flag to purge the multi-year output file prior to running the + multi-year collection step if the file already exists on disk. + This ensures the data is always freshly collected from the + single-year files. 
If ``False``, then datasets in the existing + file will **not** be overwritten with (potentially new/updated) + data from the single-year files. By default, ``True``. + """ + if not out_fpath.endswith(".h5"): + out_fpath = '{}.h5'.format(out_fpath) + + if clobber and os.path.exists(out_fpath): + msg = ('Found existing multi-year file: "{}". Removing...' + .format(str(out_fpath))) + logger.warning(msg) + warn(msg) + os.remove(out_fpath) + + out_dir = os.path.dirname(out_fpath) + groups = MultiYearGroup._factory(out_dir, groups) + group_params = {name: group._dict_rep() + for name, group in groups.items()} + + logger.info('Multi-year collection is being run with output path: {}' + .format(out_fpath)) + ts = time.time() + for group_name, group in group_params.items(): + logger.info('- Collecting datasets "{}" from "{}" into "{}/"' + .format(group['dsets'], group['source_files'], + group_name)) + t0 = time.time() + for dset in group['dsets']: + if MultiYear.is_profile(group['source_files'], dset): + MultiYear.collect_profiles(out_fpath, group['source_files'], + dset, group=group['group']) + else: + MultiYear.collect_means(out_fpath, group['source_files'], + dset, group=group['group']) + + if group.get('pass_through_dsets', None) is not None: + for dset in group['pass_through_dsets']: + MultiYear.pass_through(out_fpath, group['source_files'], + dset, group=group['group']) + + runtime = (time.time() - t0) / 60 + logger.info('- {} collection completed in: {:.2f} min.' + .format(group_name, runtime)) + + runtime = (time.time() - ts) / 60 + logger.info('Multi-year collection completed in : {:.2f} min.' + .format(runtime)) + + return out_fpath
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/handlers/outputs.html b/_modules/reV/handlers/outputs.html new file mode 100644 index 000000000..5ef940e8f --- /dev/null +++ b/_modules/reV/handlers/outputs.html @@ -0,0 +1,777 @@ + + + + + + reV.handlers.outputs — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.handlers.outputs

+# -*- coding: utf-8 -*-
+"""
+Classes to handle reV h5 output files.
+"""
+import logging
+import NRWAL
+import PySAM
+import rex
+import sys
+import json
+
+from reV.version import __version__
+from rex.outputs import Outputs as rexOutputs
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class Outputs(rexOutputs): + """ + Base class to handle reV output data in .h5 format + + Examples + -------- + The reV Outputs handler can be used to initialize h5 files in the standard + reV/rex resource data format. + + >>> from reV import Outputs + >>> import pandas as pd + >>> import numpy as np + >>> + >>> meta = pd.DataFrame({'latitude': np.ones(100), + >>> 'longitude': np.ones(100)}) + >>> + >>> time_index = pd.date_range('20210101', '20220101', freq='1h', + >>> closed='right') + >>> + >>> with Outputs('test.h5', 'w') as f: + >>> f.meta = meta + >>> f.time_index = time_index + + You can also use the Outputs handler to read output h5 files from disk. + The Outputs handler will automatically parse the meta data and time index + into the expected pandas objects (DataFrame and DatetimeIndex, + respectively). + + >>> with Outputs('test.h5') as f: + >>> print(f.meta.head()) + >>> + latitude longitude + gid + 0 1.0 1.0 + 1 1.0 1.0 + 2 1.0 1.0 + 3 1.0 1.0 + 4 1.0 1.0 + + >>> with Outputs('test.h5') as f: + >>> print(f.time_index) + DatetimeIndex(['2021-01-01 01:00:00+00:00', '2021-01-01 02:00:00+00:00', + '2021-01-01 03:00:00+00:00', '2021-01-01 04:00:00+00:00', + '2021-01-01 05:00:00+00:00', '2021-01-01 06:00:00+00:00', + '2021-01-01 07:00:00+00:00', '2021-01-01 08:00:00+00:00', + '2021-01-01 09:00:00+00:00', '2021-01-01 10:00:00+00:00', + ... + '2021-12-31 15:00:00+00:00', '2021-12-31 16:00:00+00:00', + '2021-12-31 17:00:00+00:00', '2021-12-31 18:00:00+00:00', + '2021-12-31 19:00:00+00:00', '2021-12-31 20:00:00+00:00', + '2021-12-31 21:00:00+00:00', '2021-12-31 22:00:00+00:00', + '2021-12-31 23:00:00+00:00', '2022-01-01 00:00:00+00:00'], + dtype='datetime64[ns, UTC]', length=8760, freq=None) + + There are a few ways to use the Outputs handler to write data to a file. + Here is one example using the pre-initialized file we created earlier. + Note that the Outputs handler will automatically scale float data using + the "scale_factor" attribute. 
The Outputs handler will unscale the data + while being read unless the unscale kwarg is explicityly set to False. + This behavior is intended to reduce disk storage requirements for big + data and can be disabled by setting dtype=np.float32 or dtype=np.float64 + when writing data. + + >>> Outputs.add_dataset(h5_file='test.h5', dset_name='dset1', + >>> dset_data=np.ones((8760, 100)) * 42.42, + >>> attrs={'scale_factor': 100}, dtype=np.int32) + + + >>> with Outputs('test.h5') as f: + >>> print(f['dset1']) + >>> print(f['dset1'].dtype) + [[42.42 42.42 42.42 ... 42.42 42.42 42.42] + [42.42 42.42 42.42 ... 42.42 42.42 42.42] + [42.42 42.42 42.42 ... 42.42 42.42 42.42] + ... + [42.42 42.42 42.42 ... 42.42 42.42 42.42] + [42.42 42.42 42.42 ... 42.42 42.42 42.42] + [42.42 42.42 42.42 ... 42.42 42.42 42.42]] + float32 + + >>> with Outputs('test.h5', unscale=False) as f: + >>> print(f['dset1']) + >>> print(f['dset1'].dtype) + [[4242 4242 4242 ... 4242 4242 4242] + [4242 4242 4242 ... 4242 4242 4242] + [4242 4242 4242 ... 4242 4242 4242] + ... + [4242 4242 4242 ... 4242 4242 4242] + [4242 4242 4242 ... 4242 4242 4242] + [4242 4242 4242 ... 4242 4242 4242]] + int32 + + Note that the reV Outputs handler is specifically designed to read and + write spatiotemporal data. It is therefore important to intialize the meta + data and time index objects even if your data is only spatial or only + temporal. Furthermore, the Outputs handler will always assume that 1D + datasets represent scalar data (non-timeseries) that corresponds to the + meta data shape, and that 2D datasets represent spatiotemporal data whose + shape corresponds to (len(time_index), len(meta)). 
You can see these + constraints here: + + >>> Outputs.add_dataset(h5_file='test.h5', dset_name='bad_shape', + dset_data=np.ones((1, 100)) * 42.42, + attrs={'scale_factor': 100}, dtype=np.int32) + HandlerValueError: 2D data with shape (1, 100) is not of the proper + spatiotemporal shape: (8760, 100) + + >>> Outputs.add_dataset(h5_file='test.h5', dset_name='bad_shape', + dset_data=np.ones((8760,)) * 42.42, + attrs={'scale_factor': 100}, dtype=np.int32) + HandlerValueError: 1D data with shape (8760,) is not of the proper + spatial shape: (100,) + """ + + @property + def full_version_record(self): + """Get record of versions for dependencies + + Returns + ------- + dict + Dictionary of package versions for dependencies + """ + rev_versions = {'reV': __version__, + 'rex': rex.__version__, + 'pysam': PySAM.__version__, + 'python': sys.version, + 'nrwal': NRWAL.__version__, + } + versions = super().full_version_record + versions.update(rev_versions) + return versions + +
[docs] def set_version_attr(self): + """Set the version attribute to the h5 file.""" + self.h5.attrs['version'] = __version__ + self.h5.attrs['full_version_record'] = json.dumps( + self.full_version_record) + self.h5.attrs['package'] = 'reV'
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/handlers/transmission.html b/_modules/reV/handlers/transmission.html new file mode 100644 index 000000000..99656d72c --- /dev/null +++ b/_modules/reV/handlers/transmission.html @@ -0,0 +1,1354 @@ + + + + + + reV.handlers.transmission — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.handlers.transmission

+# -*- coding: utf-8 -*-
+"""
+Module to handle Supply Curve Transmission features
+"""
+import json
+import logging
+import numpy as np
+import os
+import pandas as pd
+from warnings import warn
+
+from reV.utilities.exceptions import (HandlerWarning, HandlerKeyError,
+                                      HandlerRuntimeError)
+
+from rex.utilities.utilities import parse_table
+from gaps.config import load_config
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class TransmissionFeatures: + """ + Class to handle Supply Curve Transmission features + """ + def __init__(self, trans_table, line_tie_in_cost=14000, line_cost=2279, + station_tie_in_cost=0, center_tie_in_cost=0, + sink_tie_in_cost=1e9, avail_cap_frac=1, + line_limited=False): + """ + Parameters + ---------- + trans_table : str | pandas.DataFrame + Path to .csv or config file or DataFrame containing supply curve + transmission mapping + line_tie_in_cost : float, optional + Cost of connecting to a transmission line in $/MW, + by default 14000 + line_cost : float, optional + Cost of building transmission line during connection in $/MW-km, + by default 2279 + station_tie_in_cost : float, optional + Cost of connecting to a substation in $/MW, + by default 0 + center_tie_in_cost : float, optional + Cost of connecting to a load center in $/MW, + by default 0 + sink_tie_in_cost : float, optional + Cost of connecting to a synthetic load center (infinite sink) + in $/MW, by default 1e9 + avail_cap_frac : float, optional + Fraction of capacity that is available for connection, by default 1 + line_limited : bool, optional + Substation connection is limited by maximum capacity of the + attached lines, legacy method, by default False + """ + + logger.debug('Trans table input: {}'.format(trans_table)) + logger.debug('Line tie in cost: {} $/MW'.format(line_tie_in_cost)) + logger.debug('Line cost: {} $/MW-km'.format(line_cost)) + logger.debug('Station tie in cost: {} $/MW' + .format(station_tie_in_cost)) + logger.debug('Center tie in cost: {} $/MW'.format(center_tie_in_cost)) + logger.debug('Synthetic load center tie in cost: {} $/MW' + .format(sink_tie_in_cost)) + logger.debug('Available capacity fraction: {}' + .format(avail_cap_frac)) + logger.debug('Line limited substation connections: {}' + .format(line_limited)) + + self._line_tie_in_cost = line_tie_in_cost + self._line_cost = line_cost + self._station_tie_in_cost = station_tie_in_cost + self._center_tie_in_cost = 
center_tie_in_cost + self._sink_tie_in_cost = sink_tie_in_cost + self._avail_cap_frac = avail_cap_frac + + self._features = self._get_features(trans_table) + + self._feature_gid_list = list(self._features.keys()) + self._available_mask = np.ones( + (int(1 + max(list(self._features.keys()))), ), dtype=bool) + + self._line_limited = line_limited + + def __repr__(self): + msg = "{} with {} features".format(self.__class__.__name__, len(self)) + return msg + + def __len__(self): + return len(self._features) + + def __getitem__(self, gid): + if gid not in self._features: + msg = "Invalid feature gid {}".format(gid) + logger.error(msg) + raise HandlerKeyError(msg) + + return self._features[gid] + + @staticmethod + def _parse_dictionary(features): + """ + Parse features dict object or config file + + Parameters + ---------- + features : dict | str + Dictionary of transmission features or path to config containing + dictionary of transmission features + + Returns + ------- + features : dict + Nested dictionary of features (lines, substations, loadcenters) + lines : {capacity} + substations : {lines} + loadcenters : {capacity} + """ + if isinstance(features, str): + if os.path.isfile(features): + features = load_config(features) + else: + features = json.loads(features) + + elif not isinstance(features, dict): + msg = ("Transmission features must be a config file, object, " + "or a dictionary") + logger.error(msg) + raise ValueError(msg) + + return features + + @staticmethod + def _parse_table(trans_table): + """ + Extract features and their capacity from supply curve transmission + mapping table + + Parameters + ---------- + trans_table : str | pandas.DataFrame + Path to .csv or .json containing supply curve transmission mapping + + Returns + ------- + trans_table : pandas.DataFrame + DataFrame of transmission features + """ + try: + trans_table = parse_table(trans_table) + except ValueError as ex: + logger.error(ex) + raise + + trans_table = \ + 
trans_table.rename(columns={'trans_line_gid': 'trans_gid', + 'trans_gids': 'trans_line_gids'}) + + if 'dist_mi' in trans_table and 'dist_km' not in trans_table: + trans_table = trans_table.rename(columns={'dist_mi': 'dist_km'}) + trans_table['dist_km'] *= 1.60934 + + return trans_table + + def _features_from_table(self, trans_table): + """ + Extract features and their capacity from supply curve transmission + mapping table + + Parameters + ---------- + trans_table : pandas.DataFrame + DataFrame of transmission features + + Returns + ------- + features : dict + Nested dictionary of features (lines, substations, loadcenters) + lines : {capacity} + substations : {lines} + loadcenters : {capacity} + """ + + features = {} + + cap_frac = self._avail_cap_frac + trans_features = trans_table.groupby('trans_gid').first() + + for gid, feature in trans_features.iterrows(): + name = feature['category'].lower() + feature_dict = {'type': name} + + if name == 'transline': + feature_dict['avail_cap'] = feature['ac_cap'] * cap_frac + + elif name == 'substation': + feature_dict['lines'] = json.loads(feature['trans_line_gids']) + + elif name == 'loadcen': + feature_dict['avail_cap'] = feature['ac_cap'] * cap_frac + + elif name == 'pcaloadcen': + feature_dict['avail_cap'] = None + + else: + msg = ('Cannot not recognize feature type "{}" ' + 'for trans gid {}!'.format(name, gid)) + logger.error(msg) + raise HandlerKeyError(msg) + + features[gid] = feature_dict + + return features + + def _get_features(self, trans_table): + """ + Create transmission features dictionary either from supply curve + transmission mapping or from pre-created dictionary + + Parameters + ---------- + trans_table : str + Path to .csv or .json containing supply curve transmission mapping + + Returns + ------- + features : dict + Nested dictionary of features (lines, substations, loadcenters) + lines : {capacity} + substations : {lines} + loadcenters : {capacity} + """ + + trans_table = 
self._parse_table(trans_table) + features = self._features_from_table(trans_table) + + return features + +
[docs] def check_feature_dependencies(self): + """Check features for dependencies that are missing and raise error.""" + missing = {} + for gid, feature_dict in self._features.items(): + for line_gid in feature_dict.get('lines', []): + if line_gid not in self._features: + if gid not in missing: + missing[gid] = [] + missing[gid].append(line_gid) + + if any(missing): + emsg = ('Transmission feature table has {} parent features that ' + 'depend on missing lines. Missing dependencies: {}' + .format(len(missing), missing)) + logger.error(emsg) + raise RuntimeError(emsg)
+ + @staticmethod + def _calc_cost(distance, line_cost=2279, tie_in_cost=0, + transmission_multiplier=1): + """ + Compute transmission cost in $/MW + + Parameters + ---------- + distance : float + Distance to feature in kms + line_tie_in_cost : float, optional + Cost of connecting to a transmission line in $/MW, + by default 14000 + tie_in_cost : float, optional + Cost to tie in line to feature in $/MW, by default 0 + tranmission_multiplier : float, optional + Multiplier for region specific line cost increases, by default 1 + + Returns + ------- + cost : float + Cost of transmission in $/MW + """ + cost = (distance * line_cost * transmission_multiplier + tie_in_cost) + + return cost + + def _substation_capacity(self, gid, line_gids): + """ + Get capacity of a substation from its tranmission lines + + Parameters + ---------- + gid : int + Substation gid + line_gids : list + List of transmission line gids connected to the substation + + Returns + ------- + avail_cap : float + Substation available capacity + """ + try: + line_caps = [self[l_gid]['avail_cap'] for l_gid in line_gids] + except HandlerKeyError as e: + msg = ('Could not find capacities for substation gid {} and ' + 'connected lines: {}'.format(gid, line_gids)) + logger.error(msg) + raise HandlerKeyError(msg) from e + + avail_cap = sum(line_caps) / 2 + + if self._line_limited: + max_cap = max(line_caps) / 2 + if max_cap < avail_cap: + avail_cap = max_cap + + return avail_cap + +
[docs] def available_capacity(self, gid): + """ + Get available capacity for given line + + Parameters + ---------- + gid : int + Unique id of feature of interest + + Returns + ------- + avail_cap : float + Available capacity = capacity * available fraction + default = 100% + """ + + feature = self[gid] + + if 'avail_cap' in feature: + avail_cap = feature['avail_cap'] + + elif 'lines' in feature: + avail_cap = self._substation_capacity(gid, feature['lines']) + + else: + msg = ('Could not parse available capacity from feature: {}' + .format(feature)) + logger.error(msg) + raise HandlerRuntimeError(msg) + + return avail_cap
+ + def _update_availability(self, gid): + """ + Check features available capacity, if its 0 update _available_mask + + Parameters + ---------- + gid : list + Feature gid to check + """ + avail_cap = self.available_capacity(gid) + if avail_cap == 0: + self._available_mask[gid] = False + +
[docs] def check_availability(self, gid): + """ + Check availablity of feature with given gid + + Parameters + ---------- + gid : int + Feature gid to check + + Returns + ------- + bool + Whether the gid is available or not + """ + return self._available_mask[gid]
+ + def _connect(self, gid, capacity): + """ + Connect to a standalone transmission feature (not a substation) + and decrement the feature's available capacity. + Raise exception if not able to connect. + + Parameters + ---------- + gid : int + Feature gid to connect to + capacity : float + Capacity needed in MW + """ + avail_cap = self[gid]['avail_cap'] + + if avail_cap < capacity: + msg = ("Cannot connect to {}: " + "needed capacity({} MW) > " + "available capacity({} MW)" + .format(gid, capacity, avail_cap)) + logger.error(msg) + raise RuntimeError(msg) + + self[gid]['avail_cap'] -= capacity + + def _fill_lines(self, line_gids, line_caps, capacity): + """ + Fill any lines that cannot handle equal portion of capacity and + remove from lines to be filled and capacity needed + + Parameters + ---------- + line_gids : ndarray + Vector of transmission line gids connected to the substation + line_caps : ndarray + Vector of available capacity of the transmission lines + capacity : float + Capacity needed in MW + + Returns + ---------- + line_gids : ndarray + Transmission lines with available capacity + line_caps : ndarray + Capacity of lines with available capacity + capacity : float + Updated capacity needed to be applied to substation in MW + """ + apply_cap = capacity / len(line_gids) + mask = line_caps < apply_cap + for pos in np.where(line_caps < apply_cap)[0]: + gid = line_gids[pos] + apply_cap = line_caps[pos] + self._connect(gid, apply_cap) + capacity -= apply_cap + + return line_gids[~mask], line_caps[~mask], capacity + + def _spread_substation_load(self, line_gids, line_caps, capacity): + """ + Spread needed capacity over all lines connected to substation + + Parameters + ---------- + line_gids : ndarray + Vector of transmission line gids connected to the substation + line_caps : ndarray + Vector of available capacity of the transmission lines + capacity : float + Capacity needed to be applied to substation in MW + """ + while True: + lines, line_caps, 
capacity = self._fill_lines(line_gids, line_caps, + capacity) + if len(lines) < len(line_gids): + line_gids = lines + else: + break + + apply_cap = capacity / len(lines) + for gid in lines: + self._connect(gid, apply_cap) + + def _connect_to_substation(self, line_gids, capacity): + """ + Connect to substation and update internal dictionary accordingly + + Parameters + ---------- + line_gids : list + List of transmission line gids connected to the substation + capacity : float + Capacity needed in MW + line_lmited : bool + Substation connection is limited by maximum capacity of the + attached lines + """ + line_caps = np.array([self[gid]['avail_cap'] + for gid in line_gids]) + if self._line_limited: + gid = line_gids[np.argmax(line_caps)] + self._connect(gid, capacity) + else: + non_zero = np.nonzero(line_caps)[0] + line_gids = np.array([line_gids[i] for i in non_zero]) + line_caps = line_caps[non_zero] + self._spread_substation_load(line_gids, line_caps, capacity) + +
[docs] def connect(self, gid, capacity, apply=True): + """ + Check if you can connect to given feature + If apply, update internal dictionary accordingly + + Parameters + ---------- + gid : int + Unique id of feature of intereset + capacity : float + Capacity needed in MW + apply : bool + Apply capacity to feature with given gid and update + internal dictionary + + Returns + ------- + connected : bool + Flag as to whether connection is possible or not + """ + if self.check_availability(gid): + avail_cap = self.available_capacity(gid) + if avail_cap is not None and capacity > avail_cap: + connected = False + else: + connected = True + if apply: + feature_type = self[gid]['type'] + if feature_type == 'transline': + self._connect(gid, capacity) + elif feature_type == 'substation': + lines = self[gid]['lines'] + self._connect_to_substation(lines, capacity) + elif feature_type == 'loadcen': + self._connect(gid, capacity) + + self._update_availability(gid) + else: + connected = False + + return connected
+ +
[docs] def cost(self, gid, distance, transmission_multiplier=1, + capacity=None): + """ + Compute levelized cost of transmission (LCOT) for connecting to give + feature + + Parameters + ---------- + gid : int + Feature gid to connect to + distance : float + Distance to feature in kms + line_multiplier : float + Multiplier for region specific line cost increases + capacity : float + Capacity needed in MW, if None DO NOT check if connection is + possible + + Returns + ------- + cost : float + Cost of transmission in $/MW, if None indicates connection is + NOT possible + """ + feature_type = self[gid]['type'] + line_cost = self._line_cost + if feature_type == 'transline': + tie_in_cost = self._line_tie_in_cost + elif feature_type == 'substation': + tie_in_cost = self._station_tie_in_cost + elif feature_type == 'loadcen': + tie_in_cost = self._center_tie_in_cost + elif feature_type == 'pcaloadcen': + tie_in_cost = self._sink_tie_in_cost + else: + tie_in_cost = 0 + msg = ("Do not recognize feature type {}, tie_in_cost set to 0" + .format(feature_type)) + logger.warning(msg) + warn(msg, HandlerWarning) + + cost = self._calc_cost(distance, line_cost=line_cost, + tie_in_cost=tie_in_cost, + transmission_multiplier=transmission_multiplier) + if capacity is not None: + if not self.connect(gid, capacity, apply=False): + cost = None + + return cost
+ +
[docs] @classmethod + def feature_capacity(cls, trans_table, avail_cap_frac=1): + """ + Compute available capacity for all features + + Parameters + ---------- + trans_table : str | pandas.DataFrame + Path to .csv or .json containing supply curve transmission mapping + avail_cap_frac : float, optional + Fraction of capacity that is available for connection, by default 1 + + Returns + ------- + feature_cap : pandas.DataFrame + Available Capacity for each transmission feature + """ + try: + feature = cls(trans_table, avail_cap_frac=avail_cap_frac) + + feature_cap = {} + for gid, _ in feature._features.items(): + feature_cap[gid] = feature.available_capacity(gid) + except Exception: + logger.exception("Error computing available capacity for all " + "features in {}".format(cls)) + raise + + feature_cap = pd.Series(feature_cap) + feature_cap.name = 'avail_cap' + feature_cap.index.name = 'trans_gid' + feature_cap = feature_cap.to_frame().reset_index() + + return feature_cap
+ + +
[docs]class TransmissionCosts(TransmissionFeatures): + """ + Class to compute supply curve -> transmission feature costs + """ + def _features_from_table(self, trans_table): + """ + Extract features and their capacity from supply curve transmission + mapping table and pre-compute the available capacity of each feature + + Parameters + ---------- + trans_table : pandas.DataFrame + DataFrame of transmission features + + Returns + ------- + features : dict + Nested dictionary of features (lines, substations, loadcenters) + lines : {capacity} + substations : {lines} + loadcenters : {capacity} + """ + + features = {} + + if 'avail_cap' not in trans_table: + kwargs = {'avail_cap_frac': self._avail_cap_frac} + fc = TransmissionFeatures.feature_capacity(trans_table, + **kwargs) + trans_table = trans_table.merge(fc, on='trans_gid') + + trans_features = trans_table.groupby('trans_gid').first() + for gid, feature in trans_features.iterrows(): + name = feature['category'].lower() + feature_dict = {'type': name, 'avail_cap': feature['avail_cap']} + features[gid] = feature_dict + + return features + +
[docs] def available_capacity(self, gid): + """ + Get available capacity for given line + + Parameters + ---------- + gid : int + Unique id of feature of interest + + Returns + ------- + avail_cap : float + Available capacity = capacity * available fraction + default = 100% + """ + + return self[gid]['avail_cap']
+ +
[docs] @classmethod + def feature_costs(cls, trans_table, capacity=None, line_tie_in_cost=14000, + line_cost=2279, station_tie_in_cost=0, + center_tie_in_cost=0, sink_tie_in_cost=1e9, + avail_cap_frac=1, line_limited=False): + """ + Compute costs for all connections in given transmission table + + Parameters + ---------- + trans_table : str | pandas.DataFrame + Path to .csv or .json containing supply curve transmission mapping + capacity : float + Capacity needed in MW, if None DO NOT check if connection is + possible + line_tie_in_cost : float, optional + Cost of connecting to a transmission line in $/MW, + by default 14000 + line_cost : float, optional + Cost of building transmission line during connection in $/MW-km, + by default 2279 + station_tie_in_cost : float, optional + Cost of connecting to a substation in $/MW, + by default 0 + center_tie_in_cost : float, optional + Cost of connecting to a load center in $/MW, + by default 0 + sink_tie_in_cost : float, optional + Cost of connecting to a synthetic load center (infinite sink) + in $/MW, by default 1e9 + avail_cap_frac : float, optional + Fraction of capacity that is available for connection, by default 1 + line_limited : bool, optional + Substation connection is limited by maximum capacity of the + attached lines, legacy method, by default False + + Returns + ------- + cost : ndarray + Cost of transmission in $/MW, if None indicates connection is + NOT possible + """ + try: + feature = cls(trans_table, + line_tie_in_cost=line_tie_in_cost, + line_cost=line_cost, + station_tie_in_cost=station_tie_in_cost, + center_tie_in_cost=center_tie_in_cost, + sink_tie_in_cost=sink_tie_in_cost, + avail_cap_frac=avail_cap_frac, + line_limited=line_limited) + + costs = [] + for _, row in trans_table.iterrows(): + tm = row.get('transmission_multiplier', 1) + costs.append(feature.cost(row['trans_gid'], + row['dist_km'], capacity=capacity, + transmission_multiplier=tm)) + except Exception: + logger.exception("Error computing 
costs for all connections in {}" + .format(cls)) + raise + + return np.array(costs, dtype='float32')
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/hybrids/hybrid_methods.html b/_modules/reV/hybrids/hybrid_methods.html new file mode 100644 index 000000000..ad685119b --- /dev/null +++ b/_modules/reV/hybrids/hybrid_methods.html @@ -0,0 +1,760 @@ + + + + + + reV.hybrids.hybrid_methods — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.hybrids.hybrid_methods

+# -*- coding: utf-8 -*-
+"""Collection of functions used to hybridize columns in rep profiles meta.
+
+@author: ppinchuk
+"""
+
+
+
[docs]def aggregate_solar_capacity(h): + """Compute the total solar capcity allowed in hybridization. + + Note + ---- + No limiting is done on the ratio of wind to solar. This method + checks for an existing 'hybrid_solar_capacity'. If one does not exist, + it is assumed that there is no limit on the solar to wind capacity + ratio and the solar capacity is copied into this new column. + + Parameters + ---------- + h : `reV.hybrids.Hybridization` + Instance of `reV.hybrids.Hybridization` class containing the + attribute `hybrid_meta`, which is a DataFrame containing + hybridized meta data. + + Returns + ------- + data : Series | None + A series of data containing the capacity allowed in the hybrid + capacity sum, or `None` if 'hybrid_solar_capacity' already exists. + + Notes + ----- + + """ + if 'hybrid_solar_capacity' in h.hybrid_meta: + return None + return h.hybrid_meta['solar_capacity']
+ + +
[docs]def aggregate_wind_capacity(h): + """Compute the total wind capcity allowed in hybridization. + + Note + ---- + No limiting is done on the ratio of wind to solar. This method + checks for an existing 'hybrid_wind_capacity'. If one does not exist, + it is assumed that there is no limit on the solar to wind capacity + ratio and the wind capacity is copied into this new column. + + Parameters + ---------- + h : `reV.hybrids.Hybridization` + Instance of `reV.hybrids.Hybridization` class containing the + attribute `hybrid_meta`, which is a DataFrame containing + hybridized meta data. + + Returns + ------- + data : Series | None + A series of data containing the capacity allowed in the hybrid + capacity sum, or `None` if 'hybrid_solar_capacity' already exists. + + Notes + ----- + + """ + if 'hybrid_wind_capacity' in h.hybrid_meta: + return None + return h.hybrid_meta['wind_capacity']
+ + +
[docs]def aggregate_capacity(h): + """Compute the total capcity by summing the individual capacities. + + Parameters + ---------- + h : `reV.hybrids.Hybridization` + Instance of `reV.hybrids.Hybridization` class containing the + attribute `hybrid_meta`, which is a DataFrame containing + hybridized meta data. + + Returns + ------- + data : Series | None + A series of data containing the aggregated capacity, or `None` + if the capacity columns are missing. + """ + + sc, wc = 'hybrid_solar_capacity', 'hybrid_wind_capacity' + missing_solar_cap = sc not in h.hybrid_meta.columns + missing_wind_cap = wc not in h.hybrid_meta.columns + if missing_solar_cap or missing_wind_cap: + return None + + total_cap = h.hybrid_meta[sc] + h.hybrid_meta[wc] + return total_cap
+ + +
[docs]def aggregate_capacity_factor(h): + """Compute the capacity-weighted mean capcity factor. + + Parameters + ---------- + h : `reV.hybrids.Hybridization` + Instance of `reV.hybrids.Hybridization` class containing the + attribute `hybrid_meta`, which is a DataFrame containing + hybridized meta data. + + Returns + ------- + data : Series | None + A series of data containing the aggregated capacity, or `None` + if the capacity and/or mean_cf columns are missing. + """ + + sc, wc = 'hybrid_solar_capacity', 'hybrid_wind_capacity' + scf, wcf = 'solar_mean_cf', 'wind_mean_cf' + missing_solar_cap = sc not in h.hybrid_meta.columns + missing_wind_cap = wc not in h.hybrid_meta.columns + missing_solar_mean_cf = scf not in h.hybrid_meta.columns + missing_wind_mean_cf = wcf not in h.hybrid_meta.columns + missing_any = (missing_solar_cap or missing_wind_cap + or missing_solar_mean_cf or missing_wind_mean_cf) + if missing_any: + return None + + solar_cf_weighted = h.hybrid_meta[sc] * h.hybrid_meta[scf] + wind_cf_weighted = h.hybrid_meta[wc] * h.hybrid_meta[wcf] + total_capacity = aggregate_capacity(h) + hybrid_cf = (solar_cf_weighted + wind_cf_weighted) / total_capacity + return hybrid_cf
+ + +HYBRID_METHODS = { + 'hybrid_solar_capacity': aggregate_solar_capacity, + 'hybrid_wind_capacity': aggregate_wind_capacity, + 'hybrid_capacity': aggregate_capacity, + 'hybrid_mean_cf': aggregate_capacity_factor +} +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/hybrids/hybrids.html b/_modules/reV/hybrids/hybrids.html new file mode 100644 index 000000000..e19bc3430 --- /dev/null +++ b/_modules/reV/hybrids/hybrids.html @@ -0,0 +1,1810 @@ + + + + + + reV.hybrids.hybrids — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.hybrids.hybrids

+# -*- coding: utf-8 -*-
+"""reV Hybridization module.
+
+@author: ppinchuk
+"""
+import logging
+import numpy as np
+import re
+import pandas as pd
+from string import ascii_letters
+from warnings import warn
+from collections import namedtuple
+
+from reV.handlers.outputs import Outputs
+from reV.utilities.exceptions import (FileInputError, InputError,
+                                      InputWarning, OutputWarning)
+from reV.hybrids.hybrid_methods import HYBRID_METHODS
+
+from rex.resource import Resource
+from rex.utilities.utilities import to_records_array
+
+logger = logging.getLogger(__name__)
+
+MERGE_COLUMN = 'sc_point_gid'
+PROFILE_DSET_REGEX = 'rep_profiles_[0-9]+$'
+SOLAR_PREFIX = 'solar_'
+WIND_PREFIX = 'wind_'
+NON_DUPLICATE_COLS = {
+    'latitude', 'longitude', 'country', 'state', 'county', 'elevation',
+    'timezone', 'sc_point_gid', 'sc_row_ind', 'sc_col_ind'
+}
+DROPPED_COLUMNS = ['gid']
+DEFAULT_FILL_VALUES = {'solar_capacity': 0, 'wind_capacity': 0,
+                       'solar_mean_cf': 0, 'wind_mean_cf': 0}
+OUTPUT_PROFILE_NAMES = ['hybrid_profile',
+                        'hybrid_solar_profile',
+                        'hybrid_wind_profile']
+RatioColumns = namedtuple('RatioColumns', ['num', 'denom', 'fixed'],
+                          defaults=(None, None, None))
+
+
+
[docs]class ColNameFormatter: + """Column name formatting helper class. """ + ALLOWED = set(ascii_letters) + +
[docs] @classmethod + def fmt(cls, n): + """Format an input column name to remove excess chars and whitespace. + + This method should help facilitate the merging of column names + between two DataFrames. + + Parameters + ---------- + n : str + Input column name. + + Returns + ------- + str + The column name with all characters except ascii stripped + and all lowercase. + """ + return ''.join(c for c in n if c in cls.ALLOWED).lower()
+ + +
[docs]class HybridsData: + """Hybrids input data container. """ + + def __init__(self, solar_fpath, wind_fpath): + """ + Parameters + ---------- + solar_fpath : str + Filepath to rep profile output file to extract solar profiles and + summaries from. + wind_fpath : str + Filepath to rep profile output file to extract wind profiles and + summaries from. + """ + self.solar_fpath = solar_fpath + self.wind_fpath = wind_fpath + self.profile_dset_names = [] + self.merge_col_overlap_values = set() + self._solar_meta = None + self._wind_meta = None + self._solar_time_index = None + self._wind_time_index = None + self._hybrid_time_index = None + self.__profile_reg_check = re.compile(PROFILE_DSET_REGEX) + self.__solar_cols = self.solar_meta.columns.map(ColNameFormatter.fmt) + self.__wind_cols = self.wind_meta.columns.map(ColNameFormatter.fmt) + + @property + def solar_meta(self): + """Summary for the solar representative profiles. + + Returns + ------- + solar_meta : pd.DataFrame + Summary for the solar representative profiles. + """ + if self._solar_meta is None: + with Resource(self.solar_fpath) as res: + self._solar_meta = res.meta + return self._solar_meta + + @property + def wind_meta(self): + """Summary for the wind representative profiles. + + Returns + ------- + wind_meta : pd.DataFrame + Summary for the wind representative profiles. + """ + if self._wind_meta is None: + with Resource(self.wind_fpath) as res: + self._wind_meta = res.meta + return self._wind_meta + + @property + def solar_time_index(self): + """Get the time index for the solar rep profiles. + + Returns + ------- + solar_time_index : pd.datetimeindex + Time index sourced from the solar reV gen file. + """ + if self._solar_time_index is None: + with Resource(self.solar_fpath) as res: + self._solar_time_index = res.time_index + return self._solar_time_index + + @property + def wind_time_index(self): + """Get the time index for the wind rep profiles. 
+ + Returns + ------- + wind_time_index : pd.datetimeindex + Time index sourced from the wind reV gen file. + """ + if self._wind_time_index is None: + with Resource(self.wind_fpath) as res: + self._wind_time_index = res.time_index + return self._wind_time_index + + @property + def hybrid_time_index(self): + """Get the time index for the hybrid rep profiles. + + Returns + ------- + hybrid_time_index : pd.datetimeindex + Time index for the hybrid rep profiles. + """ + if self._hybrid_time_index is None: + self._hybrid_time_index = self.solar_time_index.join( + self.wind_time_index, how='inner') + return self._hybrid_time_index + +
[docs] def contains_col(self, col_name): + """Check if input column name exists in either meta data set. + + Parameters + ---------- + col_name : str + Name of column to check for. + + Returns + ------- + bool + Whether or not the column is found in either meta data set. + """ + fmt_name = ColNameFormatter.fmt(col_name) + col_in_solar = fmt_name in self.__solar_cols + col_in_wind = fmt_name in self.__wind_cols + return col_in_solar or col_in_wind
+ +
[docs] def validate(self): + """Validate the input data. + + This method checks for a minimum time index length, a unique + profile, and unique merge column that overlaps between both data + sets. + + """ + self._validate_time_index() + self._validate_num_profiles() + self._validate_merge_col_exists() + self._validate_unique_merge_col() + self._validate_merge_col_overlaps()
+ + def _validate_time_index(self): + """Validate the hybrid time index to be of len >= 8760. + + Raises + ------ + FileInputError + If len(time_index) < 8760 for the hybrid profile. + """ + if len(self.hybrid_time_index) < 8760: + msg = ("The length of the merged time index ({}) is less than " + "8760. Please ensure that the input profiles have a " + "time index that overlaps >= 8760 times.") + e = msg.format(len(self.hybrid_time_index)) + logger.error(e) + raise FileInputError(e) + + def _validate_num_profiles(self): + """Validate the number of input profiles. + + Raises + ------ + FileInputError + If # of rep_profiles > 1. + """ + for fp in [self.solar_fpath, self.wind_fpath]: + with Resource(fp) as res: + profile_dset_names = [ + n for n in res.dsets + if self.__profile_reg_check.match(n) + ] + if not profile_dset_names: + msg = ("Did not find any data sets matching the regex: " + "{!r} in {!r}. Please ensure that the profile data " + "exists and that the data set is named correctly.") + e = msg.format(PROFILE_DSET_REGEX, fp) + logger.error(e) + raise FileInputError(e) + elif len(profile_dset_names) > 1: + msg = ("Found more than one profile in {!r}: {}. " + "This module is not intended for hybridization of " + "multiple representative profiles. Please re-run " + "on a single aggregated profile.") + e = msg.format(fp, profile_dset_names) + logger.error(e) + raise FileInputError(e) + else: + self.profile_dset_names += profile_dset_names + + def _validate_merge_col_exists(self): + """Validate the existence of the merge column. + + Raises + ------ + FileInputError + If merge column is missing from either the solar or + the wind meta data. + """ + msg = ("Cannot hybridize: merge column {!r} missing from the " + "{} meta data! 
({!r})") + + mc = ColNameFormatter.fmt(MERGE_COLUMN) + for cols, fp, res in zip([self.__solar_cols, self.__wind_cols], + [self.solar_fpath, self.wind_fpath], + ['solar', 'wind']): + if mc not in cols: + e = msg.format(MERGE_COLUMN, res, fp) + logger.error(e) + raise FileInputError(e) + + def _validate_unique_merge_col(self): + """Validate the existence of unique values in the merge column. + + Raises + ------ + FileInputError + If merge column contains duplicate values in either the solar or + the wind meta data. + """ + msg = ("Duplicate {}s were found. This is likely due to resource " + "class binning, which is not supported at this time. " + "Please re-run supply curve aggregation without " + "resource class binning and ensure there are no duplicate " + "values in {!r}. File: {!r}") + + mc = ColNameFormatter.fmt(MERGE_COLUMN) + for ds, cols, fp in zip([self.solar_meta, self.wind_meta], + [self.__solar_cols, self.__wind_cols], + [self.solar_fpath, self.wind_fpath]): + merge_col = ds.columns[cols == mc].item() + if not ds[merge_col].is_unique: + e = msg.format(merge_col, merge_col, fp) + logger.error(e) + raise FileInputError(e) + + def _validate_merge_col_overlaps(self): + """Validate the existence of overlap in the merge column values. + + Raises + ------ + FileInputError + If merge column values do not overlap between the tow input files. + """ + mc = ColNameFormatter.fmt(MERGE_COLUMN) + merge_col = self.solar_meta.columns[self.__solar_cols == mc].item() + solar_vals = set(self.solar_meta[merge_col].values) + merge_col = self.wind_meta.columns[self.__wind_cols == mc].item() + wind_vals = set(self.wind_meta[merge_col].values) + self.merge_col_overlap_values = solar_vals & wind_vals + + if not self.merge_col_overlap_values: + msg = ("No overlap detected in the values of {!r} across the " + "input files. 
Please ensure that at least one of the " + "{!r} values is the same for input files {!r} and {!r}") + e = msg.format(merge_col, merge_col, self.solar_fpath, + self.wind_fpath) + logger.error(e) + raise FileInputError(e)
+ + +
[docs]class MetaHybridizer: + """Framework to handle hybridization of meta data.""" + + _INTERNAL_COL_PREFIX = '_h_internal' + + def __init__(self, data, allow_solar_only=False, + allow_wind_only=False, fillna=None, + limits=None, ratio_bounds=None, + ratio='solar_capacity/wind_capacity'): + """ + Parameters + ---------- + data : `HybridsData` + Instance of `HybridsData` containing input data to + hybridize. + allow_solar_only : bool, optional + Option to allow SC points with only solar capacity + (no wind). By default, ``False``. + allow_wind_only : bool, optional + Option to allow SC points with only wind capacity + (no solar), By default, ``False``. + fillna : dict, optional + Dictionary containing column_name, fill_value pairs + representing any fill values that should be applied after + merging the wind and solar meta. Note that column names will + likely have to be prefixed with ``solar`` or ``wind``. + By default, ``None``. + limits : dict, optional + Option to specify mapping (in the form of a dictionary) of + {colum_name: max_value} representing the upper limit + (maximum value) for the values of a column in the merged + meta. For example, `limits={'solar_capacity': 100}` would + limit all the values of the solar capacity in the merged + meta to a maximum value of 100. This limit is applied + *BEFORE* ratio calculations. The names of the columns should + match the column names in the merged meta, so they are + likely prefixed with ``solar`` or ``wind`. By default, + ``None`` (no limits applied). + ratio_bounds : tuple, optional + Option to set ratio bounds (in two-tuple form) on the + columns of the `ratio` input. For example, + `ratio_bounds=(0.5, 1.5)` would adjust the values of both of + the `ratio` columns such that their ratio is always between + half and double (e.g., no value would be more than double + the other). To specify a single ratio value, use the same + value as the upper and lower bound. 
For example, + `ratio_bounds=(1, 1)` would adjust the values of both of the + `ratio` columns such that their ratio is always equal. + By default, ``None`` (no limit on the ratio). + ratio : str, optional + Option to specify the columns used to calculate the ratio + that is limited by the `ratio_bounds` input. This input is a + string in the form + "numerator_column_name/denominator_column_name". + For example, `ratio='solar_capacity/wind_capacity'` would + limit the ratio of the solar to wind capacities as specified + by the `ratio_bounds` input. If `ratio_bounds` is ``None``, + this input does nothing. The names of the columns should be + prefixed with one of the prefixes defined as class + variables. By default ``'solar_capacity/wind_capacity'``. + """ + self.data = data + self._allow_solar_only = allow_solar_only + self._allow_wind_only = allow_wind_only + self._fillna = {**DEFAULT_FILL_VALUES, **(fillna or {})} + self._limits = limits or {} + self._ratio_bounds = ratio_bounds + self._ratio = ratio + self._hybrid_meta = None + self.__hybrid_meta_cols = None + self.__col_name_map = None + self.__solar_rpi_n = '{}_solar_rpidx'.format(self._INTERNAL_COL_PREFIX) + self.__wind_rpi_n = '{}_wind_rpidx'.format(self._INTERNAL_COL_PREFIX) + + @property + def hybrid_meta(self): + """Hybridized summary for the representative profiles. + + Returns + ------- + hybrid_meta : pd.DataFrame + Summary for the hybridized representative profiles. + At the very least, this has a column that the data was merged on. + """ + if self._hybrid_meta is None or self.__hybrid_meta_cols is None: + return self._hybrid_meta + else: + return self._hybrid_meta[self.__hybrid_meta_cols] + +
[docs] def validate_input(self): + """Validate the input parameters. + + This method validates that the input limit, fill, and ratio columns + are formatted correctly. + """ + self._validate_limits_cols_prefixed() + self._validate_fillna_cols_prefixed() + self._validate_ratio_input()
+ + def _validate_limits_cols_prefixed(self): + """Ensure the limits columns are formatted correctly. + + This check is important because the limiting happens + after the meta has been merged (so columns are already prefixed), + but before the hybrid columns are computed. As a result, the limits + columns _must_ have a valid prefix. + + Raises + ------ + InputError + If limits columns are not prefixed correctly. + """ + for col in self._limits: + self.__validate_col_prefix( + col, (SOLAR_PREFIX, WIND_PREFIX), input_name='limits' + ) + + @staticmethod + def __validate_col_prefix(col, prefixes, input_name): + """Validate the the col starts with the correct prefix. """ + + missing = [not col.startswith(p) for p in prefixes] + if all(missing): + msg = ("Input {0} column {1!r} does not start with a valid " + "prefix: {2!r}. Please ensure that the {0} column " + "names specify the correct resource prefix.") + e = msg.format(input_name, col, prefixes) + logger.error(e) + raise InputError(e) + + def _validate_fillna_cols_prefixed(self): + """Ensure the fillna columns are formatted correctly. + + This check is important because the fillna step happens + after the meta has been merged (so columns are already prefixed), + but before the hybrid columns are computed. As a result, the fillna + columns _must_ have a valid prefix. + + Raises + ------ + InputError + If fillna columns are not prefixed correctly. + """ + for col in self._fillna: + self.__validate_col_prefix( + col, (SOLAR_PREFIX, WIND_PREFIX), input_name='fillna' + ) + + def _validate_ratio_input(self): + """Validate the ratio input parameters. + + This method validates that the input ratio columns are formatted + correctly and exist in the input data. It also verifies that + the `ratio_bounds` is correctly formatted. 
+ """ + if self._ratio_bounds is None: + return + + self._validate_ratio_bounds() + self._validate_ratio_type() + self._validate_ratio_format() + self._validate_ratio_cols_prefixed() + self._validate_ratio_cols_exist() + + def _validate_ratio_bounds(self): + """Ensure the ratio value is input correctly. + + Raises + ------ + InputError + If ratio is not a len 2 container of floats. + """ + + try: + if len(self._ratio_bounds) != 2: + msg = ("Length of input for ratio_bounds is {} - but is " + "required to be of length 2. Please make sure this " + "input is a len 2 container of floats. If you would " + "like to specify a single ratio value, use the same " + "float for both limits (i.e. ratio_bounds=(1, 1)).") + e = msg.format(len(self._ratio_bounds)) + logger.error(e) + raise InputError(e) + except TypeError: + msg = ("Input for ratio_bounds not understood: {!r}. " + "Please make sure this value is a len 2 container " + "of floats.") + e = msg.format(self._ratio_bounds) + logger.error(e) + raise InputError(e) from None + + def _validate_ratio_type(self): + """Ensure that the ratio input is a string. + + Raises + ------ + InputError + If `ratio` is not a string. + """ + if not isinstance(self._ratio, str): + msg = ("Ratio input type {} not understood. Please make sure " + "the ratio input is a string in the form " + "'numerator_column_name/denominator_column_name'. Ratio " + "input: {!r}") + e = msg.format(type(self._ratio), self._ratio) + logger.error(e) + raise InputError(e) + + def _validate_ratio_format(self): + """Validate that the ratio input format is correct and can be parsed. + + Raises + ------ + InputError + If the '/' character is missing or of there are too many + '/' characters. + """ + if '/' not in self._ratio: + msg = ("Ratio input {} does not contain the '/' character. 
" + "Please make sure the ratio input is a string in the form " + "'numerator_column_name/denominator_column_name'") + e = msg.format(self._ratio) + logger.error(e) + raise InputError(e) + + if len(self._ratio_cols) != 2: + msg = ("Ratio input {} contains too many '/' characters. Please " + "make sure the ratio input is a string in the form " + "'numerator_column_name/denominator_column_name'.") + e = msg.format(self._ratio) + logger.error(e) + raise InputError(e) + + def _validate_ratio_cols_prefixed(self): + """Ensure the ratio columns are formatted correctly. + + This check is important because the ratio limit step happens + after the meta has been merged (so columns are already prefixed), + but before the hybrid columns are computed. As a result, the ratio + columns _must_ have a valid prefix. + + Raises + ------ + InputError + If ratio columns are not prefixed correctly. + """ + + for col in self._ratio_cols: + self.__validate_col_prefix( + col, (SOLAR_PREFIX, WIND_PREFIX), input_name='ratios' + ) + + def _validate_ratio_cols_exist(self): + """Ensure the ratio columns exist if a ratio is specified. + + Raises + ------ + FileInputError + If ratio columns are not found in the meta data. + """ + + for col in self._ratio_cols: + no_prefix_name = "_".join(col.split('_')[1:]) + if not self.data.contains_col(no_prefix_name): + msg = ("Input ratios column {!r} not found in either meta " + "data! Please check the input files {!r} and {!r}") + e = msg.format(no_prefix_name, self.data.solar_fpath, + self.data.wind_fpath) + logger.error(e) + raise FileInputError(e) + + @property + def _ratio_cols(self): + """Get the ratio columns from the ratio input. """ + if self._ratio is None: + return [] + return self._ratio.strip().split('/') + +
[docs] def hybridize(self): + """Combine the solar and wind metas and run hybridize methods.""" + self._format_meta_pre_merge() + self._merge_solar_wind_meta() + self._verify_lat_long_match_post_merge() + self._format_meta_post_merge() + self._fillna_meta_cols() + self._apply_limits() + self._limit_by_ratio() + self._add_hybrid_cols() + self._sort_hybrid_meta_cols()
+ + def _format_meta_pre_merge(self): + """Prepare solar and wind meta for merging. """ + self.__col_name_map = { + ColNameFormatter.fmt(c): c + for c in self.data.solar_meta.columns.values + } + + self._rename_cols(self.data.solar_meta, prefix=SOLAR_PREFIX) + self._rename_cols(self.data.wind_meta, prefix=WIND_PREFIX) + + self._save_rep_prof_index_internally() + + @staticmethod + def _rename_cols(df, prefix): + """Replace column names with the ColNameFormatter.fmt is needed. """ + df.columns = [ + ColNameFormatter.fmt(col_name) + if col_name in NON_DUPLICATE_COLS + else '{}{}'.format(prefix, col_name) + for col_name in df.columns.values + ] + + def _save_rep_prof_index_internally(self): + """Save rep profiles index in hybrid meta for access later. """ + + self.data.solar_meta[self.__solar_rpi_n] = self.data.solar_meta.index + self.data.wind_meta[self.__wind_rpi_n] = self.data.wind_meta.index + + def _merge_solar_wind_meta(self): + """Merge the wind and solar meta DataFrames. """ + self._hybrid_meta = self.data.solar_meta.merge( + self.data.wind_meta, + on=ColNameFormatter.fmt(MERGE_COLUMN), + suffixes=[None, '_x'], how=self._merge_type() + ) + + def _merge_type(self): + """Determine the type of merge to use for meta based on user input. """ + if self._allow_solar_only and self._allow_wind_only: + return 'outer' + elif self._allow_solar_only and not self._allow_wind_only: + return 'left' + elif not self._allow_solar_only and self._allow_wind_only: + return 'right' + return 'inner' + + def _format_meta_post_merge(self): + """Format hybrid meta after merging. """ + + duplicate_cols = [n for n in self._hybrid_meta.columns if "_x" in n] + self._propagate_duplicate_cols(duplicate_cols) + self._drop_cols(duplicate_cols) + self._hybrid_meta.rename(self.__col_name_map, inplace=True, axis=1) + self._hybrid_meta.index.name = 'gid' + + def _propagate_duplicate_cols(self, duplicate_cols): + """Fill missing column values from outer merge. 
""" + for duplicate in duplicate_cols: + no_suffix = "_".join(duplicate.split("_")[:-1]) + null_idx = self._hybrid_meta[no_suffix].isnull() + non_null_vals = self._hybrid_meta.loc[null_idx, duplicate].values + self._hybrid_meta.loc[null_idx, no_suffix] = non_null_vals + + def _drop_cols(self, duplicate_cols): + """Drop any remaning duplicate and 'DROPPED_COLUMNS' columns. """ + self._hybrid_meta.drop( + duplicate_cols + DROPPED_COLUMNS, + axis=1, inplace=True, errors='ignore' + ) + + def _sort_hybrid_meta_cols(self): + """Sort the columns of the hybrid meta. """ + self.__hybrid_meta_cols = sorted( + [c for c in self._hybrid_meta.columns + if not c.startswith(self._INTERNAL_COL_PREFIX)], + key=self._column_sorting_key + ) + + def _column_sorting_key(self, c): + """Helper function to sort hybrid meta columns. """ + first_index = 0 + if c.startswith('hybrid'): + first_index = 1 + elif c.startswith('solar'): + first_index = 2 + elif c.startswith('wind'): + first_index = 3 + elif c == MERGE_COLUMN: + first_index = -1 + return first_index, self._hybrid_meta.columns.get_loc(c) + + def _verify_lat_long_match_post_merge(self): + """Verify that all the lat/lon values match post merge.""" + lat = self._verify_col_match_post_merge(col_name='latitude') + lon = self._verify_col_match_post_merge(col_name='longitude') + if not lat or not lon: + msg = ("Detected mismatched coordinate values (latitude or " + "longitude) post merge. Please ensure that all matching " + "values of {!r} correspond to the same values of latitude " + "and longitude across the input files {!r} and {!r}") + e = msg.format(MERGE_COLUMN, self.data.solar_fpath, + self.data.wind_fpath) + logger.error(e) + raise FileInputError(e) + + def _verify_col_match_post_merge(self, col_name): + """Verify that all (non-null) values in a column match post merge. 
""" + c1, c2 = col_name, '{}_x'.format(col_name) + if c1 in self._hybrid_meta.columns and c2 in self._hybrid_meta.columns: + compare_df = self._hybrid_meta[ + (self._hybrid_meta[c1].notnull()) + & (self._hybrid_meta[c2].notnull()) + ] + return np.allclose(compare_df[c1], compare_df[c2]) + else: + return True + + def _fillna_meta_cols(self): + """Fill N/A values as specified by user (and internals). """ + for col_name, fill_value in self._fillna.items(): + if col_name in self._hybrid_meta.columns: + self._hybrid_meta[col_name].fillna(fill_value, inplace=True) + else: + self.__warn_missing_col(col_name, action='fill') + + self._hybrid_meta[self.__solar_rpi_n].fillna(-1, inplace=True) + self._hybrid_meta[self.__wind_rpi_n].fillna(-1, inplace=True) + + @staticmethod + def __warn_missing_col(col_name, action): + """Warn that a column the user request an action for is missing. """ + msg = ("Skipping {} values for {!r}: Unable to find column " + "in hybrid meta. Did you forget to prefix with " + "{!r} or {!r}? ") + w = msg.format(action, col_name, SOLAR_PREFIX, WIND_PREFIX) + logger.warning(w) + warn(w, InputWarning) + + def _apply_limits(self): + """Clip column values as specified by user. """ + for col_name, max_value in self._limits.items(): + if col_name in self._hybrid_meta.columns: + self._hybrid_meta[col_name].clip(upper=max_value, inplace=True) + else: + self.__warn_missing_col(col_name, action='limit') + + def _limit_by_ratio(self): + """ Limit the given pair of ratio columns based on input ratio. 
""" + + if self._ratio_bounds is None: + return + + numerator_col, denominator_col = self._ratio_cols + min_ratio, max_ratio = sorted(self._ratio_bounds) + + overlap_idx = self._hybrid_meta[MERGE_COLUMN].isin( + self.data.merge_col_overlap_values + ) + + numerator_vals = self._hybrid_meta[numerator_col].copy() + denominator_vals = self._hybrid_meta[denominator_col].copy() + + ratios = ( + numerator_vals.loc[overlap_idx] + / denominator_vals.loc[overlap_idx] + ) + ratio_too_low = (ratios < min_ratio) & overlap_idx + ratio_too_high = (ratios > max_ratio) & overlap_idx + + numerator_vals.loc[ratio_too_high] = ( + denominator_vals.loc[ratio_too_high].values * max_ratio + ) + denominator_vals.loc[ratio_too_low] = ( + numerator_vals.loc[ratio_too_low].values / min_ratio + ) + + h_num_name = "hybrid_{}".format(numerator_col) + h_denom_name = "hybrid_{}".format(denominator_col) + self._hybrid_meta[h_num_name] = numerator_vals.values + self._hybrid_meta[h_denom_name] = denominator_vals.values + + def _add_hybrid_cols(self): + """Add new hybrid columns using registered hybrid methods. """ + for new_col_name, method in HYBRID_METHODS.items(): + out = method(self) + if out is not None: + try: + self._hybrid_meta[new_col_name] = out + except ValueError as e: + msg = ("Unable to add {!r} column to hybrid meta. The " + "following exception was raised when adding " + "the data output by '{}': {!r}.") + w = msg.format(new_col_name, method.__name__, e) + logger.warning(w) + warn(w, OutputWarning) + + @property + def solar_profile_indices_map(self): + """Map hybrid to solar rep indices. + + Returns + ------- + hybrid_indices : np.ndarray + Index values corresponding to hybrid rep profiles. + solar_indices : np.ndarray + Index values of the solar rep profiles corresponding + to the hybrid rep profile indices. 
+ """ + + if self._hybrid_meta is None: + return np.array([]), np.array([]) + + idxs = self._hybrid_meta[self.__solar_rpi_n].astype(int) + idxs = idxs[idxs >= 0] + + return idxs.index.values, idxs.values + + @property + def wind_profile_indices_map(self): + """Map hybrid to wind rep indices. + + Returns + ------- + hybrid_indices : np.ndarray + Index values corresponding to hybrid rep profiles. + wind_indices : np.ndarray + Index values of the wind rep profiles corresponding + to the hybrid rep profile indices. + """ + if self._hybrid_meta is None: + return np.array([]), np.array([]) + + idxs = self._hybrid_meta[self.__wind_rpi_n].astype(int) + idxs = idxs[idxs >= 0] + + return idxs.index.values, idxs.values
+ + +
[docs]class Hybridization: + """Hybridization""" + + def __init__(self, solar_fpath, wind_fpath, allow_solar_only=False, + allow_wind_only=False, fillna=None, limits=None, + ratio_bounds=None, ratio='solar_capacity/wind_capacity'): + """Framework to handle hybridization of SC and corresponding profiles. + + ``reV`` hybrids computes a "hybrid" wind and solar supply curve, + where each supply curve point contains some wind and some solar + capacity. Various ratio limits on wind-to-solar farm properties + (e.g. wind-to-solar capacity) can be applied during the + hybridization process. Hybrid generation profiles are also + computed during this process. + + Parameters + ---------- + solar_fpath : str + Filepath to rep profile output file to extract solar + profiles and summaries from. + wind_fpath : str + Filepath to rep profile output file to extract wind profiles + and summaries from. + allow_solar_only : bool, optional + Option to allow SC points with only solar capacity + (no wind). By default, ``False``. + allow_wind_only : bool, optional + Option to allow SC points with only wind capacity + (no solar). By default, ``False``. + fillna : dict, optional + Dictionary containing column_name, fill_value pairs + representing any fill values that should be applied after + merging the wind and solar meta. Note that column names will + likely have to be prefixed with ``solar`` or ``wind``. + By default ``None``. + limits : dict, optional + Option to specify mapping (in the form of a dictionary) of + {colum_name: max_value} representing the upper limit + (maximum value) for the values of a column in the merged + meta. For example, ``limits={'solar_capacity': 100}`` would + limit all the values of the solar capacity in the merged + meta to a maximum value of 100. This limit is applied + *BEFORE* ratio calculations. The names of the columns should + match the column names in the merged meta, so they are + likely prefixed with ``solar`` or ``wind``. 
+ By default, ``None`` (no limits applied). + ratio_bounds : tuple, optional + Option to set ratio bounds (in two-tuple form) on the + columns of the ``ratio`` input. For example, + ``ratio_bounds=(0.5, 1.5)`` would adjust the values of both + of the ``ratio`` columns such that their ratio is always + between half and double (e.g., no value would be more than + double the other). To specify a single ratio value, use the + same value as the upper and lower bound. For example, + ``ratio_bounds=(1, 1)`` would adjust the values of both of + the ``ratio`` columns such that their ratio is always equal. + By default, ``None`` (no limit on the ratio). + ratio : str, optional + Option to specify the columns used to calculate the ratio + that is limited by the `ratio_bounds` input. This input is a + string in the form "{numerator_column}/{denominator_column}". + For example, ``ratio='solar_capacity/wind_capacity'`` + would limit the ratio of the solar to wind capacities as + specified by the ``ratio_bounds`` input. If ``ratio_bounds`` + is None, this input does nothing. The names of the columns + should be prefixed with one of the prefixes defined as class + variables. By default ``'solar_capacity/wind_capacity'``. 
+ """ + + logger.info('Running hybridization of rep profiles with solar_fpath: ' + '"{}"'.format(solar_fpath)) + logger.info('Running hybridization of rep profiles with solar_fpath: ' + '"{}"'.format(wind_fpath)) + logger.info('Running hybridization of rep profiles with ' + 'allow_solar_only: "{}"'.format(allow_solar_only)) + logger.info('Running hybridization of rep profiles with ' + 'allow_wind_only: "{}"'.format(allow_wind_only)) + logger.info('Running hybridization of rep profiles with fillna: "{}"' + .format(fillna)) + logger.info('Running hybridization of rep profiles with limits: "{}"' + .format(limits)) + logger.info('Running hybridization of rep profiles with ratio_bounds: ' + '"{}"'.format(ratio_bounds)) + logger.info('Running hybridization of rep profiles with ratio: "{}"' + .format(ratio)) + + self.data = HybridsData(solar_fpath, wind_fpath) + self.meta_hybridizer = MetaHybridizer( + data=self.data, allow_solar_only=allow_solar_only, + allow_wind_only=allow_wind_only, fillna=fillna, limits=limits, + ratio_bounds=ratio_bounds, ratio=ratio + ) + self._profiles = None + self._validate_input() + + def _validate_input(self): + """Validate the user input and input files. """ + self.data.validate() + self.meta_hybridizer.validate_input() + + @property + def solar_meta(self): + """Summary for the solar representative profiles. + + Returns + ------- + solar_meta : pd.DataFrame + Summary for the solar representative profiles. + """ + return self.data.solar_meta + + @property + def wind_meta(self): + """Summary for the wind representative profiles. + + Returns + ------- + wind_meta : pd.DataFrame + Summary for the wind representative profiles. + """ + return self.data.wind_meta + + @property + def hybrid_meta(self): + """Hybridized summary for the representative profiles. + + Returns + ------- + hybrid_meta : pd.DataFrame + Summary for the hybridized representative profiles. + At the very least, this has a column that the data was merged on. 
+ """ + return self.meta_hybridizer.hybrid_meta + + @property + def solar_time_index(self): + """Get the time index for the solar rep profiles. + + Returns + ------- + solar_time_index : pd.Datetimeindex + Time index sourced from the solar rep profile file. + """ + return self.data.solar_time_index + + @property + def wind_time_index(self): + """Get the time index for the wind rep profiles. + + Returns + ------- + wind_time_index : pd.Datetimeindex + Time index sourced from the wind rep profile file. + """ + return self.data.wind_time_index + + @property + def hybrid_time_index(self): + """Get the time index for the hybrid rep profiles. + + Returns + ------- + hybrid_time_index : pd.Datetimeindex + Time index for the hybrid rep profiles. + """ + return self.data.hybrid_time_index + + @property + def profiles(self): + """Get the arrays of the hybridized representative profiles. + + Returns + ------- + profiles : dict + Dict of hybridized representative profiles. + """ + return self._profiles + +
[docs] def run(self, fout=None, save_hybrid_meta=True): + """Run hybridization of profiles and save to disc. + + Parameters + ---------- + fout : str, optional + Filepath to output HDF5 file. If ``None``, output data are + not written to a file. By default, ``None``. + save_hybrid_meta : bool, optional + Flag to save hybrid SC table to hybrid rep profile output. + By default, ``True``. + + Returns + ------- + str + Filepath to output h5 file. + """ + + self.run_meta() + self.run_profiles() + + if fout is not None: + self.save_profiles(fout, save_hybrid_meta=save_hybrid_meta) + + logger.info('Hybridization of representative profiles complete!') + return fout
+ +
[docs] def run_meta(self): + """Compute the hybridized profiles. + + Returns + ------- + `Hybridization` + Instance of Hybridization object (itself) containing the + hybridized meta as an attribute. + """ + self.meta_hybridizer.hybridize() + return self
+ +
[docs] def run_profiles(self): + """Compute all hybridized profiles. + + Returns + ------- + `Hybridization` + Instance of Hybridization object (itself) containing the + hybridized profiles as attributes. + """ + + logger.info('Running hybrid profile calculations.') + + self._init_profiles() + self._compute_hybridized_profile_components() + self._compute_hybridized_profiles_from_components() + + logger.info('Profile hybridization complete.') + + return self
+ + def _init_profiles(self): + """Initialize the output rep profiles attribute.""" + self._profiles = { + k: np.zeros((len(self.hybrid_time_index), len(self.hybrid_meta)), + dtype=np.float32) + for k in OUTPUT_PROFILE_NAMES} + + def _compute_hybridized_profile_components(self): + """Compute the resource components of the hybridized profiles. """ + + for params in self.__rep_profile_hybridization_params: + col, (hybrid_idxs, solar_idxs), fpath, p_name, dset_name = params + capacity = self.hybrid_meta.loc[hybrid_idxs, col].values + + with Resource(fpath) as res: + data = res[dset_name, + res.time_index.isin(self.hybrid_time_index)] + self._profiles[p_name][:, hybrid_idxs] = (data[:, solar_idxs] + * capacity) + + @property + def __rep_profile_hybridization_params(self): + """Zip the rep profile hybridization parameters. """ + + cap_col_names = ['hybrid_solar_capacity', 'hybrid_wind_capacity'] + idx_maps = [self.meta_hybridizer.solar_profile_indices_map, + self.meta_hybridizer.wind_profile_indices_map] + fpaths = [self.data.solar_fpath, self.data.wind_fpath] + zipped = zip(cap_col_names, idx_maps, fpaths, OUTPUT_PROFILE_NAMES[1:], + self.data.profile_dset_names) + return zipped + + def _compute_hybridized_profiles_from_components(self): + """Compute the hybridized profiles from the resource components. """ + + hp_name, sp_name, wp_name = OUTPUT_PROFILE_NAMES + self._profiles[hp_name] = (self._profiles[sp_name] + + self._profiles[wp_name]) + + def _init_h5_out(self, fout, save_hybrid_meta=True): + """Initialize an output h5 file for hybrid profiles. + + Parameters + ---------- + fout : str + Filepath to output h5 file. + save_hybrid_meta : bool + Flag to save hybrid SC table to hybrid rep profile output. 
+ """ + dsets = [] + shapes = {} + attrs = {} + chunks = {} + dtypes = {} + + for dset, data in self.profiles.items(): + dsets.append(dset) + shapes[dset] = data.shape + chunks[dset] = None + attrs[dset] = {Outputs.UNIT_ATTR: "MW"} + dtypes[dset] = data.dtype + + meta = self.hybrid_meta.copy() + for c in meta.columns: + try: + meta[c] = pd.to_numeric(meta[c]) + except ValueError: + pass + + Outputs.init_h5(fout, dsets, shapes, attrs, chunks, dtypes, + meta, time_index=self.hybrid_time_index) + + if save_hybrid_meta: + with Outputs(fout, mode='a') as out: + hybrid_meta = to_records_array(self.hybrid_meta) + out._create_dset('meta', hybrid_meta.shape, + hybrid_meta.dtype, data=hybrid_meta) + + def _write_h5_out(self, fout, save_hybrid_meta=True): + """Write hybrid profiles and meta to an output file. + + Parameters + ---------- + fout : str + Filepath to output h5 file. + save_hybrid_meta : bool + Flag to save hybrid SC table to hybrid rep profile output. + """ + + with Outputs(fout, mode='a') as out: + if 'meta' in out.datasets and save_hybrid_meta: + hybrid_meta = to_records_array(self.hybrid_meta) + out['meta'] = hybrid_meta + + for dset, data in self.profiles.items(): + out[dset] = data + +
[docs] def save_profiles(self, fout, save_hybrid_meta=True): + """Initialize fout and save profiles. + + Parameters + ---------- + fout : str + Filepath to output h5 file. + save_hybrid_meta : bool + Flag to save hybrid SC table to hybrid rep profile output. + """ + + self._init_h5_out(fout, save_hybrid_meta=save_hybrid_meta) + self._write_h5_out(fout, save_hybrid_meta=save_hybrid_meta)
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/losses/power_curve.html b/_modules/reV/losses/power_curve.html new file mode 100644 index 000000000..b88d9b698 --- /dev/null +++ b/_modules/reV/losses/power_curve.html @@ -0,0 +1,1738 @@ + + + + + + reV.losses.power_curve — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.losses.power_curve

+# -*- coding: utf-8 -*-
+"""reV power curve losses module.
+
+"""
+import json
+import logging
+import warnings
+from abc import ABC, abstractmethod
+
+import numpy as np
+from scipy.optimize import minimize_scalar
+
+from reV.utilities.exceptions import reVLossesValueError, reVLossesWarning
+from reV.losses.utils import _validate_arrays_not_empty
+
+logger = logging.getLogger(__name__)
+
+
+
class PowerCurve:
    """A turbine power curve.

    Attributes
    ----------
    wind_speed : :obj:`numpy.array`
        Wind speeds corresponding to the values in :attr:`generation`.
    generation : :obj:`numpy.array`
        Power generated (kW) at each speed in :attr:`wind_speed`. Must
        contain at least one positive value, and if a cutoff speed is
        detected (see `Warnings` below), all values above that wind
        speed must be zero.

    Warnings
    --------
    A cutoff speed is inferred from the ``generation`` input: a drop
    from the highest rated power straight to zero within a single
    ``wind_speed`` step marks the cutoff, and every curve computed from
    this object is clipped at that speed. If your power curve has a
    cutoff, make sure it follows this max-power-to-zero-in-one-step
    pattern.
    """

    def __init__(self, wind_speed, generation):
        """
        Parameters
        ----------
        wind_speed : array_like
            Iterable of wind speeds matching the ``generation`` values.
            Values should all be non-zero.
        generation : array_like
            Iterable of generated power (kW) per wind speed. Must have
            at least one positive value; if a cutoff speed is detected,
            all values above it must be zero.
        """
        self.wind_speed = np.array(wind_speed)
        self.generation = np.array(generation)
        # lazily computed + cached on first property access
        self._cutoff_wind_speed = None
        self._cutin_wind_speed = None
        self.i_cutoff = None

        _validate_arrays_not_empty(self,
                                   array_names=['wind_speed', 'generation'])
        self._validate_wind_speed()
        self._validate_generation()

    def _validate_wind_speed(self):
        """Raise if any input wind speed is negative. """
        if not (self.wind_speed >= 0).all():
            msg = ("Invalid wind speed input: Contains negative values! - {}"
                   .format(self.wind_speed))
            logger.error(msg)
            raise reVLossesValueError(msg)

    def _validate_generation(self):
        """Raise if generation has no positive values, or if non-zero
        power exists above the detected cutoff speed. """
        if not (self.generation > 0).any():
            msg = ("Invalid generation input: Found no positive values! - {}"
                   .format(self.generation))
            logger.error(msg)
            raise reVLossesValueError(msg)

        if 0 < self.cutoff_wind_speed < np.inf:
            at_or_above_cutoff = np.where(self.wind_speed
                                          >= self.cutoff_wind_speed)
            first_cutoff_ind = at_or_above_cutoff[0].min()
            if (self.generation[first_cutoff_ind:]).any():
                msg = ("Invalid generation input: Found non-zero values above "
                       "cutoff! - {}".format(self.generation))
                logger.error(msg)
                raise reVLossesValueError(msg)

    @property
    def cutin_wind_speed(self):
        """The detected cut-in wind speed at which power generation begins

        Returns
        -------
        float
        """
        if self._cutin_wind_speed is None:
            first_positive = np.where(self.generation > 0)[0][0]
            if first_positive > 0:
                # last zero-power speed before generation starts
                self._cutin_wind_speed = self.wind_speed[first_positive - 1]
            else:
                self._cutin_wind_speed = 0
        return self._cutin_wind_speed

    @property
    def cutoff_wind_speed(self):
        """The detected cutoff wind speed at which the power generation is
        zero

        Returns
        -------
        float | np.inf
        """
        if self._cutoff_wind_speed is None:
            # distance from the end of the last occurrence of max power
            from_end = np.argmax(self.generation[::-1])
            # pylint: disable=chained-comparison
            if from_end > 0 and self.generation[-from_end] <= 0:
                self.i_cutoff = len(self.generation) - from_end
                self._cutoff_wind_speed = self.wind_speed[-from_end]
            else:
                self._cutoff_wind_speed = np.inf
        return self._cutoff_wind_speed

    @property
    def rated_power(self):
        """Get the rated power (max power) of the turbine power curve.
        Units follow the input power curve (typically kW).

        Returns
        -------
        float
        """
        return np.max(self.generation)

    def __eq__(self, other):
        return np.isclose(self.generation, other).all()

    def __ne__(self, other):
        return not np.isclose(self.generation, other).all()

    def __lt__(self, other):
        return self.generation < other

    def __le__(self, other):
        return self.generation <= other

    def __gt__(self, other):
        return self.generation > other

    def __ge__(self, other):
        return self.generation >= other

    def __len__(self):
        return len(self.generation)

    def __getitem__(self, index):
        return self.generation[index]

    def __call__(self, wind_speed):
        """Interpolate the power curve at the given wind speed(s).

        Parameters
        ----------
        wind_speed : int | float | list | array_like
            Wind speed value(s) corresponding to the desired power
            curve value(s).

        Returns
        -------
        :obj:`numpy.array`
            The power curve value(s) for the input wind speed(s),
            zeroed at and above the detected cutoff speed.
        """
        if isinstance(wind_speed, (int, float)):
            wind_speed = np.array([wind_speed])
        interpolated = np.interp(wind_speed, self.wind_speed,
                                 self.generation)
        # cutoff is np.inf when none was detected, which is truthy, so
        # the mask below is a harmless no-op in that case
        if self.cutoff_wind_speed:
            interpolated[wind_speed >= self.cutoff_wind_speed] = 0
        return interpolated
+ + +
class PowerCurveLosses:
    """A converter between annual losses and power curve transformation.

    Given a target annual loss value, this class facilitates the
    calculation of a power curve transformation such that the annual
    generation losses incurred by using the transformed power curve when
    compared to the original (non-transformed) power curve match the
    target loss as close as possible.

    The underlying assumption for this approach is that some types of
    losses can be realized by a transformation of the power curve (see
    the values of :obj:`TRANSFORMATIONS` for details on all of the
    power curve transformations that have been implemented).

    The advantage of this approach is that, unlike haircut losses (where
    a single loss value is applied across the board to all generation),
    the losses are distributed non-uniformly across the power curve. For
    example, even in the overly simplified case of a horizontal
    translation of the power curve (which is only physically realistic
    for certain types of losses like blade degradation), the losses are
    distributed primarily across region 2 of the power curve (the steep,
    almost linear, portion where the generation rapidly increases). This
    means that, unlike with haircut losses, generation is able to reach
    max rated power (albeit at a greater wind speed).

    Attributes
    ----------
    power_curve : :obj:`PowerCurve`
        The original Power Curve.
    wind_resource : :obj:`numpy.array`
        An array containing the wind speeds (i.e. wind speed
        distribution) for the site at which the power curve will be
        used. This distribution is used to calculate the annual
        generation of the original power curve as well as any additional
        calculated power curves. The generation values are then compared
        in order to calculate the loss resulting from a transformed
        power curve.
    weights : :obj:`numpy.array`
        An array of the same length as ``wind_resource`` containing
        weights to apply to each generation value calculated for the
        corresponding wind speed.
    """

    def __init__(self, power_curve, wind_resource, weights=None, site=None):
        """
        Parameters
        ----------
        power_curve : :obj:`PowerCurve`
            The "original" power curve to be adjusted.
        wind_resource : array_like
            An iterable containing the wind speeds measured at the site
            where this power curve will be applied to calculate
            generation. These values are used to calculate the loss
            resulting from a transformed power curve compared to the
            generation of the original power curve. The input
            values should all be non-zero, and the units
            should match the units of the ``power_curve`` input
            (typically, m/s).
        weights : array_like, optional
            An iterable of the same length as ``wind_resource``
            containing weights to apply to each generation value
            calculated for the corresponding wind speed.
        site : int | str, optional
            Site number (gid) for debugging and logging.
            By default, ``None``.
        """
        self.power_curve = power_curve
        self.wind_resource = np.array(wind_resource)
        if weights is None:
            # every wind speed observation counts equally by default
            self.weights = np.ones_like(self.wind_resource)
        else:
            self.weights = np.array(weights)
        self._power_gen = None  # cached no-loss total generation
        self.site = "[unknown]" if site is None else site

        _validate_arrays_not_empty(self,
                                   array_names=['wind_resource', 'weights'])
        self._validate_wind_resource()
        self._validate_weights()

    def _validate_wind_resource(self):
        """Validate that the input wind resource is non-negative.

        Raises
        ------
        reVLossesValueError
            If any wind resource value is negative.
        """
        if not (self.wind_resource >= 0).all():
            msg = ("Invalid wind resource input for site {}: Contains "
                   "negative values! - {}"
                   .format(self.site, self.wind_resource))
            # FIX: removed a leftover second ``msg.format(...)`` call on
            # the already-formatted message; it was a no-op at best and
            # would raise if the array repr contained brace characters.
            logger.error(msg)
            raise reVLossesValueError(msg)

    def _validate_weights(self):
        """Validate that the input weights size matches the wind resource.

        Raises
        ------
        reVLossesValueError
            If the weights array size differs from the wind resource.
        """
        if self.wind_resource.size != self.weights.size:
            msg = ("Invalid weights input: Does not match size of wind "
                   "resource for site {}! - {} vs {}"
                   .format(self.site, self.weights.size,
                           self.wind_resource.size))
            logger.error(msg)
            raise reVLossesValueError(msg)

    def annual_losses_with_transformed_power_curve(
        self, transformed_power_curve
    ):
        """Calculate the annual losses from a transformed power curve.

        This function uses the wind resource data that the object was
        initialized with to calculate the total annual power generation
        with a transformed power curve. This generation is compared with
        the generation of the original (non-transformed) power curve to
        compute the total annual losses as a result of the
        transformation.

        Parameters
        ----------
        transformed_power_curve : :obj:`PowerCurve`
            A transformed power curve. The power generated with this
            power curve will be compared with the power generated by the
            "original" power curve to calculate annual losses.

        Returns
        -------
        float
            Total losses (%) as a result of a the power curve
            transformation.
        """
        power_gen_with_losses = transformed_power_curve(self.wind_resource)
        power_gen_with_losses *= self.weights
        power_gen_with_losses = power_gen_with_losses.sum()
        return (1 - power_gen_with_losses / self.power_gen_no_losses) * 100

    def _obj(self, transformation_variable, target, transformation):
        """Objective function: |output - target|."""
        new_power_curve = transformation.apply(transformation_variable)
        losses = self.annual_losses_with_transformed_power_curve(
            new_power_curve
        )
        return np.abs(losses - target)

    def fit(self, target, transformation):
        """Fit a power curve transformation.

        This function fits a transformation to the input power curve
        (the one used to initialize the object) to generate an annual
        loss percentage closest to the ``target``. The losses are
        computed w.r.t the generation of the original (non-transformed)
        power curve.

        Parameters
        ----------
        target : float
            Target value for annual generation losses (%).
        transformation : PowerCurveTransformation
            A PowerCurveTransformation class representing the power
            curve transformation to use.

        Returns
        -------
        :obj:`numpy.array`
            An array containing a transformed power curve that most
            closely yields the ``target`` annual generation losses.

        Warns
        -----
        reVLossesWarning
            If the fit did not meet the target annual losses to within
            1%.

        Warnings
        --------
        There is no guarantee a close match to ``target`` exists or can
        be found, especially for large targets or abnormal power curves
        / wind resource; in that case the returned curve's losses may
        differ from the ``target``.
        """
        transformation = transformation(self.power_curve)
        # optimize over the (possibly looser) optimization bounds, then
        # clamp the fit parameter to the transformation's true bounds
        fit_var = minimize_scalar(self._obj,
                                  args=(target, transformation),
                                  bounds=transformation.optm_bounds,
                                  method='bounded').x

        if fit_var > np.max(transformation.bounds):
            msg = ('Transformation "{}" for site {} resulted in fit parameter '
                   '{} greater than the max bound of {}. Limiting to the max '
                   'bound, but the applied losses may not be correct.'
                   .format(transformation, self.site, fit_var,
                           np.max(transformation.bounds)))
            logger.warning(msg)
            warnings.warn(msg, reVLossesWarning)
            fit_var = np.max(transformation.bounds)

        if fit_var < np.min(transformation.bounds):
            msg = ('Transformation "{}" for site {} resulted in fit parameter '
                   '{} less than the min bound of {}. Limiting to the min '
                   'bound, but the applied losses may not be correct.'
                   .format(transformation, self.site, fit_var,
                           np.min(transformation.bounds)))
            logger.warning(msg)
            warnings.warn(msg, reVLossesWarning)
            fit_var = np.min(transformation.bounds)

        error = self._obj(fit_var, target, transformation)

        if error > 1:
            new_power_curve = transformation.apply(fit_var)
            losses = self.annual_losses_with_transformed_power_curve(
                new_power_curve)
            msg = ("Unable to find a transformation for site {} such that the "
                   "losses meet the target within 1%! Obtained fit with loss "
                   "percentage {}% when target was {}%. Consider using a "
                   "different transformation or reducing the target losses!"
                   .format(self.site, losses, target))
            logger.warning(msg)
            warnings.warn(msg, reVLossesWarning)

        return transformation.apply(fit_var)

    @property
    def power_gen_no_losses(self):
        """float: Total power generation from original power curve."""
        if self._power_gen is None:
            self._power_gen = self.power_curve(self.wind_resource)
            self._power_gen *= self.weights
            self._power_gen = self._power_gen.sum()
        return self._power_gen
+ + +
class PowerCurveLossesInput:
    """Power curve losses specification.

    This class stores and validates information about the desired losses
    from a given type of power curve transformation. In particular, the
    target loss percentage must be provided. This input is then
    validated to be used power curve transformation fitting.
    """

    REQUIRED_KEYS = {'target_losses_percent'}
    """Required keys in the input specification dictionary."""

    def __init__(self, specs):
        """
        Parameters
        ----------
        specs : dict
            A dictionary containing specifications for the power curve
            losses. This dictionary must contain the following keys:

            - ``target_losses_percent``
                An integer or float value representing the
                total percentage of annual energy production that
                should be lost due to the power curve transformation.
                This value must be in the range [0, 100].

            The input dictionary can also provide the following optional
            keys:

            - ``transformation`` - by default, ``exponential_stretching``
                A string representing the type of transformation to
                apply to the power curve. This string must be one of
                the keys of :obj:`TRANSFORMATIONS`. See the relevant
                transformation class documentation for detailed
                information on that type of power curve
                transformation.
        """
        self._specs = specs
        # NOTE: the code default has always been exponential_stretching;
        # the docstring above was corrected to match.
        self._transformation_name = self._specs.get('transformation',
                                                    'exponential_stretching')
        self._validate()

    def _validate(self):
        """Validate the input specs."""
        self._validate_required_keys_exist()
        self._validate_transformation()
        self._validate_percentage()

    def _validate_required_keys_exist(self):
        """Raise an error if any required keys are missing."""
        # FIX: collect the *names* of the missing keys. The previous
        # revision collected booleans, so the error message printed
        # True/False values instead of the offending key names.
        missing_keys = [n for n in self.REQUIRED_KEYS
                        if n not in self._specs]
        if missing_keys:
            msg = ("The following required keys are missing from the power "
                   "curve losses specification: {}"
                   .format(sorted(missing_keys)))
            logger.error(msg)
            raise reVLossesValueError(msg)

    def _validate_transformation(self):
        """Validate that the transformation exists in TRANSFORMATIONS. """
        if self._transformation_name not in TRANSFORMATIONS:
            msg = ("Transformation {!r} not understood! "
                   "Input must be one of: {} "
                   .format(self._transformation_name,
                           list(TRANSFORMATIONS.keys())))
            logger.error(msg)
            raise reVLossesValueError(msg)

    def _validate_percentage(self):
        """Validate that the percentage is in the range [0, 100]. """
        if not 0 <= self.target <= 100:
            msg = ("Percentage of annual energy production loss to be "
                   "attributed to the power curve transformation must be in "
                   "the range [0, 100], but got {} for transformation {!r}"
                   .format(self.target, self._transformation_name))
            logger.error(msg)
            raise reVLossesValueError(msg)

    def __repr__(self):
        specs = self._specs.copy()
        specs.update({'transformation': self._transformation_name})
        specs_as_str = ", ".join(["{}={!r}".format(k, v)
                                  for k, v in specs.items()])
        return "PowerCurveLossesInput({})".format(specs_as_str)

    @property
    def target(self):
        """int or float: Target loss percentage due to transformation."""
        return self._specs['target_losses_percent']

    @property
    def transformation(self):
        """PowerCurveTransformation: Power curve transformation."""
        return TRANSFORMATIONS[self._transformation_name]
+ + +
class PowerCurveWindResource:
    """Wind resource data for calculating power curve shift."""

    def __init__(self, temperature, pressure, wind_speed):
        """Power Curve Wind Resource.

        Parameters
        ----------
        temperature : array_like
            An iterable representing the temperatures at a single site
            (in C). Must be the same length as the `pressure` and
            `wind_speed` inputs.
        pressure : array_like
            An iterable representing the pressures at a single site
            (in PA or ATM). Must be the same length as the `temperature`
            and `wind_speed` inputs.
        wind_speed : array_like
            An iterable representing the wind speeds at a single site
            (in m/s). Must be the same length as the `temperature` and
            `pressure` inputs.
        """
        self._temperatures = np.array(temperature)
        self._pressures = np.array(pressure)
        self._wind_speeds = np.array(wind_speed)
        self.wind_speed_weights = None

    def wind_resource_for_site(self):
        """Extract scaled wind speeds at the resource site.

        The scaling matches what SAM does internally [1]_: wind speeds
        are divided by an air-density correction factor computed from
        site pressure and temperature [2]_, so sampling the power curve
        with these speeds reproduces SAM's generation values.

        Returns
        -------
        array-like
            Array of scaled wind speeds.

        References
        ----------
        .. [1] Scaling done in SAM: https://tinyurl.com/2uzjawpe
        .. [2] SAM Wind Power Reference Manual for explanations on
            generation and air density calculations (pp. 18):
            https://tinyurl.com/2p8fjba6
        """
        pressures_pascal = self._pressures_as_pascal()

        kelvin = self._temperatures + 273.15  # inputs are in celsius
        gas_const_dry_air = 287.058  # units: J / kg / K
        rho_sea_level = 1.225  # units: kg/m**3 at 15 degrees celsius

        site_air_densities = pressures_pascal / (gas_const_dry_air * kelvin)
        correction = (rho_sea_level / site_air_densities) ** (1 / 3)
        return self._wind_speeds / correction

    def _pressures_as_pascal(self):
        """Convert input pressures to Pascal, inferring the input units."""
        if self._pressures.max() < 2:  # units are ATM
            return self._pressures * 101325.027383
        if self._pressures.min() > 1e4:  # units are PA
            return self._pressures
        msg = ("Unable to determine pressure units: pressure values "
               "found in the range {:.2f} to {:.2f}. Please make "
               "sure input pressures are in units of PA or ATM"
               .format(self._pressures.min(), self._pressures.max()))
        logger.error(msg)
        raise reVLossesValueError(msg)

    @property
    def wind_speeds(self):
        """:obj:`numpy.array`: Array of adjusted wind speeds. """
        return self.wind_resource_for_site()
+ + +class _PowerCurveWindDistribution: + """`PowerCurveWindResource` interface mocker for wind distributions. """ + + def __init__(self, speeds, weights): + """Power Curve Wind Resource for Wind Distributions. + + Parameters + ---------- + speeds : array_like + An iterable representing the wind speeds at a single site + (in m/s). Must be the same length as the `weights` input. + weights : array_like + An iterable representing the wind speed weights at a single + site. Must be the same length as the `speeds` input. + """ + self.wind_speeds = np.array(speeds) + self.wind_speed_weights = np.array(weights) + + +
def adjust_power_curve(power_curve, resource_data, target_losses, site=None):
    """Adjust power curve to account for losses.

    Computes a new power curve whose annual generation, given the site
    wind resource, falls short of the original curve's by the specified
    target loss percentage.

    Parameters
    ----------
    power_curve : :obj:`PowerCurve`
        Power curve to be adjusted to match target losses.
    resource_data : :obj:`PowerCurveWindResource`
        Resource data for the site being investigated.
    target_losses : :obj:`PowerCurveLossesInput`
        Target loss and power curve shift info.
    site : int | str, optional
        Site number (gid) for debugging and logging.
        By default, ``None``.

    Returns
    -------
    :obj:`PowerCurve`
        Power Curve shifted to meet the target losses. The input curve
        is returned unadjusted if all wind speeds are above the cutout
        or below the cutin speed.

    See Also
    --------
    :obj:`PowerCurveLosses` : Power curve re-calculation.
    """
    site = "[unknown]" if site is None else site
    speeds = resource_data.wind_speeds

    # no fit possible when the turbine never produces power at this site
    if (speeds <= power_curve.cutin_wind_speed).all():
        msg = ("All wind speeds for site {} are below the wind speed "
               "cutin ({} m/s). No power curve adjustments made!"
               .format(site, power_curve.cutin_wind_speed))
        logger.warning(msg)
        warnings.warn(msg, reVLossesWarning)
        return power_curve

    if (speeds >= power_curve.cutoff_wind_speed).all():
        msg = ("All wind speeds for site {} are above the wind speed "
               "cutoff ({} m/s). No power curve adjustments made!"
               .format(site, power_curve.cutoff_wind_speed))
        logger.warning(msg)
        warnings.warn(msg, reVLossesWarning)
        return power_curve

    pc_losses = PowerCurveLosses(power_curve, speeds,
                                 resource_data.wind_speed_weights, site=site)

    logger.debug("Transforming power curve using the {} transformation to "
                 "meet {}% loss target..."
                 .format(target_losses.transformation, target_losses.target))

    new_curve = pc_losses.fit(target_losses.target,
                              target_losses.transformation)
    logger.debug("Transformed power curve: {}".format(list(new_curve)))
    return new_curve
+ + +
class PowerCurveLossesMixin:
    """Mixin class for :class:`reV.SAM.generation.AbstractSamWind`.

    Warnings
    --------
    Using this class for anything except as a mixin for
    :class:`~reV.SAM.generation.AbstractSamWind` may result in
    unexpected results and/or errors.
    """

    POWER_CURVE_CONFIG_KEY = 'reV_power_curve_losses'
    """Specify power curve loss target in the config file using this key."""

    def add_power_curve_losses(self):
        """Adjust power curve in SAM config file to account for losses.

        Reads the ``reV_power_curve_losses`` key of the
        ``sam_sys_inputs`` dictionary and replaces the turbine power
        curve with one transformed to meet the requested loss
        percentage. If no power curve loss info is specified, the power
        curve is left untouched.

        See Also
        --------
        :func:`adjust_power_curve` : Power curve shift calculation.
        """
        loss_input = self._user_power_curve_input()
        if not loss_input:
            return

        new_curve = adjust_power_curve(
            self.input_power_curve,
            self.wind_resource_from_input(),
            loss_input,
            site=getattr(self, 'site', "[unknown]"),
        )
        self.sam_sys_inputs['wind_turbine_powercurve_powerout'] = new_curve

    def _user_power_curve_input(self):
        """Get power curve loss info from config. """
        info = self.sam_sys_inputs.pop(self.POWER_CURVE_CONFIG_KEY, None)
        if info is None:
            return None

        if isinstance(info, str):
            # site-specific info is input as str
            info = json.loads(info)

        loss_input = PowerCurveLossesInput(info)
        if loss_input.target <= 0:
            logger.debug("Power curve target loss is 0. Skipping power curve "
                         "transformation.")
            return None

        return loss_input

    @property
    def input_power_curve(self):
        """:obj:`PowerCurve`: Original power curve for site. """
        return PowerCurve(
            self.sam_sys_inputs['wind_turbine_powercurve_windspeeds'],
            self.sam_sys_inputs['wind_turbine_powercurve_powerout'],
        )

    def wind_resource_from_input(self):
        """Collect wind resource and weights from inputs.

        Returns
        -------
        :obj:`PowerCurveWindResource`
            Wind resource used to compute power curve shift.

        Raises
        ------
        reVLossesValueError
            If power curve losses are not compatible with the
            'wind_resource_model_choice'.
        """
        model_choice = self['wind_resource_model_choice']
        if model_choice == 0:
            # timeseries resource: columns are (temp, pressure, speed, dir)
            temperatures, pressures, wind_speeds, __, = map(
                np.array, zip(*self['wind_resource_data']['data'])
            )
            return PowerCurveWindResource(temperatures, pressures,
                                          wind_speeds)
        if model_choice == 2:
            # distribution resource: first col speeds, last col weights
            wrd = np.array(self['wind_resource_distribution'])
            return _PowerCurveWindDistribution(wrd[:, 0], wrd[:, -1])

        msg = ("reV power curve losses cannot be used with "
               "'wind_resource_model_choice' = {}".format(model_choice))
        logger.error(msg)
        raise reVLossesValueError(msg)
+ + +
class AbstractPowerCurveTransformation(ABC):
    """Abstract base class for power curve transformations.

    **This class is not meant to be instantiated**.

    This class provides an interface for power curve transformations,
    which are meant to more realistically represent certain types of
    losses when compared to simple haircut losses (i.e. constant loss
    value applied at all points on the power curve).

    To implement your own transformation, subclass this class and
    implement the :meth:`apply` method and the :attr:`bounds` property.
    See the documentation for each of these below for more details.

    Attributes
    ----------
    power_curve : :obj:`PowerCurve`
        The "original" input power curve.
    """

    def __init__(self, power_curve):
        """Abstract Power Curve Transformation class.

        Parameters
        ----------
        power_curve : :obj:`PowerCurve`
            The turbine power curve. This input is treated as the
            "original" power curve.
        """
        self.power_curve = power_curve
        self._transformed_generation = None

    def _validate_non_zero_generation(self, new_curve):
        """Ensure new power curve has some non-zero generation."""
        below_cutoff = (self.power_curve.wind_speed
                        <= self.power_curve.cutoff_wind_speed)
        min_expected_power_gen = self.power_curve[self.power_curve > 0].min()
        if not (new_curve[below_cutoff] > min_expected_power_gen).any():
            msg = ("Calculated power curve is invalid. No power generation "
                   "below the cutoff wind speed ({} m/s) detected. Target "
                   "loss percentage may be too large! Please try again with "
                   "a lower target value."
                   .format(self.power_curve.cutoff_wind_speed))
            logger.error(msg)
            raise reVLossesValueError(msg)

    def _validate_same_cutoff(self, new_curve):
        """Validate that the new power curve has the same high-wind cutout
        as the original curve."""
        old_cut = self.power_curve.cutoff_wind_speed
        new_cut = new_curve.cutoff_wind_speed
        if old_cut != new_cut:
            msg = ('Original power curve windspeed cutout is {}m/s and new '
                   'curve cutout is {}m/s. Something went wrong!'
                   .format(old_cut, new_cut))
            logger.error(msg)
            raise reVLossesValueError(msg)

    @abstractmethod
    def apply(self, transformation_var):
        """Apply a transformation to the original power curve.

        Parameters
        ----------
        transformation_var : : float
            A single variable controlling the "strength" of the
            transformation. The :obj:`PowerCurveLosses` object will
            run an optimization using this variable to fit the target
            annual losses incurred with the transformed power curve
            compared to the original power curve using the given wind
            resource distribution.

        Returns
        -------
        :obj:`PowerCurve`
            An new power curve containing the generation values from the
            transformed power curve.

        Raises
        ------
        NotImplementedError
            If the transformation implementation did not set the
            ``_transformed_generation`` attribute.

        Notes
        -----
        When implementing a new transformation, override this method,
        set the ``_transformed_generation`` protected attribute to the
        generation of the transformed power curve, and then call
        ``super().apply(transformation_var)`` to apply cutout speed
        curtailment and validation. For example, for a horizontal
        shift::

            self._transformed_generation = self.power_curve(
                self.power_curve.wind_speed - transformation_var
            )
            return super().apply(transformation_var)

        """
        if self._transformed_generation is None:
            msg = ("Transformation implementation for {}.apply did not set "
                   "the `_transformed_generation` attribute."
                   .format(type(self).__name__))
            logger.error(msg)
            raise NotImplementedError(msg)

        if not np.isinf(self.power_curve.cutoff_wind_speed):
            # curtail all generation at and above the cutout speed
            curtailed = (self.power_curve.wind_speed
                         >= self.power_curve.cutoff_wind_speed)
            self._transformed_generation[curtailed] = 0
            new_curve = PowerCurve(self.power_curve.wind_speed,
                                   self._transformed_generation)
            self._validate_non_zero_generation(new_curve)

            # restore the rated-power plateau between the transformed
            # peak and the original cutout index
            i_max = np.argmax(self._transformed_generation)
            i_cutoff = self.power_curve.i_cutoff
            rated_power = self.power_curve.rated_power
            self._transformed_generation[i_max:i_cutoff] = rated_power

        new_curve = PowerCurve(self.power_curve.wind_speed,
                               self._transformed_generation)
        self._validate_non_zero_generation(new_curve)
        self._validate_same_cutoff(new_curve)
        return new_curve

    @property
    @abstractmethod
    def bounds(self):
        """tuple: true Bounds on the ``transformation_var``."""

    @property
    def optm_bounds(self):
        """Bounds for scipy optimization, sometimes more generous than the
        actual fit parameter bounds which are enforced after the
        optimization."""
        return self.bounds
+ + +
class HorizontalTranslation(AbstractPowerCurveTransformation):
    """Utility for applying horizontal power curve translations.

    The mathematical representation of this transformation is:

    .. math:: P_{transformed}(u) = P_{original}(u - t),

    where :math:`P_{transformed}` is the transformed power curve,
    :math:`P_{original}` is the original power curve, :math:`u` is
    the wind speed, and :math:`t` is the transformation variable
    (horizontal translation amount).

    This kind of power curve transformation is simplistic, and should
    only be used for a small handful of applicable turbine losses
    (i.e. blade degradation). See ``Warnings`` for more details.

    The losses in this type of transformation are distributed primarily
    across region 2 of the power curve (the steep, almost linear,
    portion where the generation rapidly increases):

    .. image:: ../../../examples/rev_losses/horizontal_translation.png
       :align: center

    Attributes
    ----------
    power_curve : :obj:`PowerCurve`
        The "original" input power curve.

    Warnings
    --------
    This kind of power curve translation is not generally realistic.
    Using this transformation as a primary source of losses (i.e. many
    different kinds of losses bundled together) is extremely likely to
    yield unrealistic results!
    """

    def apply(self, transformation_var):
        """Apply a horizontal translation to the original power curve.

        This shifts the original power curve along the "wind speed" (x)
        axis by the given amount; any power above the cutoff speed (if
        one was detected) is truncated after the transformation.

        Parameters
        ----------
        transformation_var : float
            The amount to shift the original power curve by, in wind
            speed units (typically, m/s).

        Returns
        -------
        :obj:`PowerCurve`
            An new power curve containing the generation values from the
            shifted power curve.
        """
        shifted_speeds = self.power_curve.wind_speed - transformation_var
        self._transformed_generation = self.power_curve(shifted_speeds)
        return super().apply(transformation_var)

    @property
    def bounds(self):
        """tuple: true Bounds on the power curve shift (different from the
        optimization boundaries)"""
        first_power_ind = np.where(self.power_curve)[0][0]
        last_power_offset = np.where(self.power_curve[::-1])[0][0]
        max_shift = (self.power_curve.wind_speed[-last_power_offset - 1]
                     - self.power_curve.wind_speed[first_power_ind])
        return (0, max_shift)
+ + +
class LinearStretching(AbstractPowerCurveTransformation):
    """Utility for applying a linear stretch to the power curve.

    The mathematical representation of this transformation is:

    .. math:: P_{transformed}(u) = P_{original}(u/t),

    where :math:`P_{transformed}` is the transformed power curve,
    :math:`P_{original}` is the original power curve, :math:`u` is
    the wind speed, and :math:`t` is the transformation variable
    (wind speed multiplier).

    The losses in this type of transformation are distributed primarily
    across regions 2 and 3 of the power curve. In particular, losses are
    smaller for wind speeds closer to the cut-in speed, and larger for
    speeds close to rated power:

    .. image:: ../../../examples/rev_losses/linear_stretching.png
       :align: center

    Attributes
    ----------
    power_curve : :obj:`PowerCurve`
        The "original" input power curve.
    """

    def apply(self, transformation_var):
        """Apply a linear stretch to the original power curve.

        This stretches the original power curve along the "wind speed"
        (x) axis; any power above the cutoff speed (if one was
        detected) is truncated after the transformation.

        Parameters
        ----------
        transformation_var : float
            The linear multiplier of the wind speed scaling.

        Returns
        -------
        :obj:`PowerCurve`
            An new power curve containing the generation values from the
            stretched power curve.
        """
        scaled_speeds = self.power_curve.wind_speed / transformation_var
        self._transformed_generation = self.power_curve(scaled_speeds)
        return super().apply(transformation_var)

    @property
    def bounds(self):
        """tuple: true Bounds on the wind speed multiplier (different from
        the optimization boundaries)"""
        min_ind_pc = np.where(self.power_curve)[0][0]
        min_ind_ws = np.where(self.power_curve.wind_speed > 1)[0][0]
        min_ws = self.power_curve.wind_speed[max(min_ind_pc, min_ind_ws)]
        max_ws = min(self.power_curve.wind_speed.max(),
                     self.power_curve.cutoff_wind_speed)
        max_multiplier = np.ceil(max_ws / min_ws)
        return (1, max_multiplier)
+ + +
[docs]class ExponentialStretching(AbstractPowerCurveTransformation): + """Utility for applying an exponential stretch to the power curve. + + The mathematical representation of this transformation is: + + .. math:: P_{transformed}(u) = P_{original}(u^{1/t}), + + where :math:`P_{transformed}` is the transformed power curve, + :math:`P_{original}` is the original power curve, :math:`u` is + the wind speed, and :math:`t` is the transformation variable + (wind speed exponent). + + The losses in this type of transformation are distributed primarily + across regions 2 and 3 of the power curve. In particular, losses are + smaller for wind speeds closer to the cut-in speed, and larger for + speeds close to rated power: + + .. image:: ../../../examples/rev_losses/exponential_stretching.png + :align: center + + Attributes + ---------- + power_curve : :obj:`PowerCurve` + The "original" input power curve. + """ + +
[docs] def apply(self, transformation_var): + """Apply an exponential stretch to the original power curve. + + This function stretches the original power curve along the + "wind speed" (x) axis. Any power above the cutoff speed (if one + was detected) is truncated after the transformation. + + Parameters + ---------- + transformation_var : float + The exponent of the wind speed scaling. + + Returns + ------- + :obj:`PowerCurve` + An new power curve containing the generation values from the + shifted power curve. + """ + self._transformed_generation = self.power_curve( + self.power_curve.wind_speed ** (1 / transformation_var) + ) + return super().apply(transformation_var)
+ + @property + def bounds(self): + """tuple: Bounds on the wind speed exponent.""" + min_ind_pc = np.where(self.power_curve)[0][0] + min_ind_ws = np.where(self.power_curve.wind_speed > 1)[0][0] + min_ws = self.power_curve.wind_speed[max(min_ind_pc, min_ind_ws)] + max_ws = min(self.power_curve.wind_speed.max(), + self.power_curve.cutoff_wind_speed) + max_exponent = np.ceil(np.log(max_ws) / np.log(min_ws)) + return (1, max_exponent) + + @property + def optm_bounds(self): + """Bounds for scipy optimization, sometimes more generous than the + actual fit parameter bounds which are enforced after the + optimization.""" + return (0.5, self.bounds[1])
+ + +TRANSFORMATIONS = { + 'horizontal_translation': HorizontalTranslation, + 'linear_stretching': LinearStretching, + 'exponential_stretching': ExponentialStretching +} +"""Implemented power curve transformations.""" +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/losses/scheduled.html b/_modules/reV/losses/scheduled.html new file mode 100644 index 000000000..b1a7ecf26 --- /dev/null +++ b/_modules/reV/losses/scheduled.html @@ -0,0 +1,1249 @@ + + + + + + reV.losses.scheduled — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.losses.scheduled

+# -*- coding: utf-8 -*-
+"""reV scheduled losses module.
+
+"""
+import logging
+import warnings
+import json
+
+import numpy as np
+
+from reV.losses.utils import (convert_to_full_month_names,
+                              filter_unknown_month_names,
+                              hourly_indices_for_months)
+from reV.utilities.exceptions import reVLossesValueError, reVLossesWarning
+
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class Outage: + """A specific type of outage. + + This class stores and validates information about a single type of + outage. In particular, the number of outages, duration, percentage + of farm down, and the allowed months for scheduling the outage + must all be provided. These inputs are then validated so that they + can be used in instances of scheduling objects. + + """ + + REQUIRED_KEYS = {'count', + 'duration', + 'percentage_of_capacity_lost', + 'allowed_months'} + """Required keys in the input specification dictionary.""" + + def __init__(self, specs): + """ + + Parameters + ---------- + specs : dict + A dictionary containing specifications for this outage. This + dictionary must contain the following keys: + + - `count` + An integer value representing the total number of + times this outage should be scheduled. This number + should be larger than 0. + - `duration` + An integer value representing the total number of + consecutive hours that this outage should take. This + value must be larger than 0 and less than the number + of hours in the allowed months. + - `percentage_of_capacity_lost` + An integer or float value representing the total + percentage of the total capacity that will be lost + for the duration of the outage. This value must be + in the range (0, 100]. + - `allowed_months` + A list of month names corresponding to the allowed + months for the scheduled outages. Month names can be + unformatted and can be specified using 3-letter + month abbreviations. + + The input dictionary can also provide the following optional + keys: + + - `allow_outage_overlap` - by default, ``True`` + A bool flag indicating whether or not this outage is + allowed to overlap with other outages, including + itself. It is recommended to set this value to + ``True`` whenever possible, as it allows for more + flexible scheduling. + - `name` - by default, string containing init parameters + A unique name for the outage, used for more + descriptive error messages. 
+ + """ + self._specs = specs + self._full_month_names = None + self._total_available_hours = None + self._name = None + self._validate() + + def _validate(self): + """Validate the input specs.""" + self._validate_required_keys_exist() + self._validate_count() + self._validate_and_convert_to_full_name_months() + self._validate_duration() + self._validate_percentage() + + def _validate_required_keys_exist(self): + """Raise an error if any required keys are missing.""" + missing_keys = [n for n in self.REQUIRED_KEYS if n not in self._specs] + if any(missing_keys): + msg = ("The following required keys are missing from the Outage " + "specification: {}".format(sorted(missing_keys))) + logger.error(msg) + raise reVLossesValueError(msg) + + def _validate_count(self): + """Validate that the total number of outages is an integer. """ + if not isinstance(self.count, int): + msg = ("Number of outages must be an integer, but got {} for {}" + .format(self.count, self.name)) + logger.error(msg) + raise reVLossesValueError(msg) + + if self.count < 1: + msg = ("Number of outages must be greater than 0, but got " + "{} for {}".format(self.count, self.name)) + logger.error(msg) + raise reVLossesValueError(msg) + + def _validate_and_convert_to_full_name_months(self): + """Validate month input and convert to full month names. """ + months = convert_to_full_month_names(self._specs['allowed_months']) + known_months, unknown_months = filter_unknown_month_names(months) + + if unknown_months: + msg = ("The following month names were not understood: {}. Please " + "use either the full month name or the standard 3-letter " + "month abbreviation. For more info, see the month name " + "documentation for the python standard package `calendar`." + .format(unknown_months)) + logger.warning(msg) + warnings.warn(msg, reVLossesWarning) + + if not known_months: + msg = ("No known month names were provided! Please use either the " + "full month name or the standard 3-letter month " + "abbreviation. 
For more info, see the month name " + "documentation for the python standard package `calendar`. " + "Received input: {!r}" + .format(self._specs['allowed_months'])) + logger.error(msg) + raise reVLossesValueError(msg) + + self._full_month_names = list(set(known_months)) + + def _validate_duration(self): + """Validate that the duration is between 0 and the max total. """ + if not isinstance(self.duration, int): + msg = ("Duration must be an integer number of hours, " + "but got {} for {}".format(self.duration, self.name)) + logger.error(msg) + raise reVLossesValueError(msg) + + if not 1 <= self.duration <= self.total_available_hours: + msg = ("Duration of outage must be between 1 and the total " + "available hours based on allowed month input ({} for " + "a total hour count of {}), but got {} for {}" + .format(self.allowed_months, self.total_available_hours, + self.percentage_of_capacity_lost, self.name)) + logger.error(msg) + raise reVLossesValueError(msg) + + def _validate_percentage(self): + """Validate that the percentage is in the range (0, 100]. 
""" + if not 0 < self.percentage_of_capacity_lost <= 100: + msg = ("Percentage of farm down during outage must be in the " + "range (0, 100], but got {} for {}" + .format(self.percentage_of_capacity_lost, self.name)) + logger.error(msg) + raise reVLossesValueError(msg) + + def __repr__(self): + return "Outage({!r})".format(self._specs) + + def __str__(self): + if self._name is None: + self._name = self._specs.get('name') or self._default_name() + return self._name + + def _default_name(self): + """Generate a default name for the outage.""" + specs = self._specs.copy() + specs.update({'allowed_months': self.allowed_months, + 'allow_outage_overlap': self.allow_outage_overlap}) + specs_as_str = ", ".join(["{}={}".format(k, v) + for k, v in specs.items()]) + return "Outage({})".format(specs_as_str) + + @property + def count(self): + """int: Total number of times outage should be scheduled.""" + return self._specs['count'] + + @property + def duration(self): + """int: Total number of consecutive hours per outage.""" + return self._specs['duration'] + + @property + def percentage_of_capacity_lost(self): + """int | float: Percent of capacity taken down per outage.""" + return self._specs['percentage_of_capacity_lost'] + + @property + def allowed_months(self): + """list: Months during which outage can be scheduled.""" + return self._full_month_names + + @property + def allow_outage_overlap(self): + """bool: Indicator for overlap with other outages.""" + return self._specs.get('allow_outage_overlap', True) + + @property + def name(self): + """str: Name of the outage.""" + return self._specs.get('name', str(self)) + + @property + def total_available_hours(self): + """int: Total number of hours available based on allowed months.""" + if self._total_available_hours is None: + self._total_available_hours = len( + hourly_indices_for_months(self.allowed_months)) + return self._total_available_hours
+ + +
[docs]class OutageScheduler: + """A scheduler for multiple input outages. + + Given a list of information about different types of desired + outages, this class leverages the stochastic scheduling routines of + :class:`SingleOutageScheduler` to calculate the total losses due to + the input outages on an hourly basis. + + Attributes + ---------- + outages : :obj:`list` of :obj:`Outages <Outage>` + The user-provided list of :obj:`Outages <Outage>` containing + info about all types of outages to be scheduled. + seed : :obj:`int` + The seed value used to seed the random generator in order + to produce random but reproducible losses. This is useful + for ensuring that stochastically scheduled losses vary + between different sites (i.e. that randomly scheduled + outages in two different location do not match perfectly on + an hourly basis). + total_losses : :obj:`np.array` + An array (of length 8760) containing the per-hour total loss + percentage resulting from the stochastically scheduled outages. + This array contains only zero values before the + :meth:`~OutageScheduler.calculate` method is run. + can_schedule_more : :obj:`np.array` + A boolean array (of length 8760) indicating wether or not more + losses can be scheduled for a given hour. This array keeps track + of all the scheduling conflicts between input outages. + + Warnings + -------- + It is possible that not all outages input by the user will be + scheduled. This can happen when there is not enough time allowed + for all of the input outages. To avoid this issue, always be sure to + allow a large enough month range for long outages that take up a big + portion of the farm and try to allow outage overlap whenever + possible. + + See Also + -------- + :class:`SingleOutageScheduler` : Single outage scheduler. + :class:`Outage` : Specifications for a single outage. 
+ """ + + def __init__(self, outages, seed=0): + """ + Parameters + ---------- + outages : list of :obj:`Outages <Outage>` + A list of :obj:`Outages <Outage>`, where each :obj:`Outage` + contains info about a single type of outage. See the + documentation of :class:`Outage` for a description of the + required keys of each outage dictionary. + seed : int, optional + An integer value used to seed the random generator in order + to produce random but reproducible losses. This is useful + for ensuring that stochastically scheduled losses vary + between different sites (i.e. that randomly scheduled + outages in two different location do not match perfectly on + an hourly basis). By default, the seed is set to 0. + """ + self.outages = outages + self.seed = seed + self.total_losses = np.zeros(8760) + self.can_schedule_more = np.full(8760, True) + +
[docs] def calculate(self): + """Calculate total losses from stochastically scheduled outages. + + This function calls :meth:`SingleOutageScheduler.calculate` + on every outage input (sorted by largest duration and then + largest number of outages) and returns the aggregate the losses + from the result. + + Returns + ------- + :obj:`np.array` + An array (of length 8760) containing the per-hour total loss + percentage resulting from the stochastically scheduled + outages. + """ + sorted_outages = sorted(self.outages, + key=lambda o: (o.duration, + o.count, + o.percentage_of_capacity_lost, + sum(sum(map(ord, name)) + for name + in o.allowed_months), + o.allow_outage_overlap)) + for outage in sorted_outages[::-1]: + self.seed += 1 + SingleOutageScheduler(outage, self).calculate() + return self.total_losses
+ + +
[docs]class SingleOutageScheduler: + """A scheduler for a single outage. + + Given information about a single type of outage, this class + facilitates the (randomized) scheduling of all requested instances + of the outage. See :meth:`SingleOutageScheduler.calculate` for + specific details about the scheduling process. + + Attributes + ---------- + outage : :obj:`Outage` + The user-provided :obj:`Outage` containing info about the outage + to be scheduled. + scheduler : :obj:`OutageScheduler` + A scheduler object that keeps track of the total hourly losses + from the input outage as well as any other outages it has + already scheduled. + can_schedule_more : :obj:`np.array` + A boolean array (of length 8760) indicating wether or not more + losses can be scheduled for a given hour. This is specific + to the input outage only. + + Warnings + -------- + It is possible that not all outages input by the user can be + scheduled. This can happen when there is not enough time allowed + for all of the input outages. To avoid this issue, always be sure to + allow a large enough month range for long outages that take up a big + portion of the farm and try to allow outage overlap whenever + possible. + + See Also + -------- + :class:`OutageScheduler` : Scheduler for multiple outages. + :class:`Outage` : Specifications for a single outage. + """ + + MAX_ITER = 10_000 + """Max number of extra attempts to schedule outages.""" + + def __init__(self, outage, scheduler): + """ + + Parameters + ---------- + outage : Outage + An outage object containing info about the outage to be + scheduled. + scheduler : OutageScheduler + A scheduler object that keeps track of the total hourly + losses from the input outage as well as any other outages + it has already scheduled. + """ + self.outage = outage + self.scheduler = scheduler + self.can_schedule_more = np.full(8760, False) + self._scheduled_outage_inds = [] + +
[docs] def calculate(self): + """Calculate losses from stochastically scheduled outages. + + This function attempts to schedule outages according to the + specification provided in the :obj:`Outage` input. Specifically, + it checks the available hours based on the main + :obj:`Scheduler <OutageScheduler>` (which may have other outages + already scheduled) and attempts to randomly add new outages with + the specified duration and percent of losses. The function + terminates when the desired number of outages (specified by + :attr:`Outage.count`) have been successfully scheduled, or when + the number of attempts exceeds + :attr:`~SingleOutageScheduler.MAX_ITER` + :attr:`Outage.count`. + + Warns + ----- + reVLossesWarning + If the number of requested outages could not be scheduled. + """ + self.update_when_can_schedule_from_months() + + for iter_ind in range(self.outage.count + self.MAX_ITER): + self.update_when_can_schedule() + if not self.can_schedule_more.any(): + break + seed = self.scheduler.seed + iter_ind + outage_slice = self.find_random_outage_slice(seed=seed) + if self.can_schedule_more[outage_slice].all(): + self.schedule_losses(outage_slice) + if len(self._scheduled_outage_inds) == self.outage.count: + break + + if len(self._scheduled_outage_inds) < self.outage.count: + if len(self._scheduled_outage_inds) == 0: + msg_start = "Could not schedule any requested outages" + else: + msg_start = ("Could only schedule {} out of {} requested " + "outages" + .format(len(self._scheduled_outage_inds), + self.outage.count)) + msg = ("{} after a max of {:,} iterations. You are likely " + "attempting to schedule a lot of long outages or a lot " + "of short outages with a large percentage of the farm at " + "a time. Please adjust the outage specifications and try " + "again" + .format(msg_start, self.outage.count + self.MAX_ITER)) + logger.warning(msg) + warnings.warn(msg, reVLossesWarning)
+ +
[docs] def update_when_can_schedule_from_months(self): + """ + Update :attr:`can_schedule_more` using :attr:`Outage.allowed_months`. + + This function sets the :attr:`can_schedule_more` bool array to + `True` for all of the months in :attr:`Outage.allowed_months`. + """ + inds = hourly_indices_for_months(self.outage.allowed_months) + self.can_schedule_more[inds] = True
+ +
[docs] def update_when_can_schedule(self): + """Update :attr:`can_schedule_more` using :obj:`OutageScheduler`. + + This function sets the :attr:`can_schedule_more` bool array to + `True` wherever :attr:`OutageScheduler.can_schedule_more` is + also `True` and wherever the losses from this outage would not + cause the :attr:`OutageScheduler.total_losses` to exceed 100%. + """ + self.can_schedule_more &= self.scheduler.can_schedule_more + if self.outage.allow_outage_overlap: + total_new_losses = (self.scheduler.total_losses + + self.outage.percentage_of_capacity_lost) + losses_will_not_exceed_100 = total_new_losses <= 100 + self.can_schedule_more &= losses_will_not_exceed_100 + else: + self.can_schedule_more &= self.scheduler.total_losses == 0
+ +
[docs] def find_random_outage_slice(self, seed=None): + """Find a random slot of time for this type of outage. + + This function randomly selects a starting time for this outage + given the allowed times in :attr:`can_schedule_more`. It does + **not** verify that the outage can be scheduled for the entire + requested duration. + + Parameters + ---------- + seed : int, optional + Integer used to seed the :func:`np.random.choice` call. + If :obj:`None`, seed is not used. + + Returns + ------- + :obj:`slice` + A slice corresponding to the random slot of time for this + type of outage. + """ + if seed is not None: + np.random.seed(seed) + outage_ind = np.random.choice(np.where(self.can_schedule_more)[0]) + return slice(outage_ind, outage_ind + self.outage.duration)
+ +
[docs] def schedule_losses(self, outage_slice): + """Schedule the input outage during the given slice of time. + + Given a slice in the hourly loss array, add the losses from this + outage (which is equivalent to scheduling them). + + Parameters + ---------- + outage_slice : slice + A slice corresponding to the slot of time to schedule this + outage. + """ + self._scheduled_outage_inds.append(outage_slice.start) + self.scheduler.total_losses[outage_slice] += ( + self.outage.percentage_of_capacity_lost) + if not self.outage.allow_outage_overlap: + self.scheduler.can_schedule_more[outage_slice] = False
+ + +
[docs]class ScheduledLossesMixin: + """Mixin class for :class:`reV.SAM.generation.AbstractSamGeneration`. + + Warning + ------- + Using this class for anything except as a mixin for + :class:`~reV.SAM.generation.AbstractSamGeneration` may result in + unexpected results and/or errors. + """ + + OUTAGE_CONFIG_KEY = 'reV_outages' + """Specify outage information in the config file using this key.""" + OUTAGE_SEED_CONFIG_KEY = 'reV_outages_seed' + """Specify a randomizer seed in the config file using this key.""" + +
[docs] def add_scheduled_losses(self, resource=None): + """Add stochastically scheduled losses to SAM config file. + + This function reads the information in the ``reV_outages`` key + of the ``sam_sys_inputs`` dictionary and computes stochastically + scheduled losses from that input. If the value for + ``reV_outages`` is a string, it must have been generated by + calling :func:`json.dumps` on the list of dictionaries + containing outage specifications. Otherwise, the outage + information is expected to be a list of dictionaries containing + outage specifications. See :class:`Outage` for a description of + the specifications allowed for each outage. The scheduled losses + are passed to SAM via the ``hourly`` key to signify which hourly + capacity factors should be adjusted with outage losses. If no + outage info is specified in ``sam_sys_inputs``, no scheduled + losses are added. + + Parameters + ---------- + resource : pd.DataFrame, optional + Time series resource data for a single location with a + pandas DatetimeIndex. The ``year`` value of the index will + be used to seed the stochastically scheduled losses. If + `None`, no yearly seed will be used. + + See Also + -------- + :class:`Outage` : Single outage specification. + + Notes + ----- + The scheduled losses are passed to SAM via the ``hourly`` key to + signify which hourly capacity factors should be adjusted with + outage losses. If the user specifies other hourly adjustment + factors via the ``hourly`` key, the effect is combined. For + example, if the user inputs a 33% hourly adjustment factor and + reV schedules an outage for 70% of the farm down for the same + hour, then the resulting adjustment factor is + + .. math: 1 - [(1 - 70/100) * (1 - 33/100)] = 0.799 + + This means the generation will be reduced by ~80%, because the + user requested 33% losses for the 30% the farm that remained + operational during the scheduled outage (i.e. 20% remaining of + the original generation). 
+ """ + + outages = self._user_outage_input() + if not outages: + return + + self._set_base_seed(resource) + + logger.debug("Adding the following stochastically scheduled outages: " + "{}".format(outages)) + logger.debug("Scheduled outages seed: {}".format(self.outage_seed)) + + scheduler = OutageScheduler(outages, seed=self.outage_seed) + hourly_outages = scheduler.calculate() + self._add_outages_to_sam_inputs(hourly_outages) + + logger.debug("Hourly adjustment factors after scheduled outages: {}" + .format(list(self.sam_sys_inputs['hourly'])))
+ + def _user_outage_input(self): + """Get outage and seed info from config. """ + outage_specs = self.sam_sys_inputs.pop(self.OUTAGE_CONFIG_KEY, None) + if outage_specs is None: + return + + # site-specific info is input as str + if isinstance(outage_specs, str): + outage_specs = json.loads(outage_specs) + + outages = [Outage(spec) for spec in outage_specs] + return outages + + def _set_base_seed(self, resource): + """Set the base seed base don user input. """ + self.__base_seed = 0 + if resource is not None: + self.__base_seed += int(resource.index.year.values[0]) + self.__base_seed += self.sam_sys_inputs.pop( + self.OUTAGE_SEED_CONFIG_KEY, 0) + + def _add_outages_to_sam_inputs(self, outages): + """Add the hourly adjustment factors to config, checking user input.""" + + hourly_mult = 1 - outages / 100 + + user_hourly_input = self.sam_sys_inputs.pop('hourly', [0] * 8760) + user_hourly_mult = 1 - np.array(user_hourly_input) / 100 + + final_hourly_mult = hourly_mult * user_hourly_mult + self.sam_sys_inputs['hourly'] = (1 - final_hourly_mult) * 100 + + @property + def outage_seed(self): + """int: A value to use as the seed for the outage losses. """ + # numpy seeds must be between 0 and 2**32 - 1 + return self._seed_from_inputs() % 2**32 + + def _seed_from_inputs(self): + """Get seed value from inputs. """ + try: + return int(self.meta.name) + self.__base_seed + except (AttributeError, TypeError, ValueError): + pass + + try: + return hash(tuple(self.meta)) + self.__base_seed + except (AttributeError, TypeError): + pass + + return self.__base_seed
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/losses/utils.html b/_modules/reV/losses/utils.html new file mode 100644 index 000000000..42405c684 --- /dev/null +++ b/_modules/reV/losses/utils.html @@ -0,0 +1,878 @@ + + + + + + reV.losses.utils — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.losses.utils

+# -*- coding: utf-8 -*-
+"""reV-losses utilities.
+
+"""
+import calendar
+import logging
+
+import numpy as np
+
+from reV.utilities.exceptions import reVLossesValueError
+
+logger = logging.getLogger(__name__)
+
+# 1900 is just a representative year, since a year input is required
+DAYS_PER_MONTH = [calendar.monthrange(1900, i)[1] for i in range(1, 13)]
+FIRST_DAY_INDEX_OF_MONTH = np.cumsum([0] + DAYS_PER_MONTH[:-1])
+
+
+
[docs]def convert_to_full_month_names(month_names): + """Format an iterable of month names to match those in :mod:`calendar`. + + This function will format each input name to match the formatting + in :obj:`calendar.month_name` (upper case, no extra whitespace), and + it will convert all abbreviations to full month names. No other + assumptions are made about the inputs, so an input string " abc " + will get formatted and passed though as "Abc". + + Parameters + ---------- + month_names : iter + An iterable of strings representing the input month names. + Month names can be unformatted and contain 3-letter month + abbreviations. + + Returns + ------- + :obj:`list` + A list of month names matching the formatting of + :obj:`calendar.month_name` (upper case, no extra whitespace). + Abbreviations are also converted to a full month name. + + Examples + -------- + >>> input_names = ['March', ' aprIl ', 'Jun', 'jul', ' abc '] + >>> convert_to_full_month_names(input_names) + ['March', 'April', 'June', 'July', 'Abc'] + """ + formatted_names = [] + for name in month_names: + month_name = format_month_name(name) + month_name = full_month_name_from_abbr(month_name) or month_name + formatted_names.append(month_name) + return formatted_names
+ + +
[docs]def filter_unknown_month_names(month_names): + """Split the input into known and unknown month names. + + Parameters + ---------- + month_names : iter + An iterable of strings representing the input month names. Month + names must match the formatting in :obj:`calendar.month_name` + (upper case, no extra whitespace), otherwise they will be placed + into the ``unknown_months`` return list. + + Returns + ------- + known_months : :obj:`list` + List of known month names. + unknown_months : :obj:`list` + List of unknown month names. + """ + known_months, unknown_months = [], [] + for name in month_names: + if name in calendar.month_name: + known_months.append(name) + else: + unknown_months.append(name) + + return known_months, unknown_months
+ + +
[docs]def hourly_indices_for_months(month_names): + """Convert month names into a list of hourly indices. + + Given a list of month names, this function will return a list + of indices such that any index value corresponds to an hour within + the input months. + + Parameters + ---------- + month_names : iter + An iterable of month names for the desired starting indices. + The month names must match the formatting in + :obj:`calendar.month_name` (upper case, no extra whitespace), + otherwise their hourly indices will not be included in the + output. + + Returns + ------- + :obj:`list` + A list of hourly index values such that any index corresponds to + an hour within the input months. + """ + + indices = [] + for ind in sorted(month_indices(month_names)): + start_index = FIRST_DAY_INDEX_OF_MONTH[ind] * 24 + hours_in_month = DAYS_PER_MONTH[ind] * 24 + indices += list(range(start_index, start_index + hours_in_month)) + + return indices
+ + +
[docs]def month_indices(month_names): + """Convert input month names to an indices (0-11) of the months. + + Parameters + ---------- + month_names : iter + An iterable of month names for the desired starting indices. + The month names must match the formatting in + :obj:`calendar.month_name` (upper case, no extra whitespace), + otherwise their index will not be included in the output. + + Returns + ------- + :obj:`set` + A set of month indices for the input month names. Unknown + month indices (-1) are removed. + """ + return {month_index(name) for name in month_names} - {-1}
+ + +
[docs]def month_index(month_name): + """Convert a month name (as string) to an index (0-11) of the month. + + Parameters + ---------- + month_name : str + Name of month to corresponding to desired index. This input + must match the formatting in :obj:`calendar.month_name` + (upper case, no extra whitespace). + + Returns + ------- + :obj:`int` + The 0-index of the month, or -1 if the month name is not + understood. + + Examples + -------- + >>> month_index("June") + 5 + >>> month_index("July") + 6 + >>> month_index("Jun") + -1 + >>> month_index("july") + -1 + """ + for month_ind in range(12): + if calendar.month_name[month_ind + 1] == month_name: + return month_ind + + return -1
+ + +
[docs]def format_month_name(month_name): + """Format a month name to match the names in the :mod:`calendar` module. + + In particular, any extra spaces at the beginning or end of the + string are stripped, and the name is converted to a title (first + letter is uppercase). + + Parameters + ---------- + month_name : str + Name of month. + + Returns + ------- + :obj:`str` + Name of month, formatted to match the month names in the + :mod:`calendar` module. + + Examples + -------- + >>> format_month_name("June") + "June" + >>> format_month_name("aprIl") + "April" + >>> format_month_name(" aug ") + "Aug" + """ + return month_name.strip().title()
+ + +
[docs]def full_month_name_from_abbr(month_name): + """Convert a month abbreviation to a full month name. + + Parameters + ---------- + month_name : str + Abbreviated month name. Must be one of: + + - "Jan" + - "Feb" + - "Mar" + - "Apr" + - "May" + - "Jun" + - "Jul" + - "Aug" + - "Sep" + - "Oct" + - "Nov" + - "Dec" + + If the input does not match one of these, this function returns + :obj:`None`. + + + Returns + ------- + :obj:`str` | :obj:`None` + Unabbreviated month name, or :obj:`None` if input abbreviation + is not understood. + + Examples + -------- + >>> full_month_name_from_abbr("Jun") + "June" + >>> full_month_name_from_abbr("June") is None + True + >>> full_month_name_from_abbr('Abcdef') is None + True + """ + for month_index in range(1, 13): + if calendar.month_abbr[month_index] == month_name: + return calendar.month_name[month_index]
+ + +def _validate_arrays_not_empty(obj, array_names=None): + """Validate that the input data arrays are not empty. """ + array_names = array_names or [] + for name in array_names: + try: + arr = getattr(obj, name) + except AttributeError: + continue + if not arr.size: + msg = "Invalid {} input: Array is empty! - {}" + msg = msg.format(name.replace('_', ' '), arr) + logger.error(msg) + raise reVLossesValueError(msg) +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/nrwal/nrwal.html b/_modules/reV/nrwal/nrwal.html new file mode 100644 index 000000000..41a65767e --- /dev/null +++ b/_modules/reV/nrwal/nrwal.html @@ -0,0 +1,1471 @@ + + + + + + reV.nrwal.nrwal — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.nrwal.nrwal

+# -*- coding: utf-8 -*-
+# pylint: disable=no-member
+"""
+reV-NRWAL analysis module.
+
+This module runs reV data through the NRWAL compute library. This code was
+first developed to use a custom offshore wind LCOE equation library but has
+since been refactored to analyze any equation library in NRWAL.
+
+Everything in this module operates on the spatiotemporal resolution of the reV
+generation output file. This is usually the wind or solar resource resolution
+but could be the supply curve resolution after representative profiles is run.
+"""
+import numpy as np
+import pandas as pd
+import logging
+from warnings import warn
+
+from reV.generation.generation import Gen
+from reV.handlers.outputs import Outputs
+from reV.utilities.exceptions import (DataShapeError,
+                                      OffshoreWindInputWarning,
+                                      OffshoreWindInputError)
+from reV.utilities import log_versions
+
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class RevNrwal: + """RevNrwal""" + + DEFAULT_META_COLS = ('config', ) + """Columns from the `site_data` table to join to the output meta data""" + + def __init__(self, gen_fpath, site_data, sam_files, nrwal_configs, + output_request, save_raw=True, meta_gid_col='gid', + site_meta_cols=None): + """Framework to handle reV-NRWAL analysis. + + ``reV`` NRWAL analysis runs ``reV`` data through the NRWAL + compute library. Everything in this module operates on the + spatiotemporal resolution of the ``reV`` generation output file + (usually the wind or solar resource resolution but could also be + the supply curve resolution after representative profiles is + run). + + Parameters + ---------- + gen_fpath : str + Full filepath to HDF5 file with ``reV`` generation or + rep_profiles output. Anything in the `output_request` input + is added to and/or manipulated within this file. + + .. Note:: If executing ``reV`` from the command line, this + input can also be ``"PIPELINE"`` to parse the output of + one of the previous step and use it as input to this call. + However, note that duplicate executions of ``reV`` + commands prior to this one within the pipeline may + invalidate this parsing, meaning the `gen_fpath` input + will have to be specified manually. + + site_data : str | pd.DataFrame + Site-specific input data for NRWAL calculation.If this input + is a string, it should be a path that points to a CSV file. + Otherwise, this input should be a DataFrame with + pre-extracted site data. Rows in this table should match + the `meta_gid_col` in the `gen_fpath` meta data input + sites via a ``gid`` column. A ``config`` column must also be + provided that corresponds to the `nrwal_configs` input. Only + sites with a gid in this file's ``gid`` column will be run + through NRWAL. + sam_files : dict | str + A dictionary mapping SAM input configuration ID(s) to SAM + configuration(s). Keys are the SAM config ID(s) which + correspond to the keys in the `nrwal_configs` input. 
Values + for each key are either a path to a corresponding SAM + config file or a full dictionary of SAM config inputs. For + example:: + + sam_files = { + "default": "/path/to/default/sam.json", + "onshore": "/path/to/onshore/sam_config.yaml", + "offshore": { + "sam_key_1": "sam_value_1", + "sam_key_2": "sam_value_2", + ... + }, + ... + } + + This input can also be a string pointing to a single SAM + config file. In this case, the ``config`` column of the + CSV points input should be set to ``None`` or left out + completely. See the documentation for the ``reV`` SAM class + (e.g. :class:`reV.SAM.generation.WindPower`, + :class:`reV.SAM.generation.PvWattsv8`, + :class:`reV.SAM.generation.Geothermal`, etc.) for + documentation on the allowed and/or required SAM config file + inputs. + nrwal_configs : dict + A dictionary mapping SAM input configuration ID(s) to NRWAL + configuration(s). Keys are the SAM config ID(s) which + correspond to the keys in the `sam_files` input. Values + for each key are either a path to a corresponding NRWAL YAML + or JSON config file or a full dictionary of NRWAL config + inputs. For example:: + + nrwal_configs = { + "default": "/path/to/default/nrwal.json", + "onshore": "/path/to/onshore/nrwal_config.yaml", + "offshore": { + "nrwal_key_1": "nrwal_value_1", + "nrwal_key_2": "nrwal_value_2", + ... + }, + ... + } + + output_request : list | tuple + List of output dataset names to be written to the + `gen_fpath` file. Any key from the NRWAL configs or any of + the inputs (site_data or sam_files) is available to be + exported as an output dataset. If you want to manipulate a + dset like ``cf_mean`` from `gen_fpath` and include it in the + `output_request`, you should set ``save_raw=True`` and then + use ``cf_mean_raw`` in the NRWAL equations as the input. + This allows you to define an equation in the NRWAL configs + for a manipulated ``cf_mean`` output that can be included in + the `output_request` list. 
+ save_raw : bool, optional + Flag to save an initial ("raw") copy of input datasets from + `gen_fpath` that are also part of the `output_request`. For + example, if you request ``cf_mean`` in output_request but + also manipulate the ``cf_mean`` dataset in the NRWAL + equations, the original ``cf_mean`` will be archived under + the ``cf_mean_raw`` dataset in `gen_fpath`. + By default, ``True``. + meta_gid_col : str, optional + Column label in the source meta data from `gen_fpath` that + contains the unique gid identifier. This will be joined to + the site_data ``gid`` column. By default, ``"gid"``. + site_meta_cols : list | tuple, optional + Column labels from `site_data` to be added to the meta data + table in `gen_fpath`. If ``None``, only the columns in + :attr:`DEFAULT_META_COLS` will be added. Any columns + requested via this input will be considered *in addition to* + the :attr:`DEFAULT_META_COLS`. By default, ``None``. + """ + + log_versions(logger) + + # delayed NRWAL import to cause less errors with old reV installs + # if not running nrwal. 
+ from NRWAL import NrwalConfig + + self._meta_gid_col = meta_gid_col + self._gen_fpath = gen_fpath + self._site_data = site_data + self._output_request = output_request + self._meta_out = None + self._time_index = None + self._save_raw = save_raw + self._nrwal_inputs = self._out = None + + self._nrwal_configs = {k: NrwalConfig(v) for k, v in + nrwal_configs.items()} + + self._site_meta_cols = site_meta_cols + if self._site_meta_cols is None: + self._site_meta_cols = list(self.DEFAULT_META_COLS) + else: + self._site_meta_cols = list(self._site_meta_cols) + self._site_meta_cols += list(self.DEFAULT_META_COLS) + self._site_meta_cols = list(set(self._site_meta_cols)) + + self._site_data = self._parse_site_data() + self._meta_source = self._parse_gen_data() + self._analysis_gids, self._site_data = self._parse_analysis_gids() + + pc = Gen.get_pc(self._site_data[['gid', 'config']], points_range=None, + sam_configs=sam_files, tech='windpower') + self._project_points = pc.project_points + + self._sam_sys_inputs = self._parse_sam_sys_inputs() + meta_gids = self.meta_source[self._meta_gid_col].values + logger.info('Finished initializing NRWAL analysis module for "{}" ' + '{} through {} with {} total generation points and ' + '{} NRWAL analysis points.' + .format(self._meta_gid_col, meta_gids.min(), + meta_gids.max(), len(self.meta_source), + len(self.analysis_gids))) + + def _parse_site_data(self, required_columns=('gid', 'config')): + """Parse the site-specific spatial input data file + + Parameters + ---------- + required_columns : tuple | list + List of column names that must be in the site_data in + order to run the reV NRWAL module. + + Returns + ------- + site_data : pd.DataFrame + Dataframe of extracted site_data. Each row is an analysis point and + columns are spatial data inputs. 
+ """ + + if isinstance(self._site_data, str): + self._site_data = pd.read_csv(self._site_data) + + if 'dist_l_to_ts' in self._site_data: + if self._site_data['dist_l_to_ts'].sum() > 0: + w = ('Possible incorrect Offshore data input! "dist_l_to_ts" ' + '(distance land to transmission) input is non-zero. ' + 'Most reV runs set this to zero and input the cost ' + 'of transmission from landfall tie-in to ' + 'transmission feature in the supply curve module.') + logger.warning(w) + warn(w, OffshoreWindInputWarning) + + for c in required_columns: + if c not in self._site_data: + msg = ('Did not find required "{}" column in site_data!' + .format(c)) + logger.error(msg) + raise KeyError(msg) + + self._site_data = self._site_data.sort_values('gid') + + return self._site_data + + def _parse_gen_data(self): + """Parse generation data and get meta data + + Returns + ------- + meta : pd.DataFrame + Full meta data from gen_fpath. + """ + + with Outputs(self._gen_fpath, mode='r') as out: + meta = out.meta + + msg = ('Could not find "{}" column in source generation h5 file ' + 'meta data! Available cols: {}' + .format(self._meta_gid_col, meta.columns.values.tolist())) + assert self._meta_gid_col in meta, msg + + # currently an assumption of sorted gids in the reV gen output + msg = ('Source capacity factor meta data is not ordered!') + meta_gids = list(meta[self._meta_gid_col]) + assert meta_gids == sorted(meta_gids), msg + + return meta + + def _parse_analysis_gids(self): + """Check the intersection of the generation gids and the site_data + input gids. + + Returns + ------- + analysis_gids : np.ndarray + Array indicating which sites in the source meta data to process + with NRWAL. This is the intersection of the gids in the generation + meta data and the gids in the site_data input. 
+ site_data : pd.DataFrame + The site_data table reduced to only those gids that are in the + analysis_gids + """ + + meta_gids = self.meta_source[self._meta_gid_col].values + + missing = ~np.isin(meta_gids, self._site_data['gid']) + if any(missing): + msg = ('{} sites from the generation meta data input were ' + 'missing from the "site_data" input and will not be ' + 'run through NRWAL: {}' + .format(missing.sum(), meta_gids[missing])) + logger.info(msg) + + missing = ~np.isin(self._site_data['gid'], meta_gids) + if any(missing): + missing = self._site_data['gid'].values[missing] + msg = ('{} sites from the "site_data" input were missing from the ' + 'generation meta data and will not be run through NRWAL: {}' + .format(len(missing), missing)) + logger.info(msg) + + analysis_gids = set(meta_gids) & set(self._site_data['gid']) + analysis_gids = np.array(sorted(list(analysis_gids))) + + # reduce the site data table to only those sites being analyzed + mask = np.isin(self._site_data['gid'], meta_gids) + self._site_data = self._site_data[mask] + + return analysis_gids, self._site_data + + def _parse_sam_sys_inputs(self): + """Get the SAM system inputs dict from project points. + + Returns + ------- + system_inputs : pd.DataFrame + DataFrame of SAM config inputs (columns) for every active nrwal + analysis gid (row). Index is resource gids and there is also a + column "gid" with the copied gids. + """ + + system_inputs = {} + + for gid in self.analysis_gids: + system_inputs[gid] = self._project_points[gid][1] + + system_inputs = pd.DataFrame(system_inputs).T + system_inputs = system_inputs.sort_index() + system_inputs['gid'] = system_inputs.index.values + system_inputs.index.name = 'gid' + mask = system_inputs['gid'].isin(self.analysis_gids) + system_inputs = system_inputs[mask] + + return system_inputs + + def _init_outputs(self): + """Initialize a dictionary of outputs with dataset names as keys and + numpy arrays as values. 
All datasets are initialized as 1D arrays and + must be overwritten if found to be 2D. Only active analysis sites will + have data in the output, sites that were not found in the site_data + "gid" column will not have data in these output arrays + + Returns + ------- + out : dict + Dictionary of output data + """ + out = {} + + for key in self._output_request: + out[key] = np.full(len(self.analysis_gids), np.nan, + dtype=np.float32) + + if key in self.gen_dsets and not self._save_raw: + msg = ('Output request "{0}" was also found in ' + 'the source gen file but save_raw=False! If ' + 'you are manipulating this ' + 'dset, make sure you set save_raw=False ' + 'and reference "{0}_raw" as the ' + 'input in the NRWAL equations and then define "{0}" ' + 'as the final manipulated dataset.'.format(key)) + logger.warning(msg) + warn(msg) + elif key in self.gen_dsets: + msg = ('Output request "{0}" was also found in ' + 'the source gen file. If you are manipulating this ' + 'dset, make sure you reference "{0}_raw" as the ' + 'input in the NRWAL equations and then define "{0}" ' + 'as the final manipulated dataset.'.format(key)) + logger.info(msg) + + if key in self._nrwal_inputs: + out[key] = self._nrwal_inputs[key] + + return out + + def _preflight_checks(self): + """Run some preflight checks on the offshore inputs""" + sam_files = {k: v for k, v in + self._project_points.sam_inputs.items() + if k in self._nrwal_configs} + + for cid, sys_in in sam_files.items(): + loss1 = sys_in.get('wind_farm_losses_percent', 0) + loss2 = sys_in.get('turb_generic_loss', 0) + if loss1 != 0 or loss2 != 0: + msg = ('Wind farm loss for config "{}" is not 0. 
When using ' + 'NRWAL for offshore analysis, consider using gross ' + 'capacity factors from reV generation and applying ' + 'spatially dependent losses from the NRWAL equations' + .format(cid)) + logger.info(msg) + + available_ids = list(self._nrwal_configs.keys()) + requested_ids = list(self._site_data['config'].values) + missing = set(requested_ids) - set(available_ids) + if any(missing): + msg = ('The following config ids were requested in the offshore ' + 'data input but were not available in the NRWAL config ' + 'input dict: {}'.format(missing)) + logger.error(msg) + raise OffshoreWindInputError(msg) + + check_gid_order = (self._site_data['gid'].values + == self._sam_sys_inputs['gid'].values) + msg = 'NRWAL site_data and system input dataframe had bad order' + assert (check_gid_order).all(), msg + + missing = [c for c in self._site_meta_cols if c not in self._site_data] + if any(missing): + msg = ('Could not find requested NRWAL site data pass through ' + 'columns in offshore input data: {}'.format(missing)) + logger.error(msg) + raise OffshoreWindInputError(msg) + + def _get_input_data(self): + """Get all the input data from the site_data, SAM system configs, and + generation h5 file, formatted together in one dictionary for NRWAL. 
+ + Returns + ------- + nrwal_inputs : dict + Dictionary mapping required NRWAL input variable names (keys) to 1 + or 2D arrays of inputs for all the analysis_gids + """ + + logger.info('Setting up input data for NRWAL...') + + # preconditions for this to work properly + assert len(self._site_data) == len(self.analysis_gids) + assert len(self._sam_sys_inputs) == len(self.analysis_gids) + + all_required = [] + for config_id, nrwal_config in self._nrwal_configs.items(): + all_required += list(nrwal_config.required_inputs) + all_required = list(set(all_required)) + + missing_vars = [var for var in nrwal_config.required_inputs + if var not in self._site_data + and var not in self.meta_source + and var not in self._sam_sys_inputs + and var not in self.gen_dsets] + + if any(missing_vars): + msg = ('Could not find required input variables {} ' + 'for NRWAL config "{}" in either the offshore ' + 'data or the SAM system data!' + .format(missing_vars, config_id)) + logger.error(msg) + raise OffshoreWindInputError(msg) + + meta_data_vars = [var for var in all_required + if var in self.meta_source] + logger.info('Pulling the following inputs from the gen meta data: {}' + .format(meta_data_vars)) + nrwal_inputs = {var: self.meta_source[var].values[self.analysis_mask] + for var in meta_data_vars} + + site_data_vars = [var for var in all_required + if var in self._site_data + and var not in nrwal_inputs] + site_data_vars.append('config') + logger.info('Pulling the following inputs from the site_data input: {}' + .format(site_data_vars)) + for var in site_data_vars: + nrwal_inputs[var] = self._site_data[var].values + + sam_sys_vars = [var for var in all_required + if var in self._sam_sys_inputs + and var not in nrwal_inputs] + logger.info('Pulling the following inputs from the SAM system ' + 'configs: {}'.format(sam_sys_vars)) + for var in sam_sys_vars: + nrwal_inputs[var] = self._sam_sys_inputs[var].values + + gen_vars = [var for var in all_required + if var in self.gen_dsets + 
and var not in nrwal_inputs] + logger.info('Pulling the following inputs from the generation ' + 'h5 file: {}'.format(gen_vars)) + with Outputs(self._gen_fpath, mode='r') as f: + source_gids = self.meta_source[self._meta_gid_col] + gen_gids = np.where(source_gids.isin(self.analysis_gids))[0] + for var in gen_vars: + shape = f.shapes[var] + if len(shape) == 1: + nrwal_inputs[var] = f[var, gen_gids] + elif len(shape) == 2: + nrwal_inputs[var] = f[var, :, gen_gids] + else: + msg = ('Data shape for "{}" must be 1 or 2D but ' + 'received: {}'.format(var, shape)) + logger.error(msg) + raise DataShapeError(msg) + + logger.info('Finished setting up input data for NRWAL!') + + return nrwal_inputs + + @property + def time_index(self): + """Get the source time index.""" + if self._time_index is None: + with Outputs(self._gen_fpath, mode='r') as out: + self._time_index = out.time_index + + return self._time_index + + @property + def gen_dsets(self): + """Get the available datasets from the gen source file""" + with Outputs(self._gen_fpath, mode='r') as out: + dsets = out.dsets + + return dsets + + @property + def meta_source(self): + """Get the full meta data (onshore + offshore)""" + return self._meta_source + + @property + def meta_out(self): + """Get the combined onshore and offshore meta data.""" + if self._meta_out is None: + self._meta_out = self._meta_source.copy() + for col in self._site_meta_cols: + data = self._nrwal_inputs[col] + self._meta_out.loc[self.analysis_mask, col] = data + + return self._meta_out + + @property + def analysis_mask(self): + """Get a boolean array to mask the source generation meta data where + True is sites that are to be analyzed by NRWAL. + + Returns + ------- + np.ndarray + """ + mask = np.isin(self.meta_source[self._meta_gid_col], + self.analysis_gids) + return mask + + @property + def analysis_gids(self): + """Get an array of gids from the source generation meta data that are + to-be analyzed by nrwal. 
+ + Returns + ------- + np.ndarray + """ + return self._analysis_gids + + @property + def outputs(self): + """Get a dict of NRWAL outputs. Only active analysis sites will have + data in the output, sites that were not found in the site_data "gid" + column will not have data in these output arrays""" + return self._out + + def _save_nrwal_out(self, name, nrwal_out, output_mask): + """Save a dataset from the nrwal_out dictionary to the self._out + attribute + + Parameters + ---------- + name : str + Dataset name of the nrwal output to be saved. + nrwal_out : dict + Output dictionary from a successfully evaluated NrwalConfig object + containing the dataset with the input name + output_mask : np.ndarray + Boolean array showing which gids in self.analysis_gids should be + assigned data from this NRWAL output. If not all true, there are + probably multiple NrwalConfig objects that map to different sets of + gids. + """ + value = nrwal_out[name] + value = self._value_to_array(value, name) + + if len(value.shape) == 1: + self._out[name][output_mask] = value[output_mask] + + elif len(value.shape) == 2: + if len(self._out[name].shape) == 1: + if not all(np.isnan(self._out[name])): + msg = ('Output dataset "{}" was initialized as 1D but was ' + 'later found to be 2D but was not all NaN!' + .format(name)) + logger.error(msg) + raise DataShapeError(msg) + + # re-initialize the dataset as 2D now that we + # know what the output looks like + out_shape = (len(self.time_index), len(self.analysis_gids)) + self._out[name] = np.full(out_shape, np.nan, dtype=np.float32) + + self._out[name][:, output_mask] = value[:, output_mask] + + else: + msg = ('Could not make sense of NRWAL output "{}" ' + 'with shape {}'.format(name, value.shape)) + logger.error(msg) + raise DataShapeError(msg) + + def _save_nrwal_misc(self, name, nrwal_config, output_mask): + """Save miscellaneous output requests from a NRWAL config object (not + NRWAL output dictionary) to the self._out attribute. 
+ + Parameters + ---------- + name : str + Dataset name of the nrwal output to be saved. + nrwal_config : NrwalConfig + NrwalConfig object containing NRWAL Equation objects or something + else that is to be exported to the outputs + output_mask : np.ndarray + Boolean array showing which gids in self.analysis_gids should be + assigned data from this NRWAL output. If not all true, there are + probably multiple NrwalConfig objects that map to different sets of + gids. + """ + + from NRWAL import Equation + value = nrwal_config[name] + + if isinstance(value, Equation): + msg = ('Cannot retrieve Equation "{}" from NRWAL. ' + 'Must be a number!'.format(name)) + assert not any(value.variables), msg + value = value.eval() + + value = self._value_to_array(value, name) + self._out[name][output_mask] = value[output_mask] + + def _value_to_array(self, value, name): + """Turn the input into numpy array if it isn't already.""" + if np.issubdtype(type(value), np.number): + value *= np.ones(len(self.analysis_gids), dtype=np.float32) + + if not isinstance(value, np.ndarray): + msg = ('NRWAL key "{}" returned bad type of "{}", needs to be ' + 'numeric or an output array.'.format(name, type(value))) + logger.error(msg) + raise TypeError(msg) + return value + +
[docs] def run_nrwal(self): + """Run analysis via the NRWAL analysis library""" + + self._preflight_checks() + self.save_raw_dsets() + self._nrwal_inputs = self._get_input_data() + self._out = self._init_outputs() + + for i, (cid, nrwal_config) in enumerate(self._nrwal_configs.items()): + output_mask = self._site_data['config'].values == cid + logger.info('Running NRWAL config {} of {}: "{}" and applying ' + 'to {} out of {} total sites' + .format(i + 1, len(self._nrwal_configs), cid, + output_mask.sum(), len(output_mask))) + + nrwal_out = nrwal_config.eval(inputs=self._nrwal_inputs) + + # pylint: disable=C0201 + for name in self._out.keys(): + if name in nrwal_out: + self._save_nrwal_out(name, nrwal_out, output_mask) + + elif name in nrwal_config.keys(): + self._save_nrwal_misc(name, nrwal_config, output_mask) + + elif name not in self._nrwal_inputs: + msg = ('Could not find "{}" in the output dict of NRWAL ' + 'config {}'.format(name, cid)) + logger.warning(msg) + warn(msg)
+ +
[docs] def check_outputs(self): + """Check the nrwal outputs for nan values and raise errors if found.""" + for name, arr in self._out.items(): + if np.isnan(arr).all(): + msg = ('Output array "{}" is all NaN! Probably was not ' + 'found in the available NRWAL keys.'.format(name)) + logger.warning(msg) + warn(msg) + elif np.isnan(arr).any(): + mask = np.isnan(arr) + nan_meta = self.meta_source[self.analysis_mask][mask] + nan_gids = nan_meta[self._meta_gid_col].values + msg = ('NaN values ({} out of {}) persist in NRWAL ' + 'output "{}"!' + .format(np.isnan(arr).sum(), len(arr), name)) + logger.warning(msg) + logger.warning('This is the NRWAL meta that is causing NaN ' + 'outputs: {}'.format(nan_meta)) + logger.warning('These are the resource gids causing NaN ' + 'outputs: {}'.format(nan_gids)) + warn(msg)
+ +
[docs] def save_raw_dsets(self): + """If requested by save_raw=True, archive raw datasets that exist in + the gen_fpath file and are also requested in the output_request""" + if self._save_raw: + with Outputs(self._gen_fpath, 'a') as f: + for dset in self._output_request: + dset_raw = '{}_raw'.format(dset) + if dset in f and dset_raw not in f: + logger.info('Saving raw data from "{}" to "{}"' + .format(dset, dset_raw)) + f._add_dset(dset_raw, f[dset], f.dtypes[dset], + attrs=f.attrs[dset])
+ +
[docs] def write_to_gen_fpath(self): + """Save NRWAL outputs to input generation fpath file. + + Returns + ------- + str + Path to output file. + """ + + logger.info('Writing NRWAL outputs to: {}'.format(self._gen_fpath)) + write_all = self.analysis_mask.all() + + with Outputs(self._gen_fpath, 'a') as f: + meta_attrs = f.attrs['meta'] + del f._h5['meta'] + f._set_meta('meta', self.meta_out, attrs=meta_attrs) + + for dset, arr in self._out.items(): + if len(arr.shape) == 1: + data = np.full(len(self.meta_source), np.nan, + dtype=np.float32) + else: + full_shape = (len(self.time_index), + len(self.meta_source)) + data = np.full(full_shape, np.nan, dtype=np.float32) + + dset_attrs = {'scale_factor': 1} + dset_dtype = np.float32 + if dset in f.dsets: + logger.info('Found "{}" in file, loading data and ' + 'overwriting data for {} out of {} sites.' + .format(dset, self.analysis_mask.sum(), + len(self.analysis_mask))) + dset_attrs = f.attrs[dset] + dset_dtype = f.dtypes[dset] + if not write_all: + data = f[dset] + + if len(arr.shape) == 1: + data[self.analysis_mask] = arr + else: + data[:, self.analysis_mask] = arr + + logger.info('Writing final "{}" to: {}' + .format(dset, self._gen_fpath)) + f._add_dset(dset, data, dset_dtype, attrs=dset_attrs) + + logger.info('Finished writing NRWAL outputs to: {}' + .format(self._gen_fpath)) + return self._gen_fpath
+ +
[docs] def write_meta_to_csv(self, out_fpath=None): + """Combine NRWAL outputs with meta and write to output csv. + + Parameters + ---------- + out_fpath : str, optional + Full path to output NRWAL CSV file. The file path does not + need to include file ending - it will be added automatically + if missing. If ``None``, the generation HDF5 filepath will + be converted to a CSV out path by replacing the ".h5" file + ending with ".csv". By default, ``None``. + + Returns + ------- + str + Path to output file. + """ + if out_fpath is None: + out_fpath = self._gen_fpath.replace(".h5", ".csv") + elif not out_fpath.endswith(".csv"): + out_fpath = "{}.csv".format(out_fpath) + + logger.info('Writing NRWAL outputs to: {}'.format(out_fpath)) + meta_out = self.meta_out[self.analysis_mask].copy() + + for dset, arr in self._out.items(): + if len(arr.shape) != 1 or arr.shape[0] != meta_out.shape[0]: + msg = ('Skipping output {!r}: shape {} cannot be combined ' + 'with meta of shape {}!' + .format(dset, arr.shape, meta_out.shape)) + logger.warning(msg) + warn(msg) + continue + meta_out[dset] = arr + + meta_out.to_csv(out_fpath, index=False) + logger.info('Finished writing NRWAL outputs to: {}'.format(out_fpath)) + return out_fpath
+ +
[docs] def run(self, csv_output=False, out_fpath=None): + """Run NRWAL analysis. + + Parameters + ---------- + csv_output : bool, optional + Option to write H5 file meta + all requested outputs to + CSV file instead of storing in the HDF5 file directly. This + can be useful if the same HDF5 file is used for multiple + sets of NRWAL runs. Note that all requested output datasets + must be 1-dimensional in order to fir within the CSV output. + + .. Important:: This option is not compatible with + ``save_raw=True``. If you set ``csv_output=True``, then + the `save_raw` option is forced to be ``False``. + Therefore, make sure that you do not have any references + to "input_dataset_name_raw" in your NRWAL config. If you + need to manipulate an input dataset, save it to a + different output name in the NRWAL config or manually add + an "input_dataset_name_raw" dataset to your generation + HDF5 file before running NRWAL. + + By default, ``False``. + out_fpath : str, optional + This option has no effect if ``csv_output=False``. + Otherwise, this should be the full path to output NRWAL CSV + file. The file path does not need to include file ending - + it will be added automatically if missing. If ``None``, the + generation HDF5 filepath will be converted to a CSV out path + by replacing the ".h5" file ending with ".csv". + By default, ``None``. + + Returns + ------- + str + Path to output file. + """ + if csv_output and self._save_raw: + msg = ("`save_raw` option not allowed with `csv_output`. Setting" + "`save_raw=False`") + logger.warning(msg) + warn(msg) + self._save_raw = False + + if any(self.analysis_gids): + self.run_nrwal() + self.check_outputs() + if csv_output: + out_fp = self.write_meta_to_csv(out_fpath) + else: + out_fp = self.write_to_gen_fpath() + + logger.info('NRWAL module complete!') + + return out_fp
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/qa_qc/cli_qa_qc.html b/_modules/reV/qa_qc/cli_qa_qc.html new file mode 100644 index 000000000..d3121087a --- /dev/null +++ b/_modules/reV/qa_qc/cli_qa_qc.html @@ -0,0 +1,859 @@ + + + + + + reV.qa_qc.cli_qa_qc — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.qa_qc.cli_qa_qc

+# -*- coding: utf-8 -*-
+"""
+QA/QC CLI utility functions.
+"""
+import click
+import logging
+import numpy as np
+import os
+
+from rex.utilities.cli_dtypes import STR, STRLIST, INT
+from rex.utilities.loggers import init_logger
+from gaps.cli import as_click_command, CLICommandFromFunction
+
+from reV.utilities import ModuleName
+from reV.qa_qc.qa_qc import QaQc, QaQcModule
+from reV.qa_qc.summary import (SummarizeH5, SummarizeSupplyCurve,
+                               SupplyCurvePlot, ExclusionsMask)
+from reV import __version__
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]def cli_qa_qc(modules, out_dir, max_workers=None): + """Run QA/QC on reV outputs + + ``reV`` QA/QC performs quality assurance checks on ``reV`` output + data. Users can specify the type of QA/QC that should be applied + to each ``reV`` module. + + Parameters + ---------- + modules : dict + Dictionary of modules to QA/QC. Keys should be the names of the + modules to QA/QC. The values are dictionaries that represent the + config for the respective QA/QC step. Allowed config keys for + QA/QC are the "property" attributes of + :class:`~reV.qa_qc.qa_qc.QaQcModule`. + out_dir : str + Path to output directory. + max_workers : int, optional + Max number of workers to run for QA/QA. If ``None``, uses all + CPU cores. By default, ``None``. + + Raises + ------ + ValueError + If fpath is not an H5 or CSV file. + """ + for module, mcf in modules.items(): + module_config = QaQcModule(module, mcf, out_dir) + + qa_dir = out_dir + if module_config.sub_dir is not None: + qa_dir = os.path.join(out_dir, module_config.sub_dir) + + if module.lower() == 'exclusions': + QaQc.exclusions_mask(module_config.fpath, qa_dir, + layers_dict=module_config.excl_dict, + min_area=module_config.min_area, + kernel=module_config.area_filter_kernel, + plot_type=module_config.plot_type, + cmap=module_config.cmap, + plot_step=module_config.plot_step) + + elif module_config.fpath.endswith('.h5'): + QaQc.h5(module_config.fpath, qa_dir, dsets=module_config.dsets, + group=module_config.group, + process_size=module_config.process_size, + max_workers=max_workers, + plot_type=module_config.plot_type, cmap=module_config.cmap) + + elif module_config.fpath.endswith('.csv'): + QaQc.supply_curve(module_config.fpath, qa_dir, + columns=module_config.columns, + lcoe=module_config.lcoe, + plot_type=module_config.plot_type, + cmap=module_config.cmap) + else: + msg = ("Cannot run QA/QC for {}: 'fpath' must be a '*.h5' " + "or '*.csv' reV output file, but {} was given!" 
+ .format(module, module_config.fpath)) + logger.error(msg) + raise ValueError(msg)
+ + +qa_qc_command = CLICommandFromFunction(cli_qa_qc, name=str(ModuleName.QA_QC), + split_keys=None) +main = as_click_command(qa_qc_command) + + +@click.group() +@click.version_option(version=__version__) +@click.option('-v', '--verbose', is_flag=True, + help='Flag to turn on debug logging. Default is not verbose.') +@click.pass_context +def qa_qc_extra(ctx, verbose): + """Execute extra QA/QC utility""" + ctx.ensure_object(dict) + ctx.obj['VERBOSE'] = verbose + + +@qa_qc_extra.group(chain=True) +@click.option('--out_dir', '-o', type=click.Path(), required=True, + help="Directory path to save summary tables and plots too") +@click.option('--log_file', '-log', type=click.Path(), default=None, + show_default=True, + help='File to log to, by default None') +@click.option('-v', '--verbose', is_flag=True, + help='Flag to turn on debug logging.') +@click.pass_context +def summarize(ctx, out_dir, log_file, verbose): + """ + Summarize reV data + """ + ctx.obj['OUT_DIR'] = out_dir + if any([verbose, ctx.obj['VERBOSE']]): + log_level = 'DEBUG' + else: + log_level = 'INFO' + + init_logger('reV', log_file=log_file, log_level=log_level) + + +@summarize.command() +@click.option('--h5_file', '-h5', type=click.Path(exists=True), required=True, + help='Path to .h5 file to summarize') +@click.option('--dsets', '-ds', type=STRLIST, default=None, + show_default=True, + help='Datasets to summarize, by default None') +@click.option('--group', '-grp', type=STR, default=None, + show_default=True, + help=('Group within h5_file to summarize datasets for, by ' + 'default None')) +@click.option('--process_size', '-ps', type=INT, default=None, + show_default=True, + help='Number of sites to process at a time, by default None') +@click.option('--max_workers', '-w', type=INT, default=None, + show_default=True, + help=('Number of workers to use when summarizing 2D datasets,' + ' by default None')) +@click.pass_context +def h5(ctx, h5_file, dsets, group, process_size, max_workers): + """ + 
Summarize datasets in .h5 file + """ + SummarizeH5.run(h5_file, ctx.obj['OUT_DIR'], group=group, dsets=dsets, + process_size=process_size, max_workers=max_workers) + + +@summarize.command() +@click.option('--plot_type', '-plt', default='plotly', + type=click.Choice(['plot', 'plotly'], case_sensitive=False), + show_default=True, + help=(" plot_type of plot to create 'plot' or 'plotly', by " + "default 'plot'")) +@click.option('--cmap', '-cmap', type=str, default='viridis', + show_default=True, + help="Colormap name, by default 'viridis'") +@click.pass_context +def scatter_plots(ctx, plot_type, cmap): + """ + create scatter plots from h5 summary tables + """ + QaQc.create_scatter_plots(ctx.obj['OUT_DIR'], plot_type, cmap) + + +@summarize.command() +@click.option('--sc_table', '-sct', type=click.Path(exists=True), + required=True, help='Path to .csv containing Supply Curve table') +@click.option('--columns', '-cols', type=STRLIST, default=None, + show_default=True, + help=('Column(s) to summarize, if None summarize all numeric ' + 'columns, by default None')) +@click.pass_context +def supply_curve_table(ctx, sc_table, columns): + """ + Summarize Supply Curve Table + """ + ctx.obj['SC_TABLE'] = sc_table + SummarizeSupplyCurve.run(sc_table, ctx.obj['OUT_DIR'], columns=columns) + + +@summarize.command() +@click.option('--sc_table', '-sct', type=click.Path(exists=True), default=None, + show_default=True, + help=("Path to .csv containing Supply Curve table, can be " + "supplied in 'supply-curve-table'")) +@click.option('--plot_type', '-plt', default='plotly', + type=click.Choice(['plot', 'plotly'], case_sensitive=False), + show_default=True, + help=(" plot_type of plot to create 'plot' or 'plotly', by " + "default 'plot'")) +@click.option('--lcoe', '-lcoe', type=STR, default='mean_lcoe', + help="LCOE value to plot, by default 'mean_lcoe'") +@click.pass_context +def supply_curve_plot(ctx, sc_table, plot_type, lcoe): + """ + Plot Supply Curve (cumulative capacity vs LCOE) + 
""" + if sc_table is None: + sc_table = ctx.obj['SC_TABLE'] + + SupplyCurvePlot.plot(sc_table, ctx.obj['OUT_DIR'], + plot_type=plot_type, lcoe=lcoe) + + +@summarize.command() +@click.option('--excl_mask', '-mask', type=click.Path(exists=True), + required=True, + help='Path to .npy file containing final exclusions mask') +@click.option('--plot_type', '-plt', default='plotly', + type=click.Choice(['plot', 'plotly'], case_sensitive=False), + show_default=True, + help=(" plot_type of plot to create 'plot' or 'plotly', by " + "default 'plot'")) +@click.option('--cmap', '-cmap', type=str, default='viridis', + show_default=True, + help="Colormap name, by default 'viridis'") +@click.option('--plot_step', '-step', type=int, default=100, + show_default=True, + help="Step between points to plot") +@click.pass_context +def exclusions_mask(ctx, excl_mask, plot_type, cmap, plot_step): + """ + create heat map of exclusions mask + """ + excl_mask = np.load(excl_mask) + ExclusionsMask.plot(excl_mask, ctx.obj['OUT_DIR'], + plot_type=plot_type, cmap=cmap, + plot_step=plot_step) + + +if __name__ == '__main__': + try: + main(obj={}) + except Exception: + logger.exception('Error running reV QA/QC CLI.') + raise +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/qa_qc/qa_qc.html b/_modules/reV/qa_qc/qa_qc.html new file mode 100644 index 000000000..08afdd64a --- /dev/null +++ b/_modules/reV/qa_qc/qa_qc.html @@ -0,0 +1,1062 @@ + + + + + + reV.qa_qc.qa_qc — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.qa_qc.qa_qc

+# -*- coding: utf-8 -*-
+"""
+reV quality assurance and control classes
+"""
+import logging
+import numpy as np
+import os
+import pandas as pd
+from warnings import warn
+
+from reV.qa_qc.summary import (SummarizeH5, SummarizeSupplyCurve, SummaryPlots,
+                               SupplyCurvePlot, ExclusionsMask)
+from reV.supply_curve.exclusions import ExclusionMaskFromDict
+from reV.utilities import log_versions, ModuleName
+from reV.utilities.exceptions import PipelineError
+
+from gaps.status import Status
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class QaQc: + """ + reV QA/QC + """ + def __init__(self, out_dir): + """ + Parameters + ---------- + out_dir : str + Directory path to save summary data and plots too + """ + log_versions(logger) + logger.info('QA/QC results to be saved to: {}'.format(out_dir)) + if not os.path.exists(out_dir): + os.makedirs(out_dir, exist_ok=True) + + self._out_dir = out_dir + + @property + def out_dir(self): + """ + Output directory + + Returns + ------- + str + """ + return self._out_dir + + @staticmethod + def _scatter_plot(summary_csv, out_root, plot_type='plotly', + cmap='viridis', **kwargs): + """ + Create scatter plot for all summary stats in summary table and save to + out_dir + + Parameters + ---------- + summary_csv : str + Path to .csv file containing summary table + out_root : str + Output directory to save plots to + plot_type : str, optional + plot_type of plot to create 'plot' or 'plotly', by default 'plotly' + cmap : str, optional + Colormap name, by default 'viridis' + kwargs : dict + Additional plotting kwargs + """ + out_dir = os.path.join(out_root, + os.path.basename(summary_csv).rstrip('.csv')) + if not os.path.exists(out_dir): + os.makedirs(out_dir, exist_ok=True) + + SummaryPlots.scatter_all(summary_csv, out_dir, plot_type=plot_type, + cmap=cmap, **kwargs) + +
[docs] def create_scatter_plots(self, plot_type='plotly', cmap='viridis', + **kwargs): + """ + Create scatter plot for all compatible summary .csv files + + Parameters + ---------- + plot_type : str, optional + plot_type of plot to create 'plot' or 'plotly', by default 'plotly' + cmap : str, optional + Colormap name, by default 'viridis' + kwargs : dict + Additional plotting kwargs + """ + for file in os.listdir(self.out_dir): + if file.endswith('.csv'): + summary_csv = os.path.join(self.out_dir, file) + summary = pd.read_csv(summary_csv) + if ('gid' in summary and 'latitude' in summary + and 'longitude' in summary): + self._scatter_plot(summary_csv, self.out_dir, + plot_type=plot_type, cmap=cmap, + **kwargs)
+ +
[docs] @classmethod + def h5(cls, h5_file, out_dir, dsets=None, group=None, process_size=None, + max_workers=None, plot_type='plotly', cmap='viridis', **kwargs): + """ + Run QA/QC by computing summary stats from dsets in h5_file and + plotting scatters plots of compatible summary stats + + Parameters + ---------- + h5_file : str + Path to .h5 file to run QA/QC on + out_dir : str + Directory path to save summary tables and plots too + dsets : str | list, optional + Datasets to summarize, by default None + group : str, optional + Group within h5_file to summarize datasets for, by default None + process_size : int, optional + Number of sites to process at a time, by default None + max_workers : int, optional + Number of workers to use when summarizing 2D datasets, + by default None + plot_type : str, optional + plot_type of plot to create 'plot' or 'plotly', by default 'plotly' + cmap : str, optional + Colormap name, by default 'viridis' + kwargs : dict + Additional plotting kwargs + """ + try: + qa_qc = cls(out_dir) + SummarizeH5.run(h5_file, out_dir, group=group, + dsets=dsets, process_size=process_size, + max_workers=max_workers) + qa_qc.create_scatter_plots(plot_type=plot_type, cmap=cmap, + **kwargs) + except Exception as e: + logger.exception('QAQC failed on file: {}. Received exception:\n{}' + .format(os.path.basename(h5_file), e)) + raise e + else: + logger.info('Finished QAQC on file: {} output directory: {}' + .format(os.path.basename(h5_file), out_dir))
+ +
[docs] @classmethod + def supply_curve(cls, sc_table, out_dir, columns=None, lcoe='mean_lcoe', + plot_type='plotly', cmap='viridis', sc_plot_kwargs=None, + scatter_plot_kwargs=None): + """ + Plot supply curve + + Parameters + ---------- + sc_table : str + Path to .csv file containing supply curve table + out_dir : str + Directory path to save summary tables and plots too + columns : str | list, optional + Column(s) to summarize, if None summarize all numeric columns, + by default None + lcoe : str, optional + LCOE value to plot, by default 'mean_lcoe' + plot_type : str, optional + plot_type of plot to create 'plot' or 'plotly', by default 'plotly' + cmap : str, optional + Colormap name, by default 'viridis' + sc_plot_kwargs : dict, optional + Kwargs for supply curve plot, by default None + scatter_plot_kwargs : dict + Kwargs for scatter plot, by default None + """ + if sc_plot_kwargs is None: + sc_plot_kwargs = {} + + if scatter_plot_kwargs is None: + scatter_plot_kwargs = {} + + try: + qa_qc = cls(out_dir) + SummarizeSupplyCurve.run(sc_table, out_dir, columns=columns) + SupplyCurvePlot.plot(sc_table, out_dir, plot_type=plot_type, + lcoe=lcoe, **sc_plot_kwargs) + qa_qc._scatter_plot(sc_table, out_dir, plot_type=plot_type, + cmap=cmap, **scatter_plot_kwargs) + except Exception as e: + logger.exception('QAQC failed on file: {}. Received exception:\n{}' + .format(os.path.basename(sc_table), e)) + raise e + else: + logger.info('Finished QAQC on file: {} output directory: {}' + .format(os.path.basename(sc_table), out_dir))
+ +
[docs] @classmethod + def exclusions_mask(cls, excl_h5, out_dir, layers_dict=None, min_area=None, + kernel='queen', hsds=False, plot_type='plotly', + cmap='viridis', plot_step=100, **kwargs): + """ + Create inclusion mask from given layers dictionary, dump to disk and + plot + + Parameters + ---------- + excl_h5 : str + Path to exclusions .h5 file + layers_dict : dict | NoneType + Dictionary of LayerMask arugments {layer: {kwarg: value}} + min_area : float | NoneType + Minimum required contiguous area in sq-km + kernel : str + Contiguous filter method to use on final exclusions + hsds : bool + Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS + behind HSDS + plot_type : str, optional + plot_type of plot to create 'plot' or 'plotly', by default 'plotly' + cmap : str, optional + Colormap name, by default 'viridis' + plot_step : int + Step between points to plot + kwargs : dict + Additional plotting kwargs + """ + try: + cls(out_dir) + excl_mask = ExclusionMaskFromDict.run(excl_h5, + layers_dict=layers_dict, + min_area=min_area, + kernel=kernel, + hsds=hsds) + excl_mask = np.round(excl_mask * 100).astype('uint8') + + out_file = os.path.basename(excl_h5).replace('.h5', '_mask.npy') + out_file = os.path.join(out_dir, out_file) + np.save(out_file, excl_mask) + + ExclusionsMask.plot(excl_mask, out_dir, plot_type=plot_type, + cmap=cmap, plot_step=plot_step, **kwargs) + except Exception as e: + logger.exception('QAQC failed on file: {}. Received exception:\n{}' + .format(os.path.basename(excl_h5), e)) + raise e + else: + logger.info('Finished QAQC on file: {} output directory: {}' + .format(os.path.basename(excl_h5), out_dir))
+ + +
[docs]class QaQcModule: + """Class to handle Module QA/QC""" + + def __init__(self, module_name, config, out_root): + """ + Parameters + ---------- + config : dict + Dictionary with pre-extracted config input group. + """ + if not isinstance(config, dict): + raise TypeError('Config input must be a dict but received: {}' + .format(type(config))) + + self._name = module_name + self._config = config + self._out_root = out_root + self._default_plot_type = 'plotly' + self._default_cmap = 'viridis' + self._default_plot_step = 100 + self._default_lcoe = 'mean_lcoe' + self._default_area_filter_kernel = 'queen' + + @property + def fpath(self): + """Get the reV module output filepath(s) + + Returns + ------- + fpaths : str | list + One or more filepaths output by current module being QA'd + """ + + fpath = self._config['fpath'] + + if fpath == 'PIPELINE': + target_modules = [self._name] + for target_module in target_modules: + fpath = Status.parse_step_status(self._out_root, target_module) + if fpath: + break + else: + raise PipelineError('Could not parse fpath from previous ' + 'pipeline jobs.') + fpath = fpath[0] + logger.info('QA/QC using the following ' + 'pipeline input for fpath: {}'.format(fpath)) + + return fpath + + @property + def sub_dir(self): + """ + QA/QC sub directory for this module's outputs + """ + return self._config.get('sub_dir', None) + + @property + def plot_type(self): + """Get the QA/QC plot type: either 'plot' or 'plotly'""" + return self._config.get('plot_type', self._default_plot_type) + + @property + def dsets(self): + """Get the reV_h5 dsets to QA/QC""" + return self._config.get('dsets', None) + + @property + def group(self): + """Get the reV_h5 group to QA/QC""" + return self._config.get('group', None) + + @property + def process_size(self): + """Get the reV_h5 process_size for QA/QC""" + return self._config.get('process_size', None) + + @property + def cmap(self): + """Get the QA/QC plot colormap""" + return self._config.get('cmap', 
self._default_cmap) + + @property + def plot_step(self): + """Get the QA/QC step between exclusion mask points to plot""" + return self._config.get('cmap', self._default_plot_step) + + @property + def columns(self): + """Get the supply_curve columns to QA/QC""" + return self._config.get('columns', None) + + @property + def lcoe(self): + """Get the supply_curve lcoe column to plot""" + return self._config.get('lcoe', self._default_lcoe) + + @property + def excl_fpath(self): + """Get the source exclusions filepath""" + excl_fpath = self._config.get('excl_fpath', 'PIPELINE') + + if excl_fpath == 'PIPELINE': + target_module = ModuleName.SUPPLY_CURVE_AGGREGATION + excl_fpath = Status.parse_step_status(self._out_root, + target_module, + key='excl_fpath') + if not excl_fpath: + excl_fpath = None + msg = ('Could not parse excl_fpath from previous ' + 'pipeline jobs, defaulting to: {}'.format(excl_fpath)) + logger.warning(msg) + warn(msg) + else: + excl_fpath = excl_fpath[0] + logger.info('QA/QC using the following ' + 'pipeline input for excl_fpath: {}' + .format(excl_fpath)) + + return excl_fpath + + @property + def excl_dict(self): + """Get the exclusions dictionary""" + excl_dict = self._config.get('excl_dict', 'PIPELINE') + + if excl_dict == 'PIPELINE': + target_module = ModuleName.SUPPLY_CURVE_AGGREGATION + excl_dict = Status.parse_step_status(self._out_root, target_module, + key='excl_dict') + if not excl_dict: + excl_dict = None + msg = ('Could not parse excl_dict from previous ' + 'pipeline jobs, defaulting to: {}'.format(excl_dict)) + logger.warning(msg) + warn(msg) + else: + excl_dict = excl_dict[0] + logger.info('QA/QC using the following ' + 'pipeline input for excl_dict: {}' + .format(excl_dict)) + + return excl_dict + + @property + def area_filter_kernel(self): + """Get the minimum area filter kernel name ('queen' or 'rook').""" + area_filter_kernel = self._config.get('area_filter_kernel', 'PIPELINE') + + if area_filter_kernel == 'PIPELINE': + target_module = 
ModuleName.SUPPLY_CURVE_AGGREGATION + key = 'area_filter_kernel' + area_filter_kernel = Status.parse_step_status(self._out_root, + target_module, + key=key) + if not area_filter_kernel: + area_filter_kernel = self._default_area_filter_kernel + msg = ('Could not parse area_filter_kernel from previous ' + 'pipeline jobs, defaulting to: {}' + .format(area_filter_kernel)) + logger.warning(msg) + warn(msg) + else: + area_filter_kernel = area_filter_kernel[0] + logger.info('QA/QC using the following ' + 'pipeline input for area_filter_kernel: {}' + .format(area_filter_kernel)) + + return area_filter_kernel + + @property + def min_area(self): + """Get the minimum area filter minimum area in km2.""" + min_area = self._config.get('min_area', 'PIPELINE') + + if min_area == 'PIPELINE': + target_module = ModuleName.SUPPLY_CURVE_AGGREGATION + min_area = Status.parse_step_status(self._out_root, target_module, + key='min_area') + if not min_area: + min_area = None + msg = ('Could not parse min_area from previous ' + 'pipeline jobs, defaulting to: {}' + .format(min_area)) + logger.warning(msg) + warn(msg) + else: + min_area = min_area[0] + logger.info('QA/QC using the following ' + 'pipeline input for min_area: {}' + .format(min_area)) + + return min_area
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/qa_qc/summary.html b/_modules/reV/qa_qc/summary.html new file mode 100644 index 000000000..b92309e6b --- /dev/null +++ b/_modules/reV/qa_qc/summary.html @@ -0,0 +1,1589 @@ + + + + + + reV.qa_qc.summary — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.qa_qc.summary

+# -*- coding: utf-8 -*-
+"""
+Compute and plot summary data
+"""
+import logging
+import numpy as np
+import os
+import pandas as pd
+import plotting as mplt
+import plotly.express as px
+
+from rex import Resource
+from rex.utilities import SpawnProcessPool, parse_table
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class SummarizeH5: + """ + reV Summary data for QA/QC + """ + def __init__(self, h5_file, group=None): + """ + Parameters + ---------- + h5_file : str + Path to .h5 file to summarize data from + group : str, optional + Group within h5_file to summarize datasets for, by default None + """ + logger.info('QAQC Summarize initializing on: {}'.format(h5_file)) + self._h5_file = h5_file + self._group = group + + def __repr__(self): + msg = "{} for {}".format(self.__class__.__name__, self.h5_file) + + return msg + + @property + def h5_file(self): + """ + .h5 file path + + Returns + ------- + str + """ + return self._h5_file + + @staticmethod + def _compute_sites_summary(h5_file, ds_name, sites=None, group=None): + """ + Compute summary stats for given sites of given dataset + + Parameters + ---------- + h5_file : str + Path to .h5 file to summarize data from + ds_name : str + Dataset name of interest + sites : list | slice, optional + sites of interest, by default None + group : str, optional + Group within h5_file to summarize datasets for, by default None + + Returns + ------- + sites_summary : pandas.DataFrame + Summary stats for given sites / dataset + """ + if sites is None: + sites = slice(None) + + with Resource(h5_file, group=group) as f: + sites_meta = f['meta', sites] + sites_data = f[ds_name, :, sites] + + sites_summary = pd.DataFrame(sites_data, columns=sites_meta.index) + sites_summary = sites_summary.describe().T.drop(columns=['count']) + sites_summary['sum'] = sites_data.sum(axis=0) + + return sites_summary + + @staticmethod + def _compute_ds_summary(h5_file, ds_name, group=None): + """ + Compute summary statistics for given dataset (assumed to be a vector) + + Parameters + ---------- + h5_file : str + Path to .h5 file to summarize data from + ds_name : str + Dataset name of interest + group : str, optional + Group within h5_file to summarize datasets for, by default None + + Returns + ------- + ds_summary : pandas.DataFrame + Summary statistics for 
dataset + """ + with Resource(h5_file, group=group) as f: + ds_data = f[ds_name, :] + + ds_summary = pd.DataFrame(ds_data, columns=[ds_name]) + ds_summary = ds_summary.describe().drop(['count']) + ds_summary.at['sum', ds_name] = ds_data.sum() + + return ds_summary + +
[docs] def summarize_dset(self, ds_name, process_size=None, max_workers=None, + out_path=None): + """ + Compute dataset summary. If dataset is 2D compute temporal statistics + for each site + + Parameters + ---------- + ds_name : str + Dataset name of interest + process_size : int, optional + Number of sites to process at a time, by default None + max_workers : int, optional + Number of workers to use in parallel, if 1 run in serial, + if None use all available cores, by default None + out_path : str + File path to save summary to + + Returns + ------- + summary : pandas.DataFrame + Summary summary for dataset + """ + with Resource(self.h5_file, group=self._group) as f: + ds_shape, _, ds_chunks = f.get_dset_properties(ds_name) + + if len(ds_shape) > 1: + sites = np.arange(ds_shape[1]) + if max_workers != 1: + if process_size is None and ds_chunks is not None: + process_size = ds_chunks[1] + if process_size is None: + process_size = ds_shape[-1] + + sites = \ + np.array_split(sites, + int(np.ceil(len(sites) / process_size))) + loggers = [__name__, 'reV'] + with SpawnProcessPool(max_workers=max_workers, + loggers=loggers) as ex: + futures = [] + for site_slice in sites: + futures.append(ex.submit( + self._compute_sites_summary, + self.h5_file, ds_name, sites=site_slice, + group=self._group)) + + summary = [future.result() for future in futures] + + summary = pd.concat(summary) + else: + if process_size is None: + summary = self._compute_sites_summary(self.h5_file, + ds_name, + sites=sites, + group=self._group) + else: + sites = np.array_split( + sites, int(np.ceil(len(sites) / process_size))) + + summary = [] + for site_slice in sites: + summary.append(self._compute_sites_summary( + self.h5_file, ds_name, + sites=site_slice, + group=self._group)) + + summary = pd.concat(summary) + + summary.index.name = 'gid' + + else: + summary = self._compute_ds_summary(self.h5_file, ds_name, + group=self._group) + + if out_path is not None: + summary.to_csv(out_path) + + return 
summary
+ +
[docs] def summarize_means(self, out_path=None): + """ + Add means datasets to meta data + + Parameters + ---------- + out_path : str, optional + Path to .csv file to save update meta data to, by default None + + Returns + ------- + meta : pandas.DataFrame + Meta data with means datasets added + """ + with Resource(self.h5_file, group=self._group) as f: + meta = f.meta + if 'gid' not in meta: + if meta.index.name != 'gid': + meta.index.name = 'gid' + + meta = meta.reset_index() + + for ds_name in f.datasets: + shape, dtype, _ = f.get_dset_properties(ds_name) + if len(shape) == 1 and np.issubdtype(dtype, np.number): + meta[ds_name] = f[ds_name] + + if out_path is not None: + meta.to_csv(out_path, index=False) + + return meta
+ +
[docs] @classmethod + def run(cls, h5_file, out_dir, group=None, dsets=None, + process_size=None, max_workers=None): + """ + Summarize all datasets in h5_file and dump to out_dir + + Parameters + ---------- + h5_file : str + Path to .h5 file to summarize data from + out_dir : str + Directory to dump summary .csv files to + group : str, optional + Group within h5_file to summarize datasets for, by default None + dsets : str | list, optional + Datasets to summarize, by default None + process_size : int, optional + Number of sites to process at a time, by default None + max_workers : int, optional + Number of workers to use when summarizing 2D datasets, + by default None + """ + if not os.path.exists(out_dir): + os.makedirs(out_dir, exist_ok=True) + + if dsets is None: + with Resource(h5_file, group=group) as f: + dsets = [dset for dset in f.datasets + if dset not in ['meta', 'time_index']] + elif isinstance(dsets, str): + dsets = [dsets] + + summary = cls(h5_file) + for ds_name in dsets: + out_path = os.path.join(out_dir, + "{}_summary.csv".format(ds_name)) + summary.summarize_dset(ds_name, process_size=process_size, + max_workers=max_workers, out_path=out_path) + + out_path = os.path.basename(h5_file).replace('.h5', '_summary.csv') + out_path = os.path.join(out_dir, out_path) + summary.summarize_means(out_path=out_path)
+ + +
[docs]class SummarizeSupplyCurve: + """ + Summarize Supply Curve table + """ + def __init__(self, sc_table): + self._sc_table = self._parse_summary(sc_table) + + def __repr__(self): + msg = "{}".format(self.__class__.__name__) + + return msg + + @property + def sc_table(self): + """ + Supply Curve table + + Returns + ------- + pd.DataFrame + """ + return self._sc_table + + @staticmethod + def _parse_summary(summary): + """ + Extract summary statistics + + Parameters + ---------- + summary : str | pd.DataFrame + Path to .csv or .json or DataFrame to parse + + Returns + ------- + summary : pandas.DataFrame + DataFrame of summary statistics + """ + try: + summary = parse_table(summary) + except ValueError as ex: + logger.error(ex) + raise + + return summary + +
[docs] def supply_curve_summary(self, columns=None, out_path=None): + """ + Summarize Supply Curve Table + + Parameters + ---------- + sc_table : str | pandas.DataFrame + Supply curve table or .csv containing table + columns : str | list, optional + Column(s) to summarize, if None summarize all numeric columns, + by default None + out_path : str, optional + Path to .csv to save summary to, by default None + + Returns + ------- + sc_summary : pandas.DataFrame + Summary statistics (mean, stdev, median, min, max, sum) for + Supply Curve table columns + """ + sc_table = self.sc_table + if columns is not None: + if isinstance(columns, str): + columns = [columns] + + sc_table = sc_table[columns] + + sc_table = sc_table.select_dtypes(include=np.number) + + sc_summary = [] + sc_stat = sc_table.mean(axis=0) + sc_stat.name = 'mean' + sc_summary.append(sc_stat) + + sc_stat = sc_table.std(axis=0) + sc_stat.name = 'stdev' + sc_summary.append(sc_stat) + + sc_stat = sc_table.median(axis=0) + sc_stat.name = 'median' + sc_summary.append(sc_stat) + + sc_stat = sc_table.min(axis=0) + sc_stat.name = 'min' + sc_summary.append(sc_stat) + + sc_stat = sc_table.max(axis=0) + sc_stat.name = 'max' + sc_summary.append(sc_stat) + + sc_stat = sc_table.sum(axis=0) + sc_stat.name = 'sum' + sc_summary.append(sc_stat) + + sc_summary = pd.concat(sc_summary, axis=1).T + + if out_path is not None: + sc_summary.to_csv(out_path) + + return sc_summary
+ +
[docs] @classmethod + def run(cls, sc_table, out_dir, columns=None): + """ + Summarize Supply Curve Table and save to disk + + Parameters + ---------- + sc_table : str | pandas.DataFrame + Path to .csv containing Supply Curve table + out_dir : str + Directory to dump summary .csv files to + columns : str | list, optional + Column(s) to summarize, if None summarize all numeric columns, + by default None + """ + if not os.path.exists(out_dir): + os.makedirs(out_dir, exist_ok=True) + + summary = cls(sc_table) + out_path = os.path.basename(sc_table).replace('.csv', '_summary.csv') + out_path = os.path.join(out_dir, out_path) + summary.supply_curve_summary(columns=columns, out_path=out_path)
+ + +
[docs]class PlotBase: + """ + QA/QC Plotting base class + """ + def __init__(self, data): + """ + Parameters + ---------- + data : str | pandas.DataFrame | ndarray + data to plot or file containing data to plot + """ + self._data = data + + def __repr__(self): + msg = "{}".format(self.__class__.__name__) + + return msg + + @property + def data(self): + """ + Data to plot + + Returns + ------- + pandas.DataFrame | ndarray + """ + return self._data + + @staticmethod + def _save_plotly(fig, out_path): + """ + Save plotly figure to disk + + Parameters + ---------- + fig : plotly.Figure + Plotly Figure object + out_path : str + File path to save plot to, can be a .html or static image + """ + if out_path.endswith('.html'): + fig.write_html(out_path) + else: + fig.write_image(out_path) + + @staticmethod + def _check_value(df, values, scatter=True): + """ + Check DataFrame for needed columns + + Parameters + ---------- + df : pandas.DataFrame + DataFrame to check + values : str | list + Column(s) to plot + scatter : bool, optional + Flag to check for latitude and longitude columns, by default True + """ + if isinstance(values, str): + values = [values] + + if scatter: + values += ['latitude', 'longitude'] + + for value in values: + if value not in df: + msg = ("{} is not a valid column in summary table:\n{}" + .format(value, df)) + logger.error(msg) + raise ValueError(msg)
+ + +
[docs]class SummaryPlots(PlotBase): + """ + Plot summary data for QA/QC + """ + def __init__(self, summary): + """ + Parameters + ---------- + summary : str | pandas.DataFrame + Summary DataFrame or path to summary .csv + """ + self._data = SummarizeSupplyCurve._parse_summary(summary) + + @property + def summary(self): + """ + Summary table + + Returns + ------- + pandas.DataFrame + """ + return self._data + + @property + def columns(self): + """ + Available columns in summary table + + Returns + ------- + list + """ + return list(self.summary.columns) + +
[docs] def scatter_plot(self, value, cmap='viridis', out_path=None, **kwargs): + """ + Plot scatter plot of value versus longitude and latitude using + pandas.plot.scatter + + Parameters + ---------- + value : str + Column name to plot as color + cmap : str, optional + Matplotlib colormap name, by default 'viridis' + out_path : str, optional + File path to save plot to, by default None + kwargs : dict + Additional kwargs for plotting.dataframes.df_scatter + """ + self._check_value(self.summary, value) + mplt.df_scatter(self.summary, x='longitude', y='latitude', c=value, + colormap=cmap, filename=out_path, **kwargs)
+ +
[docs] def scatter_plotly(self, value, cmap='Viridis', out_path=None, **kwargs): + """ + Plot scatter plot of value versus longitude and latitude using + plotly + + Parameters + ---------- + value : str + Column name to plot as color + cmap : str | px.color, optional + Continuous color scale to use, by default 'Viridis' + out_path : str, optional + File path to save plot to, can be a .html or static image, + by default None + kwargs : dict + Additional kwargs for plotly.express.scatter + """ + self._check_value(self.summary, value) + fig = px.scatter(self.summary, x='longitude', y='latitude', + color=value, color_continuous_scale=cmap, **kwargs) + fig.update_layout(font=dict(family="Arial", size=18, color="black")) + + if out_path is not None: + self._save_plotly(fig, out_path) + + fig.show()
+ + def _extract_sc_data(self, lcoe='mean_lcoe'): + """ + Extract supply curve data + + Parameters + ---------- + lcoe : str, optional + LCOE value to use for supply curve, by default 'mean_lcoe' + + Returns + ------- + sc_df : pandas.DataFrame + Supply curve data + """ + values = ['capacity', lcoe] + self._check_value(self.summary, values, scatter=False) + sc_df = self.summary[values].sort_values(lcoe) + sc_df['cumulative_capacity'] = sc_df['capacity'].cumsum() + + return sc_df + +
[docs] def dist_plot(self, value, out_path=None, **kwargs): + """ + Plot distribution plot of value using seaborn.distplot + + Parameters + ---------- + value : str + Column name to plot + out_path : str, optional + File path to save plot to, by default None + kwargs : dict + Additional kwargs for plotting.dataframes.dist_plot + """ + self._check_value(self.summary, value, scatter=False) + series = self.summary[value] + mplt.dist_plot(series, filename=out_path, **kwargs)
+ +
[docs] def dist_plotly(self, value, out_path=None, **kwargs): + """ + Plot histogram of value using plotly + + Parameters + ---------- + value : str + Column name to plot + out_path : str, optional + File path to save plot to, by default None + kwargs : dict + Additional kwargs for plotly.express.histogram + """ + self._check_value(self.summary, value, scatter=False) + + fig = px.histogram(self.summary, x=value) + + if out_path is not None: + self._save_plotly(fig, out_path, **kwargs) + + fig.show()
+ +
[docs] @classmethod + def scatter(cls, summary_csv, out_dir, value, plot_type='plotly', + cmap='viridis', **kwargs): + """ + Create scatter plot for given value in summary table and save to + out_dir + + Parameters + ---------- + summary_csv : str + Path to .csv file containing summary table + out_dir : str + Output directory to save plots to + value : str + Column name to plot as color + plot_type : str, optional + plot_type of plot to create 'plot' or 'plotly', by default 'plotly' + cmap : str, optional + Colormap name, by default 'viridis' + kwargs : dict + Additional plotting kwargs + """ + splt = cls(summary_csv) + if plot_type == 'plot': + out_path = os.path.basename(summary_csv).replace('.csv', '.png') + out_path = os.path.join(out_dir, out_path) + splt.scatter_plot(value, cmap=cmap.lower(), out_path=out_path, + **kwargs) + elif plot_type == 'plotly': + out_path = os.path.basename(summary_csv).replace('.csv', '.html') + out_path = os.path.join(out_dir, out_path) + splt.scatter_plotly(value, cmap=cmap.capitalize(), + out_path=out_path, **kwargs) + else: + msg = ("plot_type must be 'plot' or 'plotly' but {} was given" + .format(plot_type)) + logger.error(msg) + raise ValueError(msg)
+ +
[docs] @classmethod + def scatter_all(cls, summary_csv, out_dir, plot_type='plotly', + cmap='viridis', **kwargs): + """ + Create scatter plot for all summary stats in summary table and save to + out_dir + + Parameters + ---------- + summary_csv : str + Path to .csv file containing summary table + out_dir : str + Output directory to save plots to + plot_type : str, optional + plot_type of plot to create 'plot' or 'plotly', by default 'plotly' + cmap : str, optional + Colormap name, by default 'viridis' + kwargs : dict + Additional plotting kwargs + """ + splt = cls(summary_csv) + splt._data = splt.summary.select_dtypes(include=np.number) + datasets = [c for c in splt.summary.columns + if not c.startswith(('lat', 'lon'))] + + for value in datasets: + if plot_type == 'plot': + out_path = '_{}.png'.format(value) + out_path = \ + os.path.basename(summary_csv).replace('.csv', out_path) + out_path = os.path.join(out_dir, out_path) + splt.scatter_plot(value, cmap=cmap.lower(), out_path=out_path, + **kwargs) + elif plot_type == 'plotly': + out_path = '_{}.html'.format(value) + out_path = \ + os.path.basename(summary_csv).replace('.csv', out_path) + out_path = os.path.join(out_dir, out_path) + splt.scatter_plotly(value, cmap=cmap.capitalize(), + out_path=out_path, **kwargs) + else: + msg = ("plot_type must be 'plot' or 'plotly' but {} was given" + .format(plot_type)) + logger.error(msg) + raise ValueError(msg)
+ + +
[docs]class SupplyCurvePlot(PlotBase): + """ + Plot supply curve data for QA/QC + """ + + def __init__(self, sc_table): + """ + Parameters + ---------- + sc_table : str | pandas.DataFrame + Supply curve table or path to supply curve .csv + """ + self._data = SummarizeSupplyCurve._parse_summary(sc_table) + + @property + def sc_table(self): + """ + Supply curve table + + Returns + ------- + pandas.DataFrame + """ + return self._data + + @property + def columns(self): + """ + Available columns in supply curve table + + Returns + ------- + list + """ + return list(self.sc_table.columns) + + def _extract_sc_data(self, lcoe='mean_lcoe'): + """ + Extract supply curve data + + Parameters + ---------- + lcoe : str, optional + LCOE value to use for supply curve, by default 'mean_lcoe' + + Returns + ------- + sc_df : pandas.DataFrame + Supply curve data + """ + values = ['capacity', lcoe] + self._check_value(self.sc_table, values, scatter=False) + sc_df = self.sc_table[values].sort_values(lcoe) + sc_df['cumulative_capacity'] = sc_df['capacity'].cumsum() + + return sc_df + +
[docs] def supply_curve_plot(self, lcoe='mean_lcoe', out_path=None, **kwargs): + """ + Plot supply curve (cumulative capacity vs lcoe) using seaborn.scatter + + Parameters + ---------- + lcoe : str, optional + LCOE value to plot, by default 'mean_lcoe' + out_path : str, optional + File path to save plot to, by default None + kwargs : dict + Additional kwargs for plotting.dataframes.df_scatter + """ + sc_df = self._extract_sc_data(lcoe=lcoe) + mplt.df_scatter(sc_df, x='cumulative_capacity', y=lcoe, + filename=out_path, **kwargs)
+ +
[docs] def supply_curve_plotly(self, lcoe='mean_lcoe', out_path=None, **kwargs): + """ + Plot supply curve (cumulative capacity vs lcoe) using plotly + + Parameters + ---------- + lcoe : str, optional + LCOE value to plot, by default 'mean_lcoe' + out_path : str, optional + File path to save plot to, can be a .html or static image, + by default None + kwargs : dict + Additional kwargs for plotly.express.scatter + """ + sc_df = self._extract_sc_data(lcoe=lcoe) + fig = px.scatter(sc_df, x='cumulative_capacity', y=lcoe, **kwargs) + fig.update_layout(font=dict(family="Arial", size=18, color="black")) + + if out_path is not None: + self._save_plotly(fig, out_path) + + fig.show()
+ +
[docs] @classmethod + def plot(cls, sc_table, out_dir, plot_type='plotly', lcoe='mean_lcoe', + **kwargs): + """ + Create supply curve plot from supply curve table using lcoe value + and save to out_dir + + Parameters + ---------- + sc_table : str + Path to .csv file containing Supply Curve table + out_dir : str + Output directory to save plots to + plot_type : str, optional + plot_type of plot to create 'plot' or 'plotly', by default 'plotly' + lcoe : str, optional + LCOE value to plot, by default 'mean_lcoe' + kwargs : dict + Additional plotting kwargs + """ + splt = cls(sc_table) + if plot_type == 'plot': + out_path = os.path.basename(sc_table).replace('.csv', '.png') + out_path = os.path.join(out_dir, out_path) + splt.supply_curve_plot(lcoe=lcoe, out_path=out_path, **kwargs) + elif plot_type == 'plotly': + out_path = os.path.basename(sc_table).replace('.csv', '.html') + out_path = os.path.join(out_dir, out_path) + splt.supply_curve_plotly(lcoe=lcoe, out_path=out_path, **kwargs) + else: + msg = ("plot_type must be 'plot' or 'plotly' but {} was given" + .format(plot_type)) + logger.error(msg) + raise ValueError(msg)
+ + +
[docs]class ExclusionsMask(PlotBase): + """ + Plot Exclusions mask as a heat map data for QA/QC + """ + + def __init__(self, excl_mask): + """ + Parameters + ---------- + excl_mask : str | ndarray + Exclusions mask or path to .npy file containing final mask + """ + self._data = self._parse_mask(excl_mask) + + @property + def mask(self): + """ + Final Exclusions mask + + Returns + ------- + ndarray + """ + return self._data + + @staticmethod + def _parse_mask(excl_mask): + """ + Load exclusions mask if needed + + Parameters + ---------- + excl_mask : str | ndarray + Exclusions mask or path to .npy file containing final mask + + Returns + ------- + excl_mask : ndarray + [n, m] array of final exclusion values + """ + if isinstance(excl_mask, str): + excl_mask = np.load(excl_mask) + elif not isinstance(excl_mask, np.ndarray): + raise ValueError("excl_mask must be a .npy file or an ndarray") + + return excl_mask + +
[docs] def exclusions_plot(self, cmap='Viridis', plot_step=100, out_path=None, + **kwargs): + """ + Plot exclusions mask as a seaborn heatmap + + Parameters + ---------- + cmap : str | px.color, optional + Continuous color scale to use, by default 'Viridis' + plot_step : int + Step between points to plot + out_path : str, optional + File path to save plot to, can be a .html or static image, + by default None + kwargs : dict + Additional kwargs for plotting.colormaps.heatmap_plot + """ + mplt.heatmap_plot(self.mask[::plot_step, ::plot_step], cmap=cmap, + filename=out_path, **kwargs)
+ +
[docs] def exclusions_plotly(self, cmap='Viridis', plot_step=100, out_path=None, + **kwargs): + """ + Plot exclusions mask as a plotly heatmap + + Parameters + ---------- + cmap : str | px.color, optional + Continuous color scale to use, by default 'Viridis' + plot_step : int + Step between points to plot + out_path : str, optional + File path to save plot to, can be a .html or static image, + by default None + kwargs : dict + Additional kwargs for plotly.express.imshow + """ + fig = px.imshow(self.mask[::plot_step, ::plot_step], + color_continuous_scale=cmap, **kwargs) + fig.update_layout(font=dict(family="Arial", size=18, color="black")) + + if out_path is not None: + SummaryPlots._save_plotly(fig, out_path) + + fig.show()
+ +
[docs] @classmethod + def plot(cls, mask, out_dir, plot_type='plotly', cmap='Viridis', + plot_step=100, **kwargs): + """ + Plot exclusions mask and save to out_dir + + Parameters + ---------- + mask : ndarray + ndarray of final exclusions mask + out_dir : str + Output directory to save plots to + plot_type : str, optional + plot_type of plot to create 'plot' or 'plotly', by default 'plotly' + cmap : str, optional + Colormap name, by default 'viridis' + plot_step : int + Step between points to plot + kwargs : dict + Additional plotting kwargs + """ + excl_mask = cls(mask) + if plot_type == 'plot': + out_path = 'exclusions_mask.png' + out_path = os.path.join(out_dir, out_path) + excl_mask.exclusions_plot(cmap=cmap.lower(), + plot_step=plot_step, + out_path=out_path, + **kwargs) + elif plot_type == 'plotly': + out_path = 'exclusions_mask.html' + out_path = os.path.join(out_dir, out_path) + excl_mask.exclusions_plotly(cmap=cmap.capitalize(), + plot_step=plot_step, + out_path=out_path, + **kwargs) + else: + msg = ("plot_type must be 'plot' or 'plotly' but {} was given" + .format(plot_type)) + logger.error(msg) + raise ValueError(msg)
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/rep_profiles/rep_profiles.html b/_modules/reV/rep_profiles/rep_profiles.html new file mode 100644 index 000000000..378e16750 --- /dev/null +++ b/_modules/reV/rep_profiles/rep_profiles.html @@ -0,0 +1,1836 @@ + + + + + + reV.rep_profiles.rep_profiles — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for reV.rep_profiles.rep_profiles

+# -*- coding: utf-8 -*-
+"""Representative profile extraction utilities.
+
+Created on Thu Oct 31 12:49:23 2019
+
+@author: gbuster
+"""
+from abc import ABC, abstractmethod
+from concurrent.futures import as_completed
+from copy import deepcopy
+import json
+import logging
+import numpy as np
+import os
+import pandas as pd
+from scipy import stats
+from warnings import warn
+
+
+from reV.handlers.outputs import Outputs
+from reV.utilities.exceptions import FileInputError, DataShapeError
+from reV.utilities import log_versions
+
+from rex.resource import Resource
+from rex.utilities.execution import SpawnProcessPool
+from rex.utilities.loggers import log_mem
+from rex.utilities.utilities import parse_year, to_records_array
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class RepresentativeMethods: + """Class for organizing the methods to determine representative-ness""" + + def __init__(self, profiles, weights=None, rep_method='meanoid', + err_method='rmse'): + """ + Parameters + ---------- + profiles : np.ndarray + (time, sites) timeseries array of cf profile data. + weights : np.ndarray | list + 1D array of weighting factors (multiplicative) for profiles. + rep_method : str + Method identifier for calculation of the representative profile. + err_method : str | None + Method identifier for calculation of error from the representative + profile (e.g. "rmse", "mae", "mbe"). If this is None, the + representative meanoid / medianoid profile will be returned + directly + """ + self._rep_method = self.rep_methods[rep_method] + self._err_method = self.err_methods[err_method] + self._profiles = profiles + self._weights = weights + self._parse_weights() + + def _parse_weights(self): + """Parse the weights attribute. Check shape and make np.array.""" + if isinstance(self._weights, (list, tuple)): + self._weights = np.array(self._weights) + + if self._weights is not None: + emsg = ('Weighting factors array of length {} does not match ' + 'profiles of shape {}' + .format(len(self._weights), self._profiles.shape[1])) + assert len(self._weights) == self._profiles.shape[1], emsg + + @property + def rep_methods(self): + """Lookup table of representative methods""" + methods = {'mean': self.meanoid, + 'meanoid': self.meanoid, + 'median': self.medianoid, + 'medianoid': self.medianoid, + } + + return methods + + @property + def err_methods(self): + """Lookup table of error methods""" + methods = {'mbe': self.mbe, + 'mae': self.mae, + 'rmse': self.rmse, + None: None, + } + + return methods + +
[docs] @staticmethod + def nargmin(arr, n): + """Get the index of the Nth min value in arr. + + Parameters + ---------- + arr : np.ndarray + 1D array. + n : int + If n is 0, this returns the location of the min value in arr. + If n is 1, this returns the location of the 2nd min value in arr. + + Returns + ------- + i : int + Location of the Nth min value in arr. + """ + return arr.argsort()[:(n + 1)][-1]
+ +
[docs] @staticmethod + def meanoid(profiles, weights=None): + """Find the mean profile across all sites. + + Parameters + ---------- + profiles : np.ndarray + (time, sites) timeseries array of cf profile data. + weights : np.ndarray | list + 1D array of weighting factors (multiplicative) for profiles. + + Returns + ------- + arr : np.ndarray + (time, 1) timeseries of the mean of all cf profiles across sites. + """ + if weights is None: + arr = profiles.mean(axis=1).reshape((len(profiles), 1)) + else: + if not isinstance(weights, np.ndarray): + weights = np.array(weights) + + arr = (profiles * weights).sum(axis=1) / weights.sum() + if len(arr.shape) == 1: + arr = np.expand_dims(arr, axis=1) + + return arr
+ +
[docs] @staticmethod + def medianoid(profiles): + """Find the median profile across all sites. + + Parameters + ---------- + profiles : np.ndarray + (time, sites) timeseries array of cf profile data. + + Returns + ------- + arr : np.ndarray + (time, 1) timeseries of the median at every timestep of all + cf profiles across sites. + """ + arr = np.median(profiles, axis=1) + arr = arr.reshape((len(profiles), 1)) + + return arr
+ +
[docs] @classmethod + def mbe(cls, profiles, baseline, i_profile=0): + """Calculate the mean bias error of profiles vs. a baseline profile. + + Parameters + ---------- + profiles : np.ndarray + (time, sites) timeseries array of cf profile data. + baseline : np.ndarray + (time, 1) timeseries of the meanoid or medianoid to which + cf profiles should be compared. + i_profile : int + The index of the represntative profile being saved + (for n_profiles). 0 is the most representative profile. + + Returns + ------- + profile : np.ndarray + (time, 1) array for the most representative profile + i_rep : int + Column Index in profiles of the representative profile. + """ + diff = profiles - baseline.reshape((len(baseline), 1)) + mbe = diff.mean(axis=0) + i_rep = cls.nargmin(mbe, i_profile) + + return profiles[:, i_rep], i_rep
+ +
[docs] @classmethod + def mae(cls, profiles, baseline, i_profile=0): + """Calculate the mean absolute error of profiles vs. a baseline profile + + Parameters + ---------- + profiles : np.ndarray + (time, sites) timeseries array of cf profile data. + baseline : np.ndarray + (time, 1) timeseries of the meanoid or medianoid to which + cf profiles should be compared. + i_profile : int + The index of the represntative profile being saved + (for n_profiles). 0 is the most representative profile. + + Returns + ------- + profile : np.ndarray + (time, 1) array for the most representative profile + i_rep : int + Column Index in profiles of the representative profile. + """ + diff = profiles - baseline.reshape((len(baseline), 1)) + mae = np.abs(diff).mean(axis=0) + i_rep = cls.nargmin(mae, i_profile) + + return profiles[:, i_rep], i_rep
+ +
[docs] @classmethod + def rmse(cls, profiles, baseline, i_profile=0): + """Calculate the RMSE of profiles vs. a baseline profile + + Parameters + ---------- + profiles : np.ndarray + (time, sites) timeseries array of cf profile data. + baseline : np.ndarray + (time, 1) timeseries of the meanoid or medianoid to which + cf profiles should be compared. + i_profile : int + The index of the represntative profile being saved + (for n_profiles). 0 is the most representative profile. + + Returns + ------- + profile : np.ndarray + (time, 1) array for the most representative profile + i_rep : int + Column Index in profiles of the representative profile. + """ + rmse = profiles - baseline.reshape((len(baseline), 1)) + rmse **= 2 + rmse = np.sqrt(np.mean(rmse, axis=0)) + i_rep = cls.nargmin(rmse, i_profile) + + return profiles[:, i_rep], i_rep
+ +
[docs] @classmethod + def run(cls, profiles, weights=None, rep_method='meanoid', + err_method='rmse', n_profiles=1): + """Run representative profile methods. + + Parameters + ---------- + profiles : np.ndarray + (time, sites) timeseries array of cf profile data. + weights : np.ndarray | list + 1D array of weighting factors (multiplicative) for profiles. + rep_method : str + Method identifier for calculation of the representative profile. + err_method : str | None + Method identifier for calculation of error from the representative + profile (e.g. "rmse", "mae", "mbe"). If this is None, the + representative meanoid / medianoid profile will be returned + directly. + n_profiles : int + Number of representative profiles to save to fout. + + Returns + ------- + profiles : np.ndarray + (time, n_profiles) array for the most representative profile(s) + i_reps : list | None + List (length of n_profiles) with column Index in profiles of the + representative profile(s). If err_method is None, this value is + also set to None. + """ + inst = cls(profiles, weights=weights, rep_method=rep_method, + err_method=err_method) + + if inst._weights is not None: + baseline = inst._rep_method(inst._profiles, weights=inst._weights) + else: + baseline = inst._rep_method(inst._profiles) + + if err_method is None: + profiles = baseline + i_reps = [None] + + else: + profiles = None + i_reps = [] + for i in range(n_profiles): + p, ir = inst._err_method(inst._profiles, baseline, i_profile=i) + if profiles is None: + profiles = np.zeros((len(p), n_profiles), dtype=p.dtype) + + profiles[:, i] = p + i_reps.append(ir) + + return profiles, i_reps
+ + +
[docs]class RegionRepProfile: + """Framework to handle rep profile for one resource region""" + + RES_GID_COL = 'res_gids' + GEN_GID_COL = 'gen_gids' + + def __init__(self, gen_fpath, rev_summary, cf_dset='cf_profile', + rep_method='meanoid', err_method='rmse', weight='gid_counts', + n_profiles=1): + """ + Parameters + ---------- + gen_fpath : str + Filepath to reV gen output file to extract "cf_profile" from. + rev_summary : pd.DataFrame + Aggregated rev supply curve summary file trimmed to just one + region to get a rep profile for. + Must include "res_gids", "gen_gids", and the "weight" column (if + weight is not None) + cf_dset : str + Dataset name to pull generation profiles from. + rep_method : str + Method identifier for calculation of the representative profile. + err_method : str | None + Method identifier for calculation of error from the representative + profile (e.g. "rmse", "mae", "mbe"). If this is None, the + representative meanoid / medianoid profile will be returned + directly + weight : str | None + Column in rev_summary used to apply weighted mean to profiles. + The supply curve table data in the weight column should have + weight values corresponding to the res_gids in the same row. + n_profiles : int + Number of representative profiles to retrieve. 
+ """ + + self._gen_fpath = gen_fpath + self._rev_summary = rev_summary + self._cf_dset = cf_dset + self._profiles = None + self._source_profiles = None + self._weights = None + self._i_reps = None + self._rep_method = rep_method + self._err_method = err_method + self._weight = weight + self._n_profiles = n_profiles + self._gen_gids = None + self._res_gids = None + + self._init_profiles_weights() + + def _init_profiles_weights(self): + """Initialize the base source profiles and weight arrays""" + gen_gids = self._get_region_attr(self._rev_summary, self.GEN_GID_COL) + res_gids = self._get_region_attr(self._rev_summary, self.RES_GID_COL) + + self._weights = np.ones(len(res_gids)) + if self._weight is not None: + self._weights = self._get_region_attr(self._rev_summary, + self._weight) + + df = pd.DataFrame({self.GEN_GID_COL: gen_gids, + self.RES_GID_COL: res_gids, + 'weights': self._weights}) + df = df.sort_values(self.RES_GID_COL) + self._gen_gids = df[self.GEN_GID_COL].values + self._res_gids = df[self.RES_GID_COL].values + if self._weight is not None: + self._weights = df['weights'].values + else: + self._weights = None + + with Resource(self._gen_fpath) as res: + meta = res.meta + + assert 'gid' in meta + source_res_gids = meta['gid'].values + msg = ('Resource gids from "gid" column in meta data from "{}" ' + 'must be sorted! 
reV generation should always be run with ' + 'sequential project points.'.format(self._gen_fpath)) + assert np.all(source_res_gids[:-1] <= source_res_gids[1:]), msg + + missing = set(self._res_gids) - set(source_res_gids) + msg = ('The following resource gids were found in the rev summary ' + 'supply curve file but not in the source generation meta ' + 'data: {}'.format(missing)) + assert not any(missing), msg + + unique_res_gids, u_idxs = np.unique(self._res_gids, + return_inverse=True) + iloc = np.where(np.isin(source_res_gids, unique_res_gids))[0] + self._source_profiles = res[self._cf_dset, :, iloc[u_idxs]] + + @property + def source_profiles(self): + """Retrieve the cf profile array from the source generation h5 file. + + Returns + ------- + profiles : np.ndarray + Timeseries array of cf profile data. + """ + return self._source_profiles + + @property + def weights(self): + """Get the weights array + + Returns + ------- + weights : np.ndarray | None + Flat array of weight values from the weight column. The supply + curve table data in the weight column should have a list of weight + values corresponding to the gen_gids list in the same row. + """ + return self._weights + + @staticmethod + def _get_region_attr(rev_summary, attr_name): + """Retrieve a flat list of attribute data from a col in rev summary. + + Parameters + ---------- + rev_summary : pd.DataFrame + Aggregated rev supply curve summary file trimmed to just one + region to get a rep profile for. + Must include "res_gids", "gen_gids", and the "weight" column (if + weight is not None) + attr_name : str + Column label to extract flattened data from (gen_gids, + gid_counts, etc...) + + Returns + ------- + data : list + Flat list of data from the column with label "attr_name". + Either a list of numbers or strings. Lists of jsonified lists + will be unpacked. 
+ """ + data = rev_summary[attr_name].values.tolist() + + if any(data): + if isinstance(data[0], str): + # pylint: disable=simplifiable-condition + if ('[' and ']' in data[0]) or ('(' and ')' in data[0]): + data = [json.loads(s) for s in data] + + if isinstance(data[0], (list, tuple)): + data = [a for b in data for a in b] + + return data + + def _run_rep_methods(self): + """Run the representative profile methods to find the meanoid/medianoid + profile and find the profiles most similar.""" + + if self.weights is not None: + if len(self.weights) != self.source_profiles.shape[1]: + e = ('Weights column "{}" resulted in {} weight scalars ' + 'which doesnt match gid column which yields ' + 'profiles with shape {}.' + .format(self._weight, len(self.weights), + self.source_profiles.shape)) + logger.debug('Gids from column "res_gids" with len {}: {}' + .format(len(self._res_gids), self._res_gids)) + logger.debug('Weights from column "{}" with len {}: {}' + .format(self._weight, len(self.weights), + self.weights)) + logger.error(e) + raise DataShapeError(e) + + self._profiles, self._i_reps = RepresentativeMethods.run( + self.source_profiles, weights=self.weights, + rep_method=self._rep_method, err_method=self._err_method, + n_profiles=self._n_profiles) + + @property + def rep_profiles(self): + """Get the representative profiles of this region.""" + if self._profiles is None: + self._run_rep_methods() + + return self._profiles + + @property + def i_reps(self): + """Get the representative profile index(es) of this region.""" + if self._i_reps is None: + self._run_rep_methods() + + return self._i_reps + + @property + def rep_gen_gids(self): + """Get the representative profile gen gids of this region.""" + gids = self._gen_gids + if self.i_reps[0] is None: + rep_gids = None + else: + rep_gids = [gids[i] for i in self.i_reps] + + return rep_gids + + @property + def rep_res_gids(self): + """Get the representative profile resource gids of this region.""" + gids = self._res_gids 
+ if self.i_reps[0] is None or gids is None: + rep_gids = [None] + else: + rep_gids = [gids[i] for i in self.i_reps] + + return rep_gids + +
[docs] @classmethod + def get_region_rep_profile(cls, gen_fpath, rev_summary, + cf_dset='cf_profile', rep_method='meanoid', + err_method='rmse', weight='gid_counts', + n_profiles=1): + """Class method for parallelization of rep profile calc. + + Parameters + ---------- + gen_fpath : str + Filepath to reV gen output file to extract "cf_profile" from. + rev_summary : pd.DataFrame + Aggregated rev supply curve summary file trimmed to just one + region to get a rep profile for. + Must include "res_gids", "gen_gids", and the "weight" column (if + weight is not None) + cf_dset : str + Dataset name to pull generation profiles from. + rep_method : str + Method identifier for calculation of the representative profile. + err_method : str | None + Method identifier for calculation of error from the representative + profile (e.g. "rmse", "mae", "mbe"). If this is None, the + representative meanoid / medianoid profile will be returned + directly + weight : str | None + Column in rev_summary used to apply weighted mean to profiles. + The supply curve table data in the weight column should have + weight values corresponding to the res_gids in the same row. + n_profiles : int + Number of representative profiles to retrieve. + + Returns + ------- + rep_profile : np.ndarray + (time, n_profiles) array for the most representative profile(s) + i_rep : list + Column Index in profiles of the representative profile(s). + gen_gid_reps : list + Generation gid(s) of the representative profile(s). + res_gid_reps : list + Resource gid(s) of the representative profile(s). + """ + r = cls(gen_fpath, rev_summary, cf_dset=cf_dset, + rep_method=rep_method, err_method=err_method, weight=weight, + n_profiles=n_profiles) + + return r.rep_profiles, r.i_reps, r.rep_gen_gids, r.rep_res_gids
+ + +
[docs]class RepProfilesBase(ABC): + """Abstract utility framework for representative profile run classes.""" + + def __init__(self, gen_fpath, rev_summary, reg_cols=None, + cf_dset='cf_profile', rep_method='meanoid', err_method='rmse', + weight='gid_counts', n_profiles=1): + """ + Parameters + ---------- + gen_fpath : str + Filepath to reV gen output file to extract "cf_profile" from. + rev_summary : str | pd.DataFrame + Aggregated rev supply curve summary file. Str filepath or full df. + Must include "res_gids", "gen_gids", and the "weight" column (if + weight is not None) + reg_cols : str | list | None + Label(s) for a categorical region column(s) to extract profiles + for. e.g. "state" will extract a rep profile for each unique entry + in the "state" column in rev_summary. + cf_dset : str + Dataset name to pull generation profiles from. + rep_method : str + Method identifier for calculation of the representative profile. + err_method : str | None + Method identifier for calculation of error from the representative + profile (e.g. "rmse", "mae", "mbe"). If this is None, the + representative meanoid / medianoid profile will be returned + directly + weight : str | None + Column in rev_summary used to apply weighted mean to profiles. + The supply curve table data in the weight column should have + weight values corresponding to the res_gids in the same row. + n_profiles : int + Number of representative profiles to save to fout. 
+ """ + + logger.info('Running rep profiles with gen_fpath: "{}"' + .format(gen_fpath)) + logger.info('Running rep profiles with rev_summary: "{}"' + .format(rev_summary)) + logger.info('Running rep profiles with region columns: "{}"' + .format(reg_cols)) + logger.info('Running rep profiles with representative method: "{}"' + .format(rep_method)) + logger.info('Running rep profiles with error method: "{}"' + .format(err_method)) + logger.info('Running rep profiles with weight factor: "{}"' + .format(weight)) + + self._weight = weight + self._n_profiles = n_profiles + self._cf_dset = cf_dset + self._gen_fpath = gen_fpath + self._reg_cols = reg_cols + + self._rev_summary = self._parse_rev_summary(rev_summary) + + self._check_req_cols(self._rev_summary, self._reg_cols) + self._check_req_cols(self._rev_summary, self._weight) + self._check_req_cols(self._rev_summary, RegionRepProfile.RES_GID_COL) + self._check_req_cols(self._rev_summary, RegionRepProfile.GEN_GID_COL) + + self._check_rev_gen(gen_fpath, cf_dset, self._rev_summary) + self._time_index = None + self._meta = None + self._profiles = None + self._rep_method = rep_method + self._err_method = err_method + + @staticmethod + def _parse_rev_summary(rev_summary): + """Extract, parse, and check the rev summary table. + + Parameters + ---------- + rev_summary : str | pd.DataFrame + Aggregated rev supply curve summary file. Str filepath or full df. + Must include "res_gids", "gen_gids", and the "weight" column (if + weight is not None) + + Returns + ------- + rev_summary : pd.DataFrame + Aggregated rev supply curve summary file. Full df. 
+ Must include "res_gids", "gen_gids", and the "weight" column (if + weight is not None) + """ + + if isinstance(rev_summary, str): + if os.path.exists(rev_summary) and rev_summary.endswith('.csv'): + rev_summary = pd.read_csv(rev_summary) + elif os.path.exists(rev_summary) and rev_summary.endswith('.json'): + rev_summary = pd.read_json(rev_summary) + else: + e = 'Could not parse reV summary file: {}'.format(rev_summary) + logger.error(e) + raise FileInputError(e) + elif not isinstance(rev_summary, pd.DataFrame): + e = ('Bad input dtype for rev_summary input: {}' + .format(type(rev_summary))) + logger.error(e) + raise TypeError(e) + + return rev_summary + + @staticmethod + def _check_req_cols(df, cols): + """Check a dataframe for required columns. + + Parameters + ---------- + df : pd.DataFrame + Dataframe to check columns. + cols : str | list | tuple + Required columns in df. + """ + if cols is not None: + if isinstance(cols, str): + cols = [cols] + + missing = [] + for c in cols: + if c not in df: + missing.append(c) + + if any(missing): + e = ('Column labels not found in rev_summary table: {}' + .format(missing)) + logger.error(e) + raise KeyError(e) + + @staticmethod + def _check_rev_gen(gen_fpath, cf_dset, rev_summary): + """Check rev gen file for requisite datasets. + + Parameters + ---------- + gen_fpath : str + Filepath to reV gen output file to extract "cf_profile" from. + cf_dset : str + Dataset name to pull generation profiles from. + rev_summary : pd.DataFrame + Aggregated rev supply curve summary file. Full df. + Must include "res_gids", "gen_gids", and the "weight" column (if + weight is not None) + """ + with Resource(gen_fpath) as res: + dsets = res.datasets + if cf_dset not in dsets: + raise KeyError('reV gen file needs to have "{}" ' + 'dataset to calculate representative profiles!' 
+ .format(cf_dset)) + + if 'time_index' not in str(dsets): + raise KeyError('reV gen file needs to have "time_index" ' + 'dataset to calculate representative profiles!') + + shape = res.get_dset_properties(cf_dset)[0] + + if len(rev_summary) > shape[1]: + msg = ('WARNING: reV SC summary table has {} sc points and CF ' + 'dataset "{}" has {} profiles. There should never be more ' + 'SC points than CF profiles.' + .format(len(rev_summary), cf_dset, shape[1])) + logger.warning(msg) + warn(msg) + + def _init_profiles(self): + """Initialize the output rep profiles attribute.""" + self._profiles = {k: np.zeros((len(self.time_index), + len(self.meta)), + dtype=np.float32) + for k in range(self._n_profiles)} + + @property + def time_index(self): + """Get the time index for the rep profiles. + + Returns + ------- + time_index : pd.datetimeindex + Time index sourced from the reV gen file. + """ + if self._time_index is None: + with Resource(self._gen_fpath) as res: + ds = 'time_index' + if parse_year(self._cf_dset, option='bool'): + year = parse_year(self._cf_dset, option='raise') + ds += '-{}'.format(year) + + self._time_index = res._get_time_index(ds, slice(None)) + + return self._time_index + + @property + def meta(self): + """Meta data for the representative profiles. + + Returns + ------- + meta : pd.DataFrame + Meta data for the representative profiles. At the very least, + this has columns for the region and res class. + """ + return self._meta + + @property + def profiles(self): + """Get the arrays of representative CF profiles corresponding to meta. + + Returns + ------- + profiles : dict + dict of n_profile-keyed arrays with shape (time, n) for the + representative profiles for each region. + """ + return self._profiles + + def _init_h5_out(self, fout, save_rev_summary=True, + scaled_precision=False): + """Initialize an output h5 file for n_profiles + + Parameters + ---------- + fout : str + None or filepath to output h5 file. 
+ save_rev_summary : bool + Flag to save full reV SC table to rep profile output. + scaled_precision : bool + Flag to scale cf_profiles by 1000 and save as uint16. + """ + dsets = [] + shapes = {} + attrs = {} + chunks = {} + dtypes = {} + + for i in range(self._n_profiles): + dset = 'rep_profiles_{}'.format(i) + dsets.append(dset) + shapes[dset] = self.profiles[0].shape + chunks[dset] = None + + if scaled_precision: + attrs[dset] = {'scale_factor': 1000} + dtypes[dset] = np.uint16 + else: + attrs[dset] = None + dtypes[dset] = self.profiles[0].dtype + + meta = self.meta.copy() + for c in meta.columns: + try: + meta[c] = pd.to_numeric(meta[c]) + except ValueError: + pass + + Outputs.init_h5(fout, dsets, shapes, attrs, chunks, dtypes, + meta, time_index=self.time_index) + + if save_rev_summary: + with Outputs(fout, mode='a') as out: + rev_sum = to_records_array(self._rev_summary) + out._create_dset('rev_summary', rev_sum.shape, + rev_sum.dtype, data=rev_sum) + + def _write_h5_out(self, fout, save_rev_summary=True): + """Write profiles and meta to an output file. + + Parameters + ---------- + fout : str + None or filepath to output h5 file. + save_rev_summary : bool + Flag to save full reV SC table to rep profile output. + scaled_precision : bool + Flag to scale cf_profiles by 1000 and save as uint16. + """ + with Outputs(fout, mode='a') as out: + + if 'rev_summary' in out.datasets and save_rev_summary: + rev_sum = to_records_array(self._rev_summary) + out['rev_summary'] = rev_sum + + for i in range(self._n_profiles): + dset = 'rep_profiles_{}'.format(i) + out[dset] = self.profiles[i] + +
[docs] def save_profiles(self, fout, save_rev_summary=True, + scaled_precision=False): + """Initialize fout and save profiles. + + Parameters + ---------- + fout : str + None or filepath to output h5 file. + save_rev_summary : bool + Flag to save full reV SC table to rep profile output. + scaled_precision : bool + Flag to scale cf_profiles by 1000 and save as uint16. + """ + + self._init_h5_out(fout, save_rev_summary=save_rev_summary, + scaled_precision=scaled_precision) + self._write_h5_out(fout, save_rev_summary=save_rev_summary)
+ + @abstractmethod + def _run_serial(self): + """Abstract method for serial run method.""" + + @abstractmethod + def _run_parallel(self): + """Abstract method for parallel run method.""" + +
[docs] @abstractmethod + def run(self): + """Abstract method for generic run method."""
+ + +
[docs]class RepProfiles(RepProfilesBase): + """RepProfiles""" + + def __init__(self, gen_fpath, rev_summary, reg_cols, cf_dset='cf_profile', + rep_method='meanoid', err_method='rmse', weight='gid_counts', + n_profiles=1, aggregate_profiles=False): + """reV rep profiles class. + + ``reV`` rep profiles compute representative generation profiles + for each supply curve point output by ``reV`` supply curve + aggregation. Representative profiles can either be a spatial + aggregation of generation profiles or actual generation profiles + that most closely resemble an aggregated profile (selected based + on an error metric). + + Parameters + ---------- + gen_fpath : str + Filepath to ``reV`` generation output HDF5 file to extract + `cf_dset` dataset from. + + .. Note:: If executing ``reV`` from the command line, this + path can contain brackets ``{}`` that will be filled in by + the `analysis_years` input. Alternatively, this input can + be set to ``"PIPELINE"``, which will parse this input from + one of these preceding pipeline steps: ``multi-year``, + ``collect``, ``generation``, or + ``supply-curve-aggregation``. However, note that duplicate + executions of any of these commands within the pipeline + may invalidate this parsing, meaning the `gen_fpath` input + will have to be specified manually. + + rev_summary : str | pd.DataFrame + Aggregated ``reV`` supply curve summary file. Must include + the following columns: + + - ``res_gids`` : string representation of python list + containing the resource GID values corresponding to + each supply curve point. + - ``gen_gids`` : string representation of python list + containing the ``reV`` generation GID values + corresponding to each supply curve point. + - weight column (name based on `weight` input) : string + representation of python list containing the resource + GID weights for each supply curve point. + + .. 
Note:: If executing ``reV`` from the command line, this + input can be set to ``"PIPELINE"``, which will parse this + input from one of these preceding pipeline steps: + ``supply-curve-aggregation`` or ``supply-curve``. + However, note that duplicate executions of any of these + commands within the pipeline may invalidate this parsing, + meaning the `rev_summary` input will have to be specified + manually. + + reg_cols : str | list + Label(s) for a categorical region column(s) to extract + profiles for. For example, ``"state"`` will extract a rep + profile for each unique entry in the ``"state"`` column in + `rev_summary`. To get a profile for each supply curve point, + try setting `reg_cols` to a primary key such as + ``"sc_gid"``. + cf_dset : str, optional + Dataset name to pull generation profiles from. This dataset + must be present in the `gen_fpath` HDF5 file. By default, + ``"cf_profile"`` + + .. Note:: If executing ``reV`` from the command line, this + name can contain brackets ``{}`` that will be filled in by + the `analysis_years` input (e.g. ``"cf_profile-{}"``). + + rep_method : {'mean', 'meanoid', 'median', 'medianoid'}, optional + Method identifier for calculation of the representative + profile. By default, ``'meanoid'`` + err_method : {'mbe', 'mae', 'rmse'}, optional + Method identifier for calculation of error from the + representative profile. If this input is ``None``, the + representative meanoid / medianoid profile will be returned + directly. By default, ``'rmse'``. + weight : str, optional + Column in `rev_summary` used to apply weights when computing + mean profiles. The supply curve table data in the weight + column should have weight values corresponding to the + `res_gids` in the same row (i.e. string representation of + python list containing weight values). + + .. Important:: You'll often want to set this value to + something other than ``None`` (typically ``"gid_counts"`` + if running on standard ``reV`` outputs). 
Otherwise, the + unique generation profiles within each supply curve point + are weighted equally. For example, if you have a 64x64 + supply curve point, and one generation profile takes up + 4095 (99.98%) 90m cells while a second generation profile + takes up only one 90m cell (0.02%), they will contribute + *equally* to the meanoid profile unless these weights are + specified. + + By default, ``'gid_counts'``. + n_profiles : int, optional + Number of representative profiles to save to the output + file. By default, ``1``. + aggregate_profiles : bool, optional + Flag to calculate the aggregate (weighted meanoid) profile + for each supply curve point. This behavior is in lieu of + finding the single profile per region closest to the + meanoid. If you set this flag to ``True``, the `rep_method`, + `err_method`, and `n_profiles` inputs will be forcibly set + to the default values. By default, ``False``. + """ + + log_versions(logger) + logger.info('Finding representative profiles that are most similar ' + 'to the weighted meanoid for each supply curve region.') + + if reg_cols is None: + e = ('Need to define "reg_cols"! If you want a profile for each ' + 'supply curve point, try setting "reg_cols" to a primary ' + 'key such as "sc_gid".') + logger.error(e) + raise ValueError(e) + elif isinstance(reg_cols, str): + reg_cols = [reg_cols] + elif not isinstance(reg_cols, list): + reg_cols = list(reg_cols) + + self._aggregate_profiles = aggregate_profiles + if self._aggregate_profiles: + logger.info("Aggregate profiles input set to `True`. 
Setting " + "'rep_method' to `'meanoid'`, 'err_method' to `None`, " + "and 'n_profiles' to `1`") + rep_method = 'meanoid' + err_method = None + n_profiles = 1 + + super().__init__(gen_fpath, rev_summary, reg_cols=reg_cols, + cf_dset=cf_dset, + rep_method=rep_method, err_method=err_method, + weight=weight, n_profiles=n_profiles) + + self._set_meta() + self._init_profiles() + + def _set_meta(self): + """Set the rep profile meta data with each row being a unique + combination of the region columns.""" + if self._err_method is None: + self._meta = self._rev_summary + else: + self._meta = self._rev_summary.groupby(self._reg_cols) + self._meta = ( + self._meta['timezone'] + .apply(lambda x: stats.mode(x, keepdims=True).mode[0]) + ) + self._meta = self._meta.reset_index() + + self._meta['rep_gen_gid'] = None + self._meta['rep_res_gid'] = None + + def _get_mask(self, region_dict): + """Get the mask for a given region and res class. + + Parameters + ---------- + region_dict : dict + Column-value pairs to filter the rev summary on. + + Returns + ------- + mask : np.ndarray + Boolean mask to filter rev_summary to the appropriate + region_dict values. + """ + mask = None + for k, v in region_dict.items(): + temp = (self._rev_summary[k] == v) + if mask is None: + mask = temp + else: + mask = (mask & temp) + + return mask + + def _run_serial(self): + """Compute all representative profiles in serial.""" + + logger.info('Running {} rep profile calculations in serial.' + .format(len(self.meta))) + meta_static = deepcopy(self.meta) + for i, row in meta_static.iterrows(): + region_dict = {k: v for (k, v) in row.to_dict().items() + if k in self._reg_cols} + mask = self._get_mask(region_dict) + + if not any(mask): + logger.warning('Skipping profile {} out of {} ' + 'for region: {} with no valid mask.' 
+ .format(i + 1, len(meta_static), region_dict)) + else: + logger.debug('Working on profile {} out of {} for region: {}' + .format(i + 1, len(meta_static), region_dict)) + out = RegionRepProfile.get_region_rep_profile( + self._gen_fpath, self._rev_summary[mask], + cf_dset=self._cf_dset, rep_method=self._rep_method, + err_method=self._err_method, weight=self._weight, + n_profiles=self._n_profiles) + profiles, _, ggids, rgids = out + logger.info('Profile {} out of {} complete ' + 'for region: {}' + .format(i + 1, len(meta_static), region_dict)) + + for n in range(profiles.shape[1]): + self._profiles[n][:, i] = profiles[:, n] + + if ggids is None: + self._meta.at[i, 'rep_gen_gid'] = None + self._meta.at[i, 'rep_res_gid'] = None + elif len(ggids) == 1: + self._meta.at[i, 'rep_gen_gid'] = ggids[0] + self._meta.at[i, 'rep_res_gid'] = rgids[0] + else: + self._meta.at[i, 'rep_gen_gid'] = str(ggids) + self._meta.at[i, 'rep_res_gid'] = str(rgids) + + def _run_parallel(self, max_workers=None, pool_size=72): + """Compute all representative profiles in parallel. + + Parameters + ---------- + max_workers : int | None + Number of parallel workers. 1 will run serial, None will use all + available. + pool_size : int + Number of futures to submit to a single process pool for + parallel futures. + """ + + logger.info('Kicking off {} rep profile futures.' + .format(len(self.meta))) + + iter_chunks = np.array_split(self.meta.index.values, + np.ceil(len(self.meta) / pool_size)) + n_complete = 0 + for iter_chunk in iter_chunks: + logger.debug('Starting process pool...') + futures = {} + loggers = [__name__, 'reV'] + with SpawnProcessPool(max_workers=max_workers, + loggers=loggers) as exe: + for i in iter_chunk: + row = self.meta.loc[i, :] + region_dict = {k: v for (k, v) in row.to_dict().items() + if k in self._reg_cols} + + mask = self._get_mask(region_dict) + + if not any(mask): + logger.info('Skipping profile {} out of {} ' + 'for region: {} with no valid mask.' 
+ .format(i + 1, len(self.meta), + region_dict)) + else: + future = exe.submit( + RegionRepProfile.get_region_rep_profile, + self._gen_fpath, self._rev_summary[mask], + cf_dset=self._cf_dset, + rep_method=self._rep_method, + err_method=self._err_method, + weight=self._weight, + n_profiles=self._n_profiles) + + futures[future] = [i, region_dict] + + for future in as_completed(futures): + i, region_dict = futures[future] + profiles, _, ggids, rgids = future.result() + n_complete += 1 + logger.info('Future {} out of {} complete ' + 'for region: {}' + .format(n_complete, len(self.meta), + region_dict)) + log_mem(logger, log_level='DEBUG') + + for n in range(profiles.shape[1]): + self._profiles[n][:, i] = profiles[:, n] + + if ggids is None: + self._meta.at[i, 'rep_gen_gid'] = None + self._meta.at[i, 'rep_res_gid'] = None + elif len(ggids) == 1: + self._meta.at[i, 'rep_gen_gid'] = ggids[0] + self._meta.at[i, 'rep_res_gid'] = rgids[0] + else: + self._meta.at[i, 'rep_gen_gid'] = str(ggids) + self._meta.at[i, 'rep_res_gid'] = str(rgids) + +
[docs] def run(self, fout=None, save_rev_summary=True, scaled_precision=False, + max_workers=None): + """ + Run representative profiles in serial or parallel and save to disc + + Parameters + ---------- + fout : str, optional + Filepath to output HDF5 file. If ``None``, output data are + not written to a file. By default, ``None``. + save_rev_summary : bool, optional + Flag to save full ``reV`` supply curve table to rep profile + output. By default, ``True``. + scaled_precision : bool, optional + Flag to scale `cf_profiles` by 1000 and save as uint16. + By default, ``False``. + max_workers : int, optional + Number of parallel rep profile workers. ``1`` will run + serial, while ``None`` will use all available. + By default, ``None``. + """ + + if max_workers == 1: + self._run_serial() + else: + self._run_parallel(max_workers=max_workers) + + if fout is not None: + if self._aggregate_profiles: + logger.info("Aggregate profiles input set to `True`. Setting " + "'save_rev_summary' input to `False`") + save_rev_summary = False + self.save_profiles(fout, save_rev_summary=save_rev_summary, + scaled_precision=scaled_precision) + + logger.info('Representative profiles complete!') + + return fout
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/supply_curve/aggregation.html b/_modules/reV/supply_curve/aggregation.html new file mode 100644 index 000000000..c64574eb2 --- /dev/null +++ b/_modules/reV/supply_curve/aggregation.html @@ -0,0 +1,1528 @@ + + + + + + reV.supply_curve.aggregation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.supply_curve.aggregation

+# -*- coding: utf-8 -*-
+"""
+reV aggregation framework.
+"""
+from abc import ABC, abstractmethod
+import h5py
+import logging
+import numpy as np
+import os
+import pandas as pd
+
+from reV.handlers.outputs import Outputs
+from reV.handlers.exclusions import ExclusionLayers
+from reV.supply_curve.exclusions import ExclusionMaskFromDict
+from reV.supply_curve.extent import SupplyCurveExtent
+from reV.supply_curve.points import AggregationSupplyCurvePoint
+from reV.utilities.exceptions import (EmptySupplyCurvePointError,
+                                      FileInputError, SupplyCurveInputError)
+from reV.utilities import log_versions
+
+from rex.resource import Resource
+from rex.utilities.execution import SpawnProcessPool
+from rex.utilities.loggers import log_mem
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class AbstractAggFileHandler(ABC): + """Simple framework to handle aggregation file context managers.""" + + def __init__(self, excl_fpath, excl_dict=None, area_filter_kernel='queen', + min_area=None): + """ + Parameters + ---------- + excl_fpath : str | list | tuple + Filepath to exclusions h5 with techmap dataset + (can be one or more filepaths). + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + by default None + area_filter_kernel : str, optional + Contiguous area filter method to use on final exclusions mask, + by default 'queen' + min_area : float, optional + Minimum required contiguous area filter in sq-km, + by default None + """ + self._excl_fpath = excl_fpath + self._excl = ExclusionMaskFromDict(excl_fpath, layers_dict=excl_dict, + min_area=min_area, + kernel=area_filter_kernel) + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + if type is not None: + raise + +
[docs] @abstractmethod + def close(self): + """Close all file handlers.""" + self._excl.close()
+ + @property + def exclusions(self): + """Get the exclusions file handler object. + + Returns + ------- + _excl : ExclusionMask + Exclusions h5 handler object. + """ + return self._excl + + @property + def h5(self): + """ + Placeholder for h5 Resource handler + """
+ + +
[docs]class AggFileHandler(AbstractAggFileHandler): + """ + Framework to handle aggregation file context manager: + - exclusions .h5 file + - h5 file to be aggregated + """ + + DEFAULT_H5_HANDLER = Resource + + def __init__(self, excl_fpath, h5_fpath, excl_dict=None, + area_filter_kernel='queen', min_area=None, + h5_handler=None): + """ + Parameters + ---------- + excl_fpath : str | list | tuple + Filepath to exclusions h5 with techmap dataset + (can be one or more filepaths). + h5_fpath : str + Filepath to .h5 file to be aggregated + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + by default None + area_filter_kernel : str, optional + Contiguous area filter method to use on final exclusions mask, + by default 'queen' + min_area : float, optional + Minimum required contiguous area filter in sq-km, by default None + h5_handler : rex.Resource | None + Optional special handler similar to the rex.Resource handler which + is default. + """ + super().__init__(excl_fpath, excl_dict=excl_dict, + area_filter_kernel=area_filter_kernel, + min_area=min_area) + + if h5_handler is None: + self._h5 = Resource(h5_fpath) + else: + self._h5 = h5_handler(h5_fpath) + + @property + def h5(self): + """ + Get the h5 file handler object. + + Returns + ------- + _h5 : Outputs + reV h5 outputs handler object. + """ + return self._h5 + +
[docs] def close(self): + """Close all file handlers.""" + self._excl.close() + self._h5.close()
+ + +
[docs]class BaseAggregation(ABC): + """Abstract supply curve points aggregation framework based on only an + exclusion file and techmap.""" + + def __init__(self, excl_fpath, tm_dset, excl_dict=None, + area_filter_kernel='queen', min_area=None, + resolution=64, excl_area=None, gids=None, + pre_extract_inclusions=False): + """ + Parameters + ---------- + excl_fpath : str | list | tuple + Filepath to exclusions h5 with techmap dataset + (can be one or more filepaths). + tm_dset : str + Dataset name in the techmap file containing the + exclusions-to-resource mapping data. + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + by default None + area_filter_kernel : str, optional + Contiguous area filter method to use on final exclusions mask, + by default "queen" + min_area : float, optional + Minimum required contiguous area filter in sq-km, + by default None + resolution : int, optional + SC resolution, must be input in combination with gid. Prefered + option is to use the row/col slices to define the SC point instead, + by default None + excl_area : float, optional + Area of an exclusion pixel in km2. None will try to infer the area + from the profile transform attribute in excl_fpath, by default None + gids : list, optional + List of supply curve point gids to get summary for (can use to + subset if running in parallel), or None for all gids in the SC + extent, by default None + pre_extract_inclusions : bool, optional + Optional flag to pre-extract/compute the inclusion mask from the + provided excl_dict, by default False. Typically faster to compute + the inclusion mask on the fly with parallel workers. 
+ """ + self._excl_fpath = excl_fpath + self._tm_dset = tm_dset + self._excl_dict = excl_dict + self._resolution = resolution + self._area_filter_kernel = area_filter_kernel + self._min_area = min_area + self._gids = gids + self._pre_extract_inclusions = pre_extract_inclusions + self._excl_area = self._get_excl_area(excl_fpath, excl_area=excl_area) + self._shape = None + + if pre_extract_inclusions: + self._inclusion_mask = \ + ExclusionMaskFromDict.extract_inclusion_mask( + excl_fpath, tm_dset, + excl_dict=excl_dict, + area_filter_kernel=area_filter_kernel, + min_area=min_area) + else: + self._inclusion_mask = None + + @property + def gids(self): + """ + 1D array of supply curve point gids to aggregate + + Returns + ------- + ndarray + """ + if self._gids is None: + with SupplyCurveExtent(self._excl_fpath, + resolution=self._resolution) as sc: + self._gids = sc.valid_sc_points(self._tm_dset) + elif np.issubdtype(type(self._gids), np.number): + self._gids = np.array([self._gids]) + elif not isinstance(self._gids, np.ndarray): + self._gids = np.array(self._gids) + + return self._gids + + @property + def shape(self): + """Get the shape of the full exclusions raster. + + Returns + ------- + tuple + """ + if self._shape is None: + with SupplyCurveExtent(self._excl_fpath, + resolution=self._resolution) as sc: + self._shape = sc.exclusions.shape + + return self._shape + + @staticmethod + def _get_excl_area(excl_fpath, excl_area=None): + """ + Get exclusion area from excl_fpath pixel area. Confirm that the + exclusion area is not None. + + Parameters + ---------- + excl_fpath : str | list | tuple + Filepath to exclusions h5 with techmap dataset + (can be one or more filepaths). + excl_area : float, optional + Area of an exclusion pixel in km2. 
None will try to infer the area + from the profile transform attribute in excl_fpath, by default None + + Returns + ------- + excl_area : float + Area of an exclusion pixel in km2 + """ + if excl_area is None: + logger.debug('Setting the exclusion area from the area of a pixel ' + 'in {}'.format(excl_fpath)) + with ExclusionLayers(excl_fpath) as excl: + excl_area = excl.pixel_area + + if excl_area is None: + e = ('No exclusion pixel area was input and could not parse ' + 'area from the exclusion file attributes!') + logger.error(e) + raise SupplyCurveInputError(e) + + return excl_area + + @staticmethod + def _check_inclusion_mask(inclusion_mask, gids, excl_shape): + """ + Check inclusion mask to ensure it has the proper shape + + Parameters + ---------- + inclusion_mask : np.ndarray | dict | optional + 2D array pre-extracted inclusion mask where 1 is included and 0 is + excluded. This must be either match the full exclusion shape or + be a dict lookup of single-sc-point exclusion masks corresponding + to the gids input and keyed by gids, by default None which will + calculate exclusions on the fly for each sc point. + gids : list | ndarray + sc point gids corresponding to inclusion mask + excl_shape : tuple + Full exclusion layers shape + """ + if isinstance(inclusion_mask, dict): + assert len(inclusion_mask) == len(gids) + elif isinstance(inclusion_mask, np.ndarray): + assert inclusion_mask.shape == excl_shape + elif inclusion_mask is not None: + msg = ('Expected inclusion_mask to be dict or array but received ' + '{}'.format(type(inclusion_mask))) + logger.error(msg) + raise SupplyCurveInputError(msg) + + @staticmethod + def _get_gid_inclusion_mask(inclusion_mask, gid, slice_lookup, + resolution=64): + """ + Get inclusion mask for desired gid + + Parameters + ---------- + inclusion_mask : np.ndarray | dict | optional + 2D array pre-extracted inclusion mask where 1 is included and 0 is + excluded. 
This must be either match the full exclusion shape or + be a dict lookup of single-sc-point exclusion masks corresponding + to the gids input and keyed by gids, by default None which will + calculate exclusions on the fly for each sc point. + gid : int + sc_point_gid value, used to extract inclusion mask from 2D + inclusion array + slice_lookup : dict + Mapping of sc_point_gids to exclusion/inclusion row and column + slices + resolution : int, optional + supply curve extent resolution, by default 64 + + Returns + ------- + gid_inclusions : ndarray | None + 2D array of inclusions for desired gid, normalized from 0, excluded + to 1 fully included, if inclusion mask is None gid_inclusions + is None + """ + gid_inclusions = None + if isinstance(inclusion_mask, dict): + gid_inclusions = inclusion_mask[gid] + assert gid_inclusions.shape[0] <= resolution + assert gid_inclusions.shape[1] <= resolution + elif isinstance(inclusion_mask, np.ndarray): + row_slice, col_slice = slice_lookup[gid] + gid_inclusions = inclusion_mask[row_slice, col_slice] + elif inclusion_mask is not None: + msg = ('Expected inclusion_mask to be dict or array but received ' + '{}'.format(type(inclusion_mask))) + logger.error(msg) + raise SupplyCurveInputError(msg) + + return gid_inclusions + + @staticmethod + def _parse_gen_index(gen_fpath): + """Parse gen outputs for an array of generation gids corresponding to + the resource gids. + + Parameters + ---------- + gen_fpath : str + Filepath to reV generation output .h5 file. This can also be a csv + filepath to a project points input file. + + Returns + ------- + gen_index : np.ndarray + Array of generation gids with array index equal to resource gid. + Array value is -1 if the resource index was not used in the + generation run. 
+ """ + + if gen_fpath.endswith('.h5'): + with Resource(gen_fpath) as f: + gen_index = f.meta + elif gen_fpath.endswith('.csv'): + gen_index = pd.read_csv(gen_fpath) + else: + msg = ('Could not recognize gen_fpath input, needs to be reV gen ' + 'output h5 or project points csv but received: {}' + .format(gen_fpath)) + logger.error(msg) + raise FileInputError(msg) + + if 'gid' in gen_index: + gen_index = gen_index.rename(columns={'gid': 'res_gids'}) + gen_index['gen_gids'] = gen_index.index + gen_index = gen_index[['res_gids', 'gen_gids']] + gen_index = gen_index.set_index(keys='res_gids') + gen_index = \ + gen_index.reindex(range(int(gen_index.index.max() + 1))) + gen_index = gen_index['gen_gids'].values + gen_index[np.isnan(gen_index)] = -1 + gen_index = gen_index.astype(np.int32) + else: + gen_index = None + + return gen_index
+ + +
[docs]class Aggregation(BaseAggregation): + """Concrete but generalized aggregation framework to aggregate ANY reV h5 + file to a supply curve grid (based on an aggregated exclusion grid).""" + + def __init__(self, excl_fpath, tm_dset, *agg_dset, + excl_dict=None, area_filter_kernel='queen', min_area=None, + resolution=64, excl_area=None, gids=None, + pre_extract_inclusions=False): + """ + Parameters + ---------- + excl_fpath : str | list | tuple + Filepath to exclusions h5 with techmap dataset + (can be one or more filepaths). + tm_dset : str + Dataset name in the techmap file containing the + exclusions-to-resource mapping data. + agg_dset : str + Dataset to aggreate, can supply multiple datasets. The datasets + should be scalar values for each site. This method cannot aggregate + timeseries data. + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + by default None + area_filter_kernel : str, optional + Contiguous area filter method to use on final exclusions mask, + by default "queen" + min_area : float, optional + Minimum required contiguous area filter in sq-km, + by default None + resolution : int, optional + SC resolution, must be input in combination with gid. Prefered + option is to use the row/col slices to define the SC point instead, + by default None + excl_area : float, optional + Area of an exclusion pixel in km2. 
None will try to infer the area + from the profile transform attribute in excl_fpath, + by default None + gids : list, optional + List of supply curve point gids to get summary for (can use to + subset if running in parallel), or None for all gids in the SC + extent, by default None + pre_extract_inclusions : bool, optional + Optional flag to pre-extract/compute the inclusion mask from the + provided excl_dict, by default False. Typically faster to compute + the inclusion mask on the fly with parallel workers. + """ + log_versions(logger) + logger.info('Initializing Aggregation...') + logger.debug('Exclusion filepath: {}'.format(excl_fpath)) + logger.debug('Exclusion dict: {}'.format(excl_dict)) + + super().__init__(excl_fpath, tm_dset, excl_dict=excl_dict, + area_filter_kernel=area_filter_kernel, + min_area=min_area, resolution=resolution, + excl_area=excl_area, gids=gids, + pre_extract_inclusions=pre_extract_inclusions) + + if isinstance(agg_dset, str): + agg_dset = (agg_dset, ) + + self._agg_dsets = agg_dset + + def _check_files(self, h5_fpath): + """Do a preflight check on input files""" + + if not os.path.exists(self._excl_fpath): + raise FileNotFoundError('Could not find required exclusions file: ' + '{}'.format(self._excl_fpath)) + + if not os.path.exists(h5_fpath): + raise FileNotFoundError('Could not find required h5 file: ' + '{}'.format(h5_fpath)) + + with h5py.File(self._excl_fpath, 'r') as f: + if self._tm_dset not in f: + raise FileInputError('Could not find techmap dataset "{}" ' + 'in exclusions file: {}' + .format(self._tm_dset, + self._excl_fpath)) + + with Resource(h5_fpath) as f: + for dset in self._agg_dsets: + if dset not in f: + raise FileInputError('Could not find provided dataset "{}"' + ' in h5 file: {}' + .format(dset, h5_fpath)) + +
[docs] @classmethod + def run_serial(cls, excl_fpath, h5_fpath, tm_dset, *agg_dset, + agg_method='mean', excl_dict=None, inclusion_mask=None, + area_filter_kernel='queen', min_area=None, + resolution=64, excl_area=0.0081, gids=None, + gen_index=None): + """ + Standalone method to aggregate - can be parallelized. + + Parameters + ---------- + excl_fpath : str | list | tuple + Filepath to exclusions h5 with techmap dataset + (can be one or more filepaths). + h5_fpath : str + Filepath to .h5 file to aggregate + tm_dset : str + Dataset name in the techmap file containing the + exclusions-to-resource mapping data. + agg_dset : str + Dataset to aggreate, can supply multiple datasets. The datasets + should be scalar values for each site. This method cannot aggregate + timeseries data. + agg_method : str, optional + Aggregation method, either mean or sum/aggregate, by default "mean" + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + by default None + inclusion_mask : np.ndarray, optional + 2D array pre-extracted inclusion mask where 1 is included and 0 is + excluded. This must be either match the full exclusion shape or + be a list of single-sc-point exclusion masks corresponding to the + gids input, by default None + area_filter_kernel : str, optional + Contiguous area filter method to use on final exclusions mask, + by default "queen" + min_area : float, optional + Minimum required contiguous area filter in sq-km, + by default None + resolution : int, optional + SC resolution, must be input in combination with gid. Prefered + option is to use the row/col slices to define the SC point instead, + by default 0.0081 + excl_area : float, optional + Area of an exclusion pixel in km2. 
None will try to infer the area + from the profile transform attribute in excl_fpath, + by default None + gids : list, optional + List of supply curve point gids to get summary for (can use to + subset if running in parallel), or None for all gids in the SC + extent, by default None + gen_index : np.ndarray, optional + Array of generation gids with array index equal to resource gid. + Array value is -1 if the resource index was not used in the + generation run, by default None + + Returns + ------- + agg_out : dict + Aggregated values for each aggregation dataset + """ + with SupplyCurveExtent(excl_fpath, resolution=resolution) as sc: + exclusion_shape = sc.exclusions.shape + if gids is None: + gids = sc.valid_sc_points(tm_dset) + elif np.issubdtype(type(gids), np.number): + gids = [gids] + + slice_lookup = sc.get_slice_lookup(gids) + + cls._check_inclusion_mask(inclusion_mask, gids, exclusion_shape) + + # pre-extract handlers so they are not repeatedly initialized + file_kwargs = {'excl_dict': excl_dict, + 'area_filter_kernel': area_filter_kernel, + 'min_area': min_area} + dsets = agg_dset + ('meta', ) + agg_out = {ds: [] for ds in dsets} + with AggFileHandler(excl_fpath, h5_fpath, **file_kwargs) as fh: + n_finished = 0 + for gid in gids: + gid_inclusions = cls._get_gid_inclusion_mask( + inclusion_mask, gid, slice_lookup, + resolution=resolution) + try: + gid_out = AggregationSupplyCurvePoint.run( + gid, + fh.exclusions, + fh.h5, + tm_dset, + *agg_dset, + agg_method=agg_method, + excl_dict=excl_dict, + inclusion_mask=gid_inclusions, + resolution=resolution, + excl_area=excl_area, + exclusion_shape=exclusion_shape, + close=False, + gen_index=gen_index) + + except EmptySupplyCurvePointError: + logger.debug('SC gid {} is fully excluded or does not ' + 'have any valid source data!'.format(gid)) + except Exception as e: + msg = 'SC gid {} failed!'.format(gid) + logger.exception(msg) + raise RuntimeError(msg) from e + else: + n_finished += 1 + logger.debug('Serial 
aggregation: ' + '{} out of {} points complete' + .format(n_finished, len(gids))) + log_mem(logger) + for k, v in gid_out.items(): + agg_out[k].append(v) + + return agg_out
+ +
[docs] def run_parallel(self, h5_fpath, agg_method='mean', excl_area=None, + max_workers=None, sites_per_worker=100): + """ + Aggregate in parallel + + Parameters + ---------- + h5_fpath : str + Filepath to .h5 file to aggregate + agg_method : str, optional + Aggregation method, either mean or sum/aggregate, by default "mean" + excl_area : float, optional + Area of an exclusion cell (square km), by default None + max_workers : int, optional + Number of cores to run summary on. None is all available cpus, + by default None + sites_per_worker : int, optional + Number of SC points to process on a single parallel worker, + by default 100 + + Returns + ------- + agg_out : dict + Aggregated values for each aggregation dataset + """ + + self._check_files(h5_fpath) + gen_index = self._parse_gen_index(h5_fpath) + + slice_lookup = None + chunks = int(np.ceil(len(self.gids) / sites_per_worker)) + chunks = np.array_split(self.gids, chunks) + + if self._inclusion_mask is not None: + with SupplyCurveExtent(self._excl_fpath, + resolution=self._resolution) as sc: + assert sc.exclusions.shape == self._inclusion_mask.shape + slice_lookup = sc.get_slice_lookup(self.gids) + + logger.info('Running supply curve point aggregation for ' + 'points {} through {} at a resolution of {} ' + 'on {} cores in {} chunks.' 
+ .format(self.gids[0], self.gids[-1], self._resolution, + max_workers, len(chunks))) + + n_finished = 0 + futures = [] + dsets = self._agg_dsets + ('meta', ) + agg_out = {ds: [] for ds in dsets} + loggers = [__name__, 'reV.supply_curve.points', 'reV'] + with SpawnProcessPool(max_workers=max_workers, loggers=loggers) as exe: + # iterate through split executions, submitting each to worker + for gid_set in chunks: + # submit executions and append to futures list + chunk_incl_masks = None + if self._inclusion_mask is not None: + chunk_incl_masks = {} + for gid in gid_set: + rs, cs = slice_lookup[gid] + chunk_incl_masks[gid] = self._inclusion_mask[rs, cs] + + # submit executions and append to futures list + futures.append(exe.submit( + self.run_serial, + self._excl_fpath, + h5_fpath, + self._tm_dset, + *self._agg_dsets, + agg_method=agg_method, + excl_dict=self._excl_dict, + inclusion_mask=chunk_incl_masks, + area_filter_kernel=self._area_filter_kernel, + min_area=self._min_area, + resolution=self._resolution, + excl_area=excl_area, + gids=gid_set, + gen_index=gen_index)) + + # gather results + for future in futures: + n_finished += 1 + logger.info('Parallel aggregation futures collected: ' + '{} out of {}' + .format(n_finished, len(chunks))) + for k, v in future.result().items(): + if v: + agg_out[k].extend(v) + + return agg_out
+ +
[docs] def aggregate(self, h5_fpath, agg_method='mean', max_workers=None, + sites_per_worker=100): + """ + Aggregate with given agg_method + + Parameters + ---------- + h5_fpath : str + Filepath to .h5 file to aggregate + agg_method : str, optional + Aggregation method, either mean or sum/aggregate, by default "mean" + max_workers : int, optional + Number of cores to run summary on. None is all available cpus, + by default None + sites_per_worker : int, optional + Number of SC points to process on a single parallel worker, + by default 100 + + Returns + ------- + agg : dict + Aggregated values for each aggregation dataset + """ + if max_workers is None: + max_workers = os.cpu_count() + + if max_workers == 1: + self._check_files(h5_fpath) + gen_index = self._parse_gen_index(h5_fpath) + agg = self.run_serial(self._excl_fpath, + h5_fpath, + self._tm_dset, + *self._agg_dsets, + agg_method=agg_method, + excl_dict=self._excl_dict, + gids=self.gids, + inclusion_mask=self._inclusion_mask, + area_filter_kernel=self._area_filter_kernel, + min_area=self._min_area, + resolution=self._resolution, + excl_area=self._excl_area, + gen_index=gen_index) + else: + agg = self.run_parallel(h5_fpath=h5_fpath, + agg_method=agg_method, + excl_area=self._excl_area, + max_workers=max_workers, + sites_per_worker=sites_per_worker) + + if not agg['meta']: + e = ('Supply curve aggregation found no non-excluded SC points. ' + 'Please check your exclusions or subset SC GID selection.') + logger.error(e) + raise EmptySupplyCurvePointError(e) + + for k, v in agg.items(): + if k == 'meta': + v = pd.concat(v, axis=1).T + v = v.sort_values('sc_point_gid') + v = v.reset_index(drop=True) + v.index.name = 'sc_gid' + agg[k] = v + else: + v = np.dstack(v)[0] + if v.shape[0] == 1: + v = v.flatten() + + agg[k] = v + + return agg
+ +
[docs] @staticmethod + def save_agg_to_h5(h5_fpath, out_fpath, aggregation): + """ + Save aggregated data to disc in .h5 format + + Parameters + ---------- + out_fpath : str + Output .h5 file path + aggregation : dict + Aggregated values for each aggregation dataset + """ + agg_out = aggregation.copy() + meta = agg_out.pop('meta').reset_index() + for c in meta.columns: + try: + meta[c] = pd.to_numeric(meta[c]) + except (ValueError, TypeError): + pass + + dsets = [] + shapes = {} + attrs = {} + chunks = {} + dtypes = {} + time_index = None + with Resource(h5_fpath) as f: + for dset, data in agg_out.items(): + dsets.append(dset) + shape = data.shape + shapes[dset] = shape + if len(data.shape) == 2: + if ('time_index' in f) and (shape[0] == f.shape[0]): + if time_index is None: + time_index = f.time_index + + attrs[dset] = f.get_attrs(dset=dset) + _, dtype, chunk = f.get_dset_properties(dset) + chunks[dset] = chunk + dtypes[dset] = dtype + + Outputs.init_h5(out_fpath, dsets, shapes, attrs, chunks, dtypes, + meta, time_index=time_index) + + with Outputs(out_fpath, mode='a') as out: + for dset, data in agg_out.items(): + out[dset] = data
+ +
[docs] @classmethod + def run(cls, excl_fpath, h5_fpath, tm_dset, *agg_dset, + excl_dict=None, area_filter_kernel='queen', min_area=None, + resolution=64, excl_area=None, gids=None, + pre_extract_inclusions=False, agg_method='mean', max_workers=None, + sites_per_worker=100, out_fpath=None): + """Get the supply curve points aggregation summary. + + Parameters + ---------- + excl_fpath : str | list | tuple + Filepath to exclusions h5 with techmap dataset + (can be one or more filepaths). + h5_fpath : str + Filepath to .h5 file to aggregate + tm_dset : str + Dataset name in the techmap file containing the + exclusions-to-resource mapping data. + agg_dset : str + Dataset to aggreate, can supply multiple datasets. The datasets + should be scalar values for each site. This method cannot aggregate + timeseries data. + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + by default None + area_filter_kernel : str, optional + Contiguous area filter method to use on final exclusions mask, + by default "queen" + min_area : float, optional + Minimum required contiguous area filter in sq-km, + by default None + resolution : int, optional + SC resolution, must be input in combination with gid. Prefered + option is to use the row/col slices to define the SC point instead, + by default None + excl_area : float, optional + Area of an exclusion pixel in km2. 
None will try to infer the area + from the profile transform attribute in excl_fpath, + by default None + gids : list, optional + List of supply curve point gids to get summary for (can use to + subset if running in parallel), or None for all gids in the SC + extent, by default None + pre_extract_inclusions : bool, optional + Optional flag to pre-extract/compute the inclusion mask from the + provided excl_dict, by default False. Typically faster to compute + the inclusion mask on the fly with parallel workers. + agg_method : str, optional + Aggregation method, either mean or sum/aggregate, by default "mean" + max_workers : int, optional + Number of cores to run summary on. None is all available cpus, + by default None + sites_per_worker : int, optional + Number of SC points to process on a single parallel worker, + by default 100 + out_fpath : str, optional + Output .h5 file path, by default None + + Returns + ------- + agg : dict + Aggregated values for each aggregation dataset + """ + + agg = cls(excl_fpath, tm_dset, *agg_dset, + excl_dict=excl_dict, area_filter_kernel=area_filter_kernel, + min_area=min_area, resolution=resolution, + excl_area=excl_area, gids=gids, + pre_extract_inclusions=pre_extract_inclusions) + + aggregation = agg.aggregate(h5_fpath=h5_fpath, agg_method=agg_method, + max_workers=max_workers, + sites_per_worker=sites_per_worker) + + if out_fpath is not None: + agg.save_agg_to_h5(h5_fpath, out_fpath, aggregation) + + return aggregation
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/supply_curve/competitive_wind_farms.html b/_modules/reV/supply_curve/competitive_wind_farms.html new file mode 100644 index 000000000..fa1d3f889 --- /dev/null +++ b/_modules/reV/supply_curve/competitive_wind_farms.html @@ -0,0 +1,1099 @@ + + + + + + reV.supply_curve.competitive_wind_farms — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for reV.supply_curve.competitive_wind_farms

+# -*- coding: utf-8 -*-
+"""
+Competitive Wind Farms exclusion handler
+"""
+import logging
+import numpy as np
+
+from rex.utilities.utilities import parse_table
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class CompetitiveWindFarms: + """ + Handle competitive wind farm exclusion during supply curve sorting + """ + + def __init__(self, wind_dirs, sc_points, n_dirs=2, offshore=False): + """ + Parameters + ---------- + wind_dirs : pandas.DataFrame | str + path to .csv or reVX.wind_dirs.wind_dirs.WindDirs output with + the neighboring supply curve point gids and power-rose value at + each cardinal direction + sc_points : pandas.DataFrame | str + Supply curve point summary table + n_dirs : int, optional + Number of prominent directions to use, by default 2 + offshore : bool + Flag as to whether offshore farms should be included during + CompetitiveWindFarms + """ + self._wind_dirs = self._parse_wind_dirs(wind_dirs) + + self._sc_gids, self._sc_point_gids, self._mask = \ + self._parse_sc_points(sc_points, offshore=offshore) + + self._offshore = offshore + + valid = np.isin(self.sc_point_gids, self._wind_dirs.index) + if not np.all(valid): + msg = ("'sc_points contains sc_point_gid values that do not " + "correspond to valid 'wind_dirs' sc_point_gids:\n{}" + .format(self.sc_point_gids[~valid])) + logger.error(msg) + raise RuntimeError(msg) + + mask = self._wind_dirs.index.isin(self._sc_point_gids.keys()) + self._wind_dirs = self._wind_dirs.loc[mask] + self._upwind, self._downwind = self._get_neighbors(self._wind_dirs, + n_dirs=n_dirs) + + def __repr__(self): + gids = len(self._upwind) + # pylint: disable=unsubscriptable-object + neighbors = len(self._upwind.values[0]) + msg = ("{} with {} sc_point_gids and {} prominent directions" + .format(self.__class__.__name__, gids, neighbors)) + + return msg + + def __getitem__(self, keys): + """ + Map gid for given mapping + + Parameters + ---------- + keys : tuple + (gid(s) to extract, gid) pair + + Returns + ------- + gid(s) : int | list + Mapped gid(s) for given mapping + """ + if not isinstance(keys, tuple): + msg = ("{} must be a tuple of form (source, gid) where source is: " + "'sc_gid', 'sc_point_gid', or 'upwind', 
'downwind'" + .format(keys)) + logger.error(msg) + raise ValueError(msg) + + source, gid = keys + if source == 'sc_point_gid': + out = self.map_sc_gid_to_sc_point_gid(gid) + elif source == 'sc_gid': + out = self.map_sc_point_gid_to_sc_gid(gid) + elif source == 'upwind': + out = self.map_upwind(gid) + elif source == 'downwind': + out = self.map_downwind(gid) + else: + msg = ("{} must be: 'sc_gid', 'sc_point_gid', or 'upwind', " + "'downwind'".format(source)) + logger.error(msg) + raise ValueError(msg) + + return out + + @property + def mask(self): + """ + Supply curve point boolean mask, used for efficient exclusion + False == excluded sc_point_gid + + Returns + ------- + ndarray + """ + return self._mask + + @property + def sc_point_gids(self): + """ + Un-masked sc_point_gids + + Returns + ------- + ndarray + """ + sc_point_gids = np.array(list(self._sc_point_gids.keys()), dtype=int) + mask = self.mask[sc_point_gids] + + return sc_point_gids[mask] + + @property + def sc_gids(self): + """ + Un-masked sc_gids + + Returns + ------- + ndarray + """ + sc_gids = \ + np.concatenate([self._sc_point_gids[gid] + for gid in self.sc_point_gids]) + + return sc_gids + + @staticmethod + def _parse_table(table): + """ + Extract features and their capacity from supply curve transmission + mapping table + + Parameters + ---------- + table : str | pd.DataFrame + Path to .csv or .json or DataFrame to parse + + Returns + ------- + table : pandas.DataFrame + DataFrame extracted from file path + """ + try: + table = parse_table(table) + except ValueError as ex: + logger.error(ex) + raise + + return table + + @classmethod + def _parse_wind_dirs(cls, wind_dirs): + """ + Parse prominent direction neighbors + + Parameters + ---------- + wind_dirs : pandas.DataFrame | str + Neighboring supply curve point gids and power-rose value at each + cardinal direction + + Returns + ------- + wind_dirs : pandas.DataFrame + Neighboring supply curve point gids and power-rose value at each + cardinal 
direction for each sc point gid + """ + wind_dirs = cls._parse_table(wind_dirs) + + wind_dirs = wind_dirs.set_index('sc_point_gid') + columns = [c for c in wind_dirs if c.endswith(('_gid', '_pr'))] + wind_dirs = wind_dirs[columns] + + return wind_dirs + + @classmethod + def _parse_sc_points(cls, sc_points, offshore=False): + """ + Parse supply curve point summary table into sc_gid to sc_point_gid + mapping and vis-versa. + + Parameters + ---------- + sc_points : pandas.DataFrame | str + Supply curve point summary table + offshore : bool + Flag as to whether offshore farms should be included during + CompetitiveWindFarms + + Returns + ------- + sc_gids : pandas.DataFrame + sc_gid to sc_point_gid mapping + sc_point_gids : pandas.DataFrame + sc_point_gid to sc_gid mapping + mask : ndarray + Mask array to mask excluded sc_point_gids + """ + sc_points = cls._parse_table(sc_points) + if 'offshore' in sc_points and not offshore: + logger.debug('Not including offshore supply curve points in ' + 'CompetitiveWindFarm') + mask = sc_points['offshore'] == 0 + sc_points = sc_points.loc[mask] + + mask = np.ones(int(1 + sc_points['sc_point_gid'].max()), dtype=bool) + + sc_points = sc_points[['sc_gid', 'sc_point_gid']] + sc_gids = sc_points.set_index('sc_gid') + sc_gids = {k: int(v[0]) for k, v in sc_gids.iterrows()} + + sc_point_gids = \ + sc_points.groupby('sc_point_gid')['sc_gid'].unique().to_frame() + sc_point_gids = {int(k): v['sc_gid'] + for k, v in sc_point_gids.iterrows()} + + return sc_gids, sc_point_gids, mask + + @staticmethod + def _get_neighbors(wind_dirs, n_dirs=2): + """ + Parse prominent direction neighbors + + Parameters + ---------- + wind_dirs : pandas.DataFrame | str + Neighboring supply curve point gids and power-rose value at each + cardinal direction for each available sc point gid + n_dirs : int, optional + Number of prominent directions to use, by default 2 + + Returns + ------- + upwind : pandas.DataFrame + Upwind neighbor gids for n prominent wind 
directions + downwind : pandas.DataFrame + Downwind neighbor gids for n prominent wind directions + """ + cols = [c for c in wind_dirs + if (c.endswith('_gid') and not c.startswith('sc'))] + directions = [c.split('_')[0] for c in cols] + upwind_gids = wind_dirs[cols].values + + cols = ['{}_pr'.format(d) for d in directions] + neighbor_pr = wind_dirs[cols].values + + neighbors = np.argsort(neighbor_pr)[:, :n_dirs] + upwind_gids = np.take_along_axis(upwind_gids, neighbors, axis=1) + + downwind_map = {'N': 'S', 'NE': 'SW', 'E': 'W', 'SE': 'NW', 'S': 'N', + 'SW': 'NE', 'W': 'E', 'NW': 'SE'} + cols = ["{}_gid".format(downwind_map[d]) for d in directions] + downwind_gids = wind_dirs[cols].values + downwind_gids = np.take_along_axis(downwind_gids, neighbors, axis=1) + + downwind = {} + upwind = {} + for i, gid in enumerate(wind_dirs.index.values): + downwind[gid] = downwind_gids[i] + upwind[gid] = upwind_gids[i] + + return upwind, downwind + +
[docs] def map_sc_point_gid_to_sc_gid(self, sc_point_gid): + """ + Map given sc_point_gid to equivalent sc_gid(s) + + Parameters + ---------- + sc_point_gid : int + Supply curve point gid to map to equivalent supply curve gid(s) + + Returns + ------- + int | list + Equivalent supply curve gid(s) + """ + return self._sc_point_gids[sc_point_gid]
+ +
[docs] def map_sc_gid_to_sc_point_gid(self, sc_gid): + """ + Map given sc_gid to equivalent sc_point_gid + + Parameters + ---------- + sc_gid : int + Supply curve gid to map to equivalent supply point curve gid + + Returns + ------- + int + Equivalent supply point curve gid + """ + return self._sc_gids[sc_gid]
+ +
[docs] def check_sc_gid(self, sc_gid): + """ + Check to see if sc_gid is valid, if so return associated + sc_point_gids + + Parameters + ---------- + sc_gid : int + Supply curve gid to map to equivalent supply point curve gid + + Returns + ------- + int | None + Equivalent supply point curve gid or None if sc_gid is invalid + (offshore) + """ + sc_point_gid = None + if sc_gid in self._sc_gids: + sc_point_gid = self._sc_gids[sc_gid] + + return sc_point_gid
+ +
[docs] def map_upwind(self, sc_point_gid): + """ + Map given sc_point_gid to upwind neighbors + + Parameters + ---------- + sc_point_gid : int + Supply point curve gid to get upwind neighbors + Returns + ------- + int | list + upwind neighborings + """ + return self._upwind[sc_point_gid]
+ +
[docs] def map_downwind(self, sc_point_gid): + """ + Map given sc_point_gid to downwind neighbors + + Parameters + ---------- + sc_point_gid : int + Supply point curve gid to get downwind neighbors + Returns + ------- + int | list + downwind neighborings + """ + return self._downwind[sc_point_gid]
+ +
[docs] def exclude_sc_point_gid(self, sc_point_gid): + """ + Exclude supply curve point gid, return False if gid is not present + in list of available gids to avoid key errors elsewhere + + Parameters + ---------- + sc_point_gid : int + supply curve point gid to mask + + Returns + ------- + bool + Flag if gid is valid and was masked + """ + if sc_point_gid in self._sc_point_gids: + self._mask[sc_point_gid] = False + out = True + else: + out = False + + return out
+ +
[docs] def remove_noncompetitive_farm(self, sc_points, sort_on='total_lcoe', + downwind=False): + """ + Remove neighboring sc points for given number of prominent wind + directions + + Parameters + ---------- + sc_points : pandas.DataFrame | str + Supply curve point summary table + sort_on : str, optional + column to sort on before excluding neighbors, + by default 'total_lcoe' + downwind : bool, optional + Flag to remove downwind neighbors as well as upwind neighbors, + by default False + + Returns + ------- + sc_points : pandas.DataFrame + Updated supply curve points after removing non-competative + wind farms + """ + sc_points = self._parse_table(sc_points) + if 'offshore' in sc_points and not self._offshore: + mask = sc_points['offshore'] == 0 + sc_points = sc_points.loc[mask] + + sc_points = sc_points.sort_values(sort_on) + + sc_point_gids = sc_points['sc_point_gid'].values.astype(int) + + for i in range(len(sc_points)): + gid = sc_point_gids[i] + if self.mask[gid]: + upwind_gids = self['upwind', gid] + for n in upwind_gids: + self.exclude_sc_point_gid(n) + + if downwind: + downwind_gids = self['downwind', gid] + for n in downwind_gids: + self.exclude_sc_point_gid(n) + + sc_gids = self.sc_gids + mask = sc_points['sc_gid'].isin(sc_gids) + + return sc_points.loc[mask].reset_index(drop=True)
+ +
[docs] @classmethod + def run(cls, wind_dirs, sc_points, n_dirs=2, offshore=False, + sort_on='total_lcoe', downwind=False, out_fpath=None): + """ + Exclude given number of neighboring Supply Point gids based on most + prominent wind directions + + Parameters + ---------- + wind_dirs : pandas.DataFrame | str + path to .csv or reVX.wind_dirs.wind_dirs.WindDirs output with + the neighboring supply curve point gids and power-rose value at + each cardinal direction + sc_points : pandas.DataFrame | str + Supply curve point summary table + n_dirs : int, optional + Number of prominent directions to use, by default 2 + offshore : bool + Flag as to whether offshore farms should be included during + CompetitiveWindFarms + sort_on : str, optional + column to sort on before excluding neighbors, + by default 'total_lcoe' + downwind : bool, optional + Flag to remove downwind neighbors as well as upwind neighbors, + by default False + out_fpath : str, optional + Path to .csv file to save updated sc_points to, + by default None + + Returns + ------- + sc_points : pandas.DataFrame + Updated supply curve points after removing non-competative + wind farms + """ + cwf = cls(wind_dirs, sc_points, n_dirs=n_dirs, offshore=offshore) + sc_points = cwf.remove_noncompetitive_farm(sc_points, sort_on=sort_on, + downwind=downwind) + + if out_fpath is not None: + sc_points.to_csv(out_fpath, index=False) + + return sc_points
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/supply_curve/exclusions.html b/_modules/reV/supply_curve/exclusions.html new file mode 100644 index 000000000..1f70796fc --- /dev/null +++ b/_modules/reV/supply_curve/exclusions.html @@ -0,0 +1,1865 @@ + + + + + + reV.supply_curve.exclusions — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.supply_curve.exclusions

+# -*- coding: utf-8 -*-
+"""
+Generate reV inclusion mask from exclusion layers
+"""
+import logging
+import numpy as np
+from scipy import ndimage
+from warnings import warn
+
+from rex.utilities.loggers import log_mem
+from reV.handlers.exclusions import ExclusionLayers
+from reV.utilities.exceptions import ExclusionLayerError
+from reV.utilities.exceptions import SupplyCurveInputError
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class LayerMask: + """ + Class to convert exclusion layer to inclusion layer mask + """ + + def __init__(self, layer, + exclude_values=None, + exclude_range=(None, None), + include_values=None, + include_range=(None, None), + include_weights=None, + force_include_values=None, + force_include_range=None, + use_as_weights=False, + weight=1.0, + exclude_nodata=False, + nodata_value=None, + **kwargs): + """ + Parameters + ---------- + layer : str + Layer name. + exclude_values : int | float | list, optional + Single value or list of values to exclude. + + .. Important:: The keyword arguments `exclude_values`, + `exclude_range`, `include_values`, `include_range`, + `include_weights`, `force_include_values`, and + `force_include_range` are all mutually exclusive. Users + should supply value(s) for exactly one of these arguments. + + By default, ``None``. + exclude_range : list | tuple, optional + Two-item list of (min threshold, max threshold) for values + to exclude. Mutually exclusive with other inputs - see info + in the description of `exclude_values`. + By default, ``None``. + include_values : int | float | list, optional + Single value or list of values to include. Mutually + exclusive with other inputs - see info in the description of + `exclude_values`. By default, ``None``. + include_range : list | tuple, optional + Two-item list of (min threshold, max threshold) for values + to include. Mutually exclusive with other inputs - see info + in the description of `exclude_values`. + By default, ``None``. + include_weights : dict, optional + A dictionary of ``{value: weight}`` pairs, where the + ``value`` in the layer that should be included with the + given ``weight``. Mutually exclusive with other inputs - see + info in the description of `exclude_values`. + By default, ``None``. + force_include_values : int | float | list, optional + Force the inclusion of the given value(s). 
Mutually + exclusive with other inputs - see info in the description of + `exclude_values`. By default, ``None``. + force_include_range : list | tuple, optional + Force the inclusion of given values in the range + (min threshold, max threshold). Mutually exclusive with + other inputs - see info in the description of + `exclude_values`. By default, ``None``. + use_as_weights : bool, optional + Option to use layer as final inclusion weights. If ``True``, + all inclusion/exclusions specifications for the layer are + ignored and the raw values (scaled by the `weight` input) + are used as weights. By default, ``False``. + weight : float, optional + Weight applied to exclusion layer after it is calculated. + Can be used, for example, to turn a binary exclusion layer + (i.e. data with 0 or 1 values and ``exclude_values=1`` + input) into partial exclusions by setting the weight to + a fraction (e.g. 0.5 for 50% exclusions). By default, ``1``. + exclude_nodata : bool, optional + Flag to exclude nodata values (`nodata_value`). If + ``nodata_value=None`` the `nodata_value` is inferred by + :class:`reV.supply_curve.exclusions.ExclusionMask`. + By default, ``False``. + nodata_value : int | float, optional + Nodata value for the layer. If ``None``, the value will be + inferred when LayerMask is added to + :class:`reV.supply_curve.exclusions.ExclusionMask`. + By default, ``None``. + **kwargs + Optional inputs to maintain legacy kwargs of ``inclusion_*`` + instead of ``include_*``. 
+ """ + + self._name = layer + self._exclude_values = exclude_values + self._exclude_range = exclude_range + self._include_values = include_values + self._include_range = include_range + self._include_weights = include_weights + self._force_include = False + + self._parse_legacy_kwargs(kwargs) + + if force_include_values is not None: + self._include_values = force_include_values + self._force_include = True + if force_include_range is not None: + self._include_range = force_include_range + self._force_include = True + + self._as_weights = use_as_weights + self._exclude_nodata = exclude_nodata + self.nodata_value = nodata_value + + if weight > 1 or weight < 0: + msg = ('Invalide weight ({}) provided for layer {}:' + '\nWeight must fall between 0 and 1!'.format(weight, layer)) + logger.error(msg) + raise ValueError(msg) + + self._weight = weight + self._mask_type = self._check_mask_type() + + def __repr__(self): + msg = ('{} for "{}" exclusion, of type "{}"' + .format(self.__class__.__name__, self.name, self.mask_type)) + + return msg + + def __getitem__(self, data): + """Get the multiplicative inclusion mask. + + Returns + ------- + mask : ndarray + Masked exclusion data with weights applied such that 1 is included, + 0 is excluded, 0.5 is half included. 
+ """ + return self._apply_mask(data) + + def _parse_legacy_kwargs(self, kwargs): + """Parse legacy kwargs that start with inclusion_* instead of include_* + + Parameters + ---------- + kwargs : dict + Optional inputs to maintain legacy kwargs of inclusion_* instead of + include_* + """ + + for k, v in kwargs.items(): + msg = None + if k == 'inclusion_range': + self._include_range = v + msg = 'Please use "include_range" instead of "inclusion_range"' + + elif k == 'inclusion_weights': + self._include_weights = v + msg = ('Please use "include_weights" instead of ' + '"inclusion_weights"') + + elif k == 'inclusion_values': + self._include_values = v + msg = ('Please use "include_values" instead of ' + '"inclusion_values"') + + if msg is not None: + warn(msg) + logger.warning(msg) + + @property + def name(self): + """ + Layer name to extract from exclusions .h5 file + + Returns + ------- + _name : str + """ + return self._name + + @property + def min_value(self): + """Minimum value to include/exclude if include_range or exclude_range + was input. + + Returns + ------- + float + """ + if 'excl' in self.mask_type: + range_var = self._exclude_range + else: + range_var = self._include_range + + if all(isinstance(x, (int, float)) for x in range_var): + return min(range_var) + else: + return range_var[0] + + @property + def max_value(self): + """Maximum value to include/exclude if include_range or exclude_range + was input. 
+ + Returns + ------- + float + """ + if 'excl' in self.mask_type: + range_var = self._exclude_range + else: + range_var = self._include_range + + if all(isinstance(x, (int, float)) for x in range_var): + return max(range_var) + else: + return range_var[1] + + @property + def exclude_values(self): + """ + Values to exclude + + Returns + ------- + _exclude_values : list + """ + return self._exclude_values + + @property + def include_values(self): + """ + Values to include + + Returns + ------- + _include_values : list + """ + return self._include_values + + @property + def include_weights(self): + """ + Mapping of values to include and at what weights + + Returns + ------- + dict + """ + return self._include_weights + + @property + def force_include(self): + """ + Flag to force include mask + + Returns + ------- + _force_include : bool + """ + return self._force_include + + @property + def mask_type(self): + """ + Type of exclusion mask for this layer + + Returns + ------- + str + """ + return self._mask_type + + def _apply_mask(self, data): + """ + Apply mask function + + Parameters + ---------- + data : ndarray + Exclusions data to create mask from + + Returns + ------- + data : ndarray + Masked exclusion data with weights applied such that 1 is included, + 0 is excluded, 0.5 is half included. 
+ """ + + if not self._as_weights: + if self.mask_type == 'include_range': + func = self._include_range_mask + elif self.mask_type == 'exclude_range': + func = self._exclude_range_mask + elif self.mask_type == 'exclude': + func = self._exclusion_mask + elif self.mask_type == 'include': + func = self._inclusion_mask + elif self.mask_type == 'include_weights': + func = self._weights_mask + else: + msg = ('{} is an invalid mask type: expecting ' + '"include_range", "exclude_range", "exclude", ' + '"include", or "include_weights"' + .format(self.mask_type)) + logger.error(msg) + raise KeyError(msg) + + data = func(data) + + data = data.astype('float32') * self._weight + + return data + + def _check_mask_type(self): + """ + Ensure that the initialization arguments are valid and not + contradictory + + Returns + ------ + mask : str + Mask type + """ + mask = None + if not self._as_weights: + masks = {'include_range': any(i is not None + for i in self._include_range), + 'exclude_range': any(i is not None + for i in self._exclude_range), + 'exclude': self._exclude_values is not None, + 'include': self._include_values is not None, + 'include_weights': self._include_weights is not None} + for k, v in masks.items(): + if v: + if mask is None: + mask = k + else: + msg = ('Only one approach can be used to create the ' + 'inclusion mask, but you supplied {} and {}' + .format(mask, k)) + logger.error(msg) + raise ExclusionLayerError(msg) + + if mask == 'include_weights' and self._weight < 1: + msg = ("Values are individually weighted when using " + "'include_weights', the supplied weight of {} will be " + "ignored!".format(self._weight)) + self._weight = 1 + logger.warning(msg) + warn(msg) + + return mask + + def _exclude_range_mask(self, data): + """ + Mask exclusion layer based on exclude value range + + Parameters + ---------- + data : ndarray + Exclusions data to create mask from + + Returns + ------- + mask : ndarray + Boolean mask of which values to include (True is 
include). + """ + mask = np.full(data.shape, False) + if self.min_value is not None: + mask = data < self.min_value + + if self.max_value is not None: + mask |= data > self.max_value + + mask[data == self.nodata_value] = True + if self._exclude_nodata: + mask = mask & (data != self.nodata_value) + + return mask + + def _include_range_mask(self, data): + """ + Mask exclusion layer based on include value range + + Parameters + ---------- + data : ndarray + Exclusions data to create mask from + + Returns + ------- + mask : ndarray + Boolean mask of which values to include (True is include). + """ + mask = np.full(data.shape, True) + if self.min_value is not None: + mask = data >= self.min_value + + if self.max_value is not None: + mask *= data <= self.max_value + + if self._exclude_nodata and self.nodata_value is not None: + mask = mask & (data != self.nodata_value) + + return mask + + def _value_mask(self, data, values, include=True): + """ + Mask exclusion layer based on values to include or exclude + + Parameters + ---------- + data : ndarray + Exclusions data to create mask from + values : list + Values to include or exclude. + include : boolean + Flag as to whether values should be included or excluded. + If True, output mask will be True where data == values. + If False, output mask will be True where data != values. 
+ + Returns + ------- + mask : ndarray + Boolean mask of which values to include (True is include) + """ + mask = np.isin(data, values) + + if not include: + mask = ~mask + + # only include if not nodata + if self._exclude_nodata and self.nodata_value is not None: + mask = mask & (data != self.nodata_value) + + return mask + + def _exclusion_mask(self, data): + """ + Mask exclusion layer based on values to exclude + + Parameters + ---------- + data : ndarray + Exclusions data to create mask from + + Returns + ------- + mask : ndarray + Boolean mask of which values to include (True is include) + """ + mask = self._value_mask(data, self.exclude_values, include=False) + + return mask + + def _inclusion_mask(self, data): + """ + Mask exclusion layer based on values to include + + Parameters + ---------- + data : ndarray + Exclusions data to create mask from + + Returns + ------- + mask : ndarray + Boolean mask of which values to include (True is include) + """ + mask = self._value_mask(data, self.include_values, include=True) + + return mask + + def _weights_mask(self, data): + """ + Mask exclusion layer based on the weights for each inclusion value + + Parameters + ---------- + data : ndarray + Exclusions data to create mask from + + Returns + ------- + mask : ndarray + Percentage of value to include + """ + mask = None + for value, weight in self.include_weights.items(): + if isinstance(value, str): + value = float(value) + + weight = np.array([weight], dtype='float32') + + if mask is None: + mask = self._value_mask(data, [value], include=True) * weight + else: + mask += self._value_mask(data, [value], include=True) * weight + + return mask
+ + +
[docs]class ExclusionMask: + """ + Class to create final exclusion mask + """ + + FILTER_KERNELS = { + 'queen': np.array([[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]), + 'rook': np.array([[0, 1, 0], + [1, 1, 1], + [0, 1, 0]])} + + def __init__(self, excl_h5, layers=None, min_area=None, + kernel='queen', hsds=False, check_layers=False): + """ + Parameters + ---------- + excl_h5 : str | list | tuple + Path to one or more exclusions .h5 files + layers : list | NoneType + list of LayerMask instances for each exclusion layer to combine + min_area : float | NoneType + Minimum required contiguous area in sq-km + kernel : str + Contiguous filter method to use on final exclusion + hsds : bool + Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS + behind HSDS + check_layers : bool + Run a pre-flight check on each layer to ensure they contain + un-excluded values + """ + self._layers = {} + self._excl_h5 = ExclusionLayers(excl_h5, hsds=hsds) + self._excl_layers = None + self._check_layers = check_layers + + if layers is not None: + if not isinstance(layers, list): + layers = [layers] + + missing = [layer.name for layer in layers + if layer.name not in self.excl_layers] + if any(missing): + msg = ("ExclusionMask layers {} are missing from: {}" + .format(missing, self._excl_h5)) + logger.error(msg) + raise KeyError(msg) + + for layer in layers: + self.add_layer(layer) + + if kernel in ["queen", "rook"]: + self._min_area = min_area + self._kernel = kernel + logger.debug('Initializing Exclusions mask with min area of {} ' + 'km2 and filter kernel "{}".' 
+ .format(self._min_area, self._kernel)) + else: + raise KeyError('kernel must be "queen" or "rook"') + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + if type is not None: + raise + + def __repr__(self): + msg = ("{} from {} with {} input layers" + .format(self.__class__.__name__, self.excl_h5.h5_file, + len(self))) + + return msg + + def __len__(self): + return len(self.layers) + + def __getitem__(self, *ds_slice): + """Get the multiplicative inclusion mask. + + Parameters + ---------- + ds_slice : int | slice | list | ndarray + What to extract from ds, each arg is for a sequential axis. + For example, (slice(0, 64), slice(0, 64)) will extract a 64x64 + exclusions mask. + + Returns + ------- + mask : ndarray + Multiplicative inclusion mask with all layers multiplied together + ("and" operation) such that 1 is included, 0 is excluded, + 0.5 is half. + """ + return self._generate_mask(*ds_slice) + +
[docs] def close(self): + """ + Close h5 instance + """ + self.excl_h5.close()
+ + @property + def shape(self): + """ + Get the exclusions shape. + + Returns + ------- + shape : tuple + (rows, cols) shape tuple + """ + return self.excl_h5.shape + + @property + def excl_h5(self): + """ + Open ExclusionLayers instance + + Returns + ------- + _excl_h5 : ExclusionLayers + """ + return self._excl_h5 + + @property + def excl_layers(self): + """ + List of available exclusion layers in exclusions .h5 + + Returns + ------- + _excl_layers : list + """ + if self._excl_layers is None: + self._excl_layers = self.excl_h5.layers + + return self._excl_layers + + @property + def layer_names(self): + """ + List of layers to combines + + Returns + ------- + list + """ + return self._layers.keys() + + @property + def layers(self): + """ + List of LayerMask instances for each exclusion layer to combine + + Returns + ------- + list + """ + return self._layers.values() + + @property + def mask(self): + """ + Inclusion mask for entire exclusion domain + + Returns + ------- + ndarray + """ + mask = self[...] + return mask + + @property + def latitude(self): + """ + Latitude coordinates array + + Returns + ------- + ndarray + """ + return self.excl_h5['latitude'] + + @property + def longitude(self): + """ + Longitude coordinates array + + Returns + ------- + ndarray + """ + return self.excl_h5['longitude'] + +
[docs] def add_layer(self, layer, replace=False): + """ + Add layer to be combined + + Parameters + ---------- + layer : LayerMask + LayerMask instance to add to set of layers to be combined + """ + + if layer.name not in self.excl_layers: + msg = "{} does not exist in {}".format(layer.name, self._excl_h5) + logger.error(msg) + raise KeyError(msg) + + if layer.name in self.layer_names: + msg = "{} is already in {}".format(layer.name, self) + if replace: + msg += " replacing existing layer" + logger.warning(msg) + warn(msg) + else: + logger.error(msg) + raise ExclusionLayerError(msg) + + layer.nodata_value = self.excl_h5.get_nodata_value(layer.name) + if self._check_layers: + if not layer[self.excl_h5[layer.name]].any(): + msg = ("Layer {} is fully excluded!".format(layer.name)) + logger.error(msg) + raise ExclusionLayerError(msg) + + self._layers[layer.name] = layer
+ + @property + def nodata_lookup(self): + """Get a dictionary lookup of the nodata values for each layer name. + + Returns + ------- + nodata : dict + Lookup keyed by layer name and values are nodata values for the + respective layers. + """ + nodata = {} + for layer_name in self.layer_names: + nodata[layer_name] = self.excl_h5.get_nodata_value(layer_name) + + return nodata + + @classmethod + def _area_filter(cls, mask, min_area, excl_area, kernel='queen'): + """ + Ensure the contiguous area of included pixels is greater than + prescribed minimum in sq-km + + Parameters + ---------- + mask : ndarray + Inclusion mask + min_area : float + Minimum required contiguous area in sq-km + kernel : str + Kernel type, either 'queen' or 'rook' + excl_area : float + Area of each exclusion pixel in km^2, assumes 90m resolution + + Returns + ------- + mask : ndarray + Updated inclusion mask + """ + s = cls.FILTER_KERNELS[kernel] + labels, _ = ndimage.label(mask > 0, structure=s) + l, c = np.unique(labels, return_counts=True) + + min_counts = np.ceil(min_area / excl_area) + pos = c[1:] < min_counts + bad_labels = l[1:][pos] + + mask[np.isin(labels, bad_labels)] = 0 + + return mask + + def _increase_mask_slice(self, ds_slice, n=1): + """Increase the mask slice, e.g. from 64x64 to 192x192, to help the + contiguous area filter be more accurate. + + Parameters + ---------- + ds_slice : tuple + Two entry tuple with x and y slices. Anything else will be passed + through unaffected. + n : int + Number of blocks to increase in each direction. For example, + a 64x64 slice with n=1 will increase to 192x192 + (increases by 64xn in each direction). + + Returns + ------- + new_slice : tuple + Two entry tuple with x and y slices with increased dimensions. + sub_slice : tuple + Two entry tuple with x and y slices to retrieve the original + slice out of the bigger slice. 
+ """ + new_slice = ds_slice + sub_slice = (slice(None), slice(None)) + + if isinstance(ds_slice, tuple) and len(ds_slice) == 2: + y_slice = ds_slice[0] + x_slice = ds_slice[1] + if isinstance(x_slice, slice) and isinstance(y_slice, slice): + y_diff = n * np.abs(y_slice.stop - y_slice.start) + x_diff = n * np.abs(x_slice.stop - x_slice.start) + + y_new_start = int(np.max((0, (y_slice.start - y_diff)))) + x_new_start = int(np.max((0, (x_slice.start - x_diff)))) + + y_new_stop = int(np.min((self.shape[0], + (y_slice.stop + y_diff)))) + x_new_stop = int(np.min((self.shape[1], + (x_slice.stop + x_diff)))) + + new_slice = (slice(y_new_start, y_new_stop), + slice(x_new_start, x_new_stop)) + + if y_new_start == y_slice.start: + y_sub_start = 0 + else: + y_sub_start = int(n * y_diff) + if x_new_start == x_slice.start: + x_sub_start = 0 + else: + x_sub_start = int(n * x_diff) + + y_sub_stop = y_sub_start + y_diff + x_sub_stop = x_sub_start + x_diff + + sub_slice = (slice(y_sub_start, y_sub_stop), + slice(x_sub_start, x_sub_stop)) + + return new_slice, sub_slice + + def _generate_ones_mask(self, ds_slice): + """ + Generate mask of all ones + + Parameters + ---------- + ds_slice : tuple + dataset slice of interest along axis 0 and 1 + + Returns + ------- + mask : ndarray + Array of ones slices down by ds_slice + """ + + ones_shape = () + for i, s in enumerate(self.shape): + if i < len(ds_slice): + ax_slice = ds_slice[i] + if np.issubdtype(type(ax_slice), np.integer): + ones_shape += (ax_slice,) + else: + ax = np.arange(s, dtype=np.int32) + ones_shape += (len(ax[ax_slice]), ) + else: + ones_shape += (s, ) + + mask = np.ones(ones_shape, dtype='float32') + + return mask + + def _force_include(self, mask, layers, ds_slice): + """ + Apply force inclusion layers + + Parameters + ---------- + mask : ndarray | None + Mask to apply force inclusion layers to + layers : list + List of force inclusion layers + ds_slice : int | slice | list | ndarray + What to extract from ds, each arg is 
for a sequential axis. + For example, (slice(0, 64), slice(0, 64)) will extract a 64x64 + exclusions mask. + """ + for layer in layers: + layer_slice = (layer.name, ) + ds_slice + layer_mask = layer[self.excl_h5[layer_slice]] + logger.debug('Computing forced inclusions for {}. Layer has ' + 'average value of {:.2f}' + .format(layer, layer_mask.mean())) + log_mem(logger, log_level='DEBUG') + if mask is None: + mask = layer_mask + else: + mask = np.maximum(mask, layer_mask, dtype='float32') + + return mask + + def _generate_mask(self, *ds_slice, check_layers=False): + """ + Generate multiplicative inclusion mask from exclusion layers. + + Parameters + ---------- + ds_slice : int | slice | list | ndarray + What to extract from ds, each arg is for a sequential axis. + For example, (slice(0, 64), slice(0, 64)) will extract a 64x64 + exclusions mask. + check_layers : bool + Check each layer as each layer is extracted to ensure they contain + un-excluded values. This should only really be True if ds_slice is + for the full inclusion mask. Otherwise, this could raise an error + for a fully excluded mask for just one excluded SC point. + + Returns + ------- + mask : ndarray + Multiplicative inclusion mask with all layers multiplied together + ("and" operation) such that 1 is included, 0 is excluded, + 0.5 is half. + """ + + mask = None + ds_slice, sub_slice = self._parse_ds_slice(ds_slice) + + if self.layers: + force_include = [] + for layer in self.layers: + if layer.force_include: + force_include.append(layer) + else: + layer_slice = (layer.name, ) + ds_slice + layer_mask = layer[self.excl_h5[layer_slice]] + + logger.debug('Computed exclusions {} for {}. ' + 'Layer has average value of {:.2f}.' + .format(layer, ds_slice, layer_mask.mean())) + log_mem(logger, log_level='DEBUG') + + if check_layers and not layer_mask.any(): + msg = ("Layer {} is fully excluded!" 
+ .format(layer.name)) + logger.error(msg) + raise ExclusionLayerError(msg) + + if mask is None: + mask = layer_mask + else: + mask = np.minimum(mask, layer_mask, dtype='float32') + + if force_include: + mask = self._force_include(mask, force_include, ds_slice) + + if self._min_area is not None: + mask = self._area_filter(mask, self._min_area, + self._excl_h5.pixel_area, + kernel=self._kernel) + mask = mask[sub_slice] + else: + if self._min_area is not None: + ds_slice = sub_slice + + mask = self._generate_ones_mask(ds_slice) + + return mask + + def _parse_ds_slice(self, ds_slice): + """Parse a dataset slice to make it the proper dimensions and also + optionally increase the dataset slice to make the contiguous area + filter more accurate + + Parameters + ---------- + ds_slice : int | slice | list | ndarray + What to extract from ds, each arg is for a sequential axis. + For example, (slice(0, 64), slice(0, 64)) will extract a 64x64 + exclusions mask. + + Returns + ------- + ds_slice : tuple + Two entry tuple with x and y slices with increased dimensions. + sub_slice : tuple + Two entry tuple with x and y slices to retrieve the original + slice out of the bigger slice. + """ + + if len(ds_slice) == 1 & isinstance(ds_slice[0], tuple): + ds_slice = ds_slice[0] + + sub_slice = None + if self._min_area is not None: + ds_slice, sub_slice = self._increase_mask_slice(ds_slice, n=1) + + return ds_slice, sub_slice + +
[docs] @classmethod + def run(cls, excl_h5, layers=None, min_area=None, + kernel='queen', hsds=False): + """ + Create inclusion mask from given layers + + Parameters + ---------- + excl_h5 : str | list | tuple + Path to one or more exclusions .h5 files + layers : list | NoneType + list of LayerMask instances for each exclusion layer to combine + min_area : float | NoneType + Minimum required contiguous area in sq-km + kernel : str + Contiguous filter method to use on final exclusion + hsds : bool + Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS + behind HSDS + + Returns + ------- + mask : ndarray + Full inclusion mask + """ + with cls(excl_h5, layers=layers, min_area=min_area, + kernel=kernel, hsds=hsds) as f: + mask = f.mask + + return mask
+ + +
[docs]class ExclusionMaskFromDict(ExclusionMask): + """ + Class to initialize ExclusionMask from a dictionary defining layers + """ + + def __init__(self, excl_h5, layers_dict=None, min_area=None, + kernel='queen', hsds=False, check_layers=False): + """ + Parameters + ---------- + excl_h5 : str | list | tuple + Path to one or more exclusions .h5 files + layers_dict : dict | NoneType + Dictionary of LayerMask arugments {layer: {kwarg: value}} + min_area : float | NoneType + Minimum required contiguous area in sq-km + kernel : str + Contiguous filter method to use on final exclusion + hsds : bool + Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS + behind HSDS + check_layers : bool + Run a pre-flight check on each layer to ensure they contain + un-excluded values + """ + if layers_dict is not None: + layers = [] + for layer, kwargs in layers_dict.items(): + layers.append(LayerMask(layer, **kwargs)) + else: + layers = None + + super().__init__(excl_h5, layers=layers, min_area=min_area, + kernel=kernel, hsds=hsds, check_layers=check_layers) + +
[docs] @classmethod + def extract_inclusion_mask(cls, excl_fpath, tm_dset, excl_dict=None, + area_filter_kernel='queen', min_area=None): + """ + Extract the full inclusion mask from excl_fpath using the given + exclusion layers and whether or not to run a minimum area filter + + Parameters + ---------- + excl_fpath : str | list | tuple + Filepath to exclusions h5 with techmap dataset + (can be one or more filepaths). + tm_dset : str + Dataset name in the techmap file containing the + exclusions-to-resource mapping data. + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + area_filter_kernel : str, optional + Contiguous area filter method to use on final exclusions mask, + by default "queen" + min_area : float, optional + Minimum required contiguous area filter in sq-km, + by default None + + Returns + ------- + inclusion_mask : ndarray + Pre-computed 2D inclusion mask (normalized with expected range: + [0, 1], where 1 is included and 0 is excluded) + """ + logger.info('Pre-extracting full exclusion mask, this could take ' + 'up to 30min for a large exclusion config...') + with cls(excl_fpath, layers_dict=excl_dict, check_layers=False, + min_area=min_area, kernel=area_filter_kernel) as f: + inclusion_mask = f._generate_mask(..., check_layers=True) + tm_mask = f._excl_h5[tm_dset] == -1 + inclusion_mask[tm_mask] = 0 + + logger.info('Finished extracting full exclusion mask.') + logger.info('The full exclusion mask has {:.2f}% of area included.' + .format(100 * inclusion_mask.sum() + / inclusion_mask.size)) + + if inclusion_mask.sum() == 0: + msg = 'The exclusions inputs resulted in a fully excluded mask!' + logger.error(msg) + raise SupplyCurveInputError(msg) + + return inclusion_mask
+ +
[docs] @classmethod + def run(cls, excl_h5, layers_dict=None, min_area=None, + kernel='queen', hsds=False): + """ + Create inclusion mask from given layers dictionary + + Parameters + ---------- + excl_h5 : str | list | tuple + Path to one or more exclusions .h5 files + layers_dict : dict | NoneType + Dictionary of LayerMask arugments {layer: {kwarg: value}} + min_area : float | NoneType + Minimum required contiguous area in sq-km + kernel : str + Contiguous filter method to use on final exclusion + hsds : bool + Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS + behind HSDS + + Returns + ------- + mask : ndarray + Full inclusion mask + """ + with cls(excl_h5, layers_dict=layers_dict, min_area=min_area, + kernel=kernel, hsds=hsds) as f: + mask = f.mask + + return mask
+ + +
[docs]class FrictionMask(ExclusionMask): + """Class to handle exclusion-style friction layer.""" + + def __init__(self, fric_h5, fric_dset, hsds=False, check_layers=False): + """ + Parameters + ---------- + fric_h5 : str + Path to friction layer .h5 file (same format as exclusions file) + fric_dset : str + Friction layer dataset in fric_h5 + hsds : bool + Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS + behind HSDS + check_layers : bool + Run a pre-flight check on each layer to ensure they contain + un-excluded values + """ + self._fric_dset = fric_dset + L = [LayerMask(fric_dset, use_as_weights=True, exclude_nodata=False)] + super().__init__(fric_h5, layers=L, min_area=None, hsds=hsds, + check_layers=check_layers) + + def _generate_mask(self, *ds_slice): + """ + Generate multiplicative friction layer mask. + + Parameters + ---------- + ds_slice : int | slice | list | ndarray + What to extract from ds, each arg is for a sequential axis. + For example, (slice(0, 64), slice(0, 64)) will extract a 64x64 + exclusions mask. + + Returns + ------- + mask : ndarray + Multiplicative friction layer mask with nodata values set to 1. + """ + + mask = None + if len(ds_slice) == 1 & isinstance(ds_slice[0], tuple): + ds_slice = ds_slice[0] + + layer_slice = (self._layers[self._fric_dset].name, ) + ds_slice + mask = self._layers[self._fric_dset][self.excl_h5[layer_slice]] + mask[(mask == self._layers[self._fric_dset].nodata_value)] = 1 + + return mask + +
[docs] @classmethod + def run(cls, excl_h5, fric_dset, hsds=False): + """ + Create inclusion mask from given layers dictionary + + Parameters + ---------- + fric_h5 : str + Path to friction layer .h5 file (same format as exclusions file) + fric_dset : str + Friction layer dataset in fric_h5 + hsds : bool + Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS + behind HSDS + + Returns + ------- + mask : ndarray + Full inclusion mask + """ + L = [LayerMask(fric_dset, use_as_weights=True, exclude_nodata=False)] + with cls(excl_h5, *L, min_area=None, hsds=hsds) as f: + mask = f.mask + + return mask
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/supply_curve/extent.html b/_modules/reV/supply_curve/extent.html new file mode 100644 index 000000000..3c1dc9410 --- /dev/null +++ b/_modules/reV/supply_curve/extent.html @@ -0,0 +1,1203 @@ + + + + + + reV.supply_curve.extent — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.supply_curve.extent

+# -*- coding: utf-8 -*-
+"""
+reV supply curve extent
+"""
+import logging
+import numpy as np
+import pandas as pd
+
+from reV.handlers.exclusions import ExclusionLayers
+from reV.utilities.exceptions import SupplyCurveError, SupplyCurveInputError
+
+from rex.utilities.utilities import get_chunk_ranges
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class SupplyCurveExtent: + """Supply curve full extent framework. This class translates the 90m + exclusion grid to the aggregated supply curve resolution.""" + + def __init__(self, f_excl, resolution=64): + """ + Parameters + ---------- + f_excl : str | list | tuple | ExclusionLayers + File path(s) to the exclusions grid, or pre-initialized + ExclusionLayers. The exclusions dictate the SC analysis extent. + resolution : int + Number of exclusion points per SC point along an axis. + This number**2 is the total number of exclusion points per + SC point. + """ + + logger.debug('Initializing SupplyCurveExtent with res {} from: {}' + .format(resolution, f_excl)) + + if not isinstance(resolution, int): + raise SupplyCurveInputError('Supply Curve resolution needs to be ' + 'an integer but received: {}' + .format(type(resolution))) + + if isinstance(f_excl, (str, list, tuple)): + self._excl_fpath = f_excl + self._excls = ExclusionLayers(f_excl) + elif isinstance(f_excl, ExclusionLayers): + self._excl_fpath = f_excl.h5_file + self._excls = f_excl + else: + raise SupplyCurveInputError('SupplyCurvePoints needs an ' + 'exclusions file path, or ' + 'ExclusionLayers handler but ' + 'received: {}' + .format(type(f_excl))) + + self._excl_shape = self.exclusions.shape + # limit the resolution to the exclusion shape. 
+ self._res = int(np.min(list(self.excl_shape) + [resolution])) + + self._n_rows = None + self._n_cols = None + self._cols_of_excl = None + self._rows_of_excl = None + self._excl_row_slices = None + self._excl_col_slices = None + self._latitude = None + self._longitude = None + self._points = None + + self._sc_col_ind, self._sc_row_ind = np.meshgrid( + np.arange(self.n_cols), np.arange(self.n_rows)) + self._sc_col_ind = self._sc_col_ind.flatten() + self._sc_row_ind = self._sc_row_ind.flatten() + + logger.debug('Initialized SupplyCurveExtent with shape {} from ' + 'exclusions with shape {}' + .format(self.shape, self.excl_shape)) + + def __len__(self): + """Total number of supply curve points.""" + return self.n_rows * self.n_cols + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + if type is not None: + raise + + def __getitem__(self, gid): + """Get SC extent meta data corresponding to an SC point gid.""" + if gid >= len(self): + raise KeyError('SC extent with {} points does not contain SC ' + 'point gid {}.'.format(len(self), gid)) + + return self.points.loc[gid] + +
[docs] def close(self): + """Close all file handlers.""" + self._excls.close()
+ + @property + def shape(self): + """Get the Supply curve shape tuple (n_rows, n_cols). + + Returns + ------- + shape : tuple + 2-entry tuple representing the full supply curve extent. + """ + + return (self.n_rows, self.n_cols) + + @property + def exclusions(self): + """Get the exclusions object. + + Returns + ------- + _excls : ExclusionLayers + ExclusionLayers h5 handler object. + """ + return self._excls + + @property + def resolution(self): + """Get the 1D resolution. + + Returns + ------- + _res : int + Number of exclusion points per SC point along an axis. + This number**2 is the total number of exclusion points per + SC point. + """ + return self._res + + @property + def excl_shape(self): + """Get the shape tuple of the exclusion file raster. + + Returns + ------- + tuple + """ + return self._excl_shape + + @property + def excl_rows(self): + """Get the unique row indices identifying the exclusion points. + + Returns + ------- + excl_rows : np.ndarray + Array of exclusion row indices. + """ + return np.arange(self.excl_shape[0]) + + @property + def excl_cols(self): + """Get the unique column indices identifying the exclusion points. + + Returns + ------- + excl_cols : np.ndarray + Array of exclusion column indices. + """ + return np.arange(self.excl_shape[1]) + + @property + def rows_of_excl(self): + """List representing the supply curve points rows and which + exclusions rows belong to each supply curve row. + + Returns + ------- + _rows_of_excl : list + List representing the supply curve points rows. Each list entry + contains the exclusion row indices that are included in the sc + point. + """ + if self._rows_of_excl is None: + self._rows_of_excl = self._chunk_excl(self.excl_rows, + self.resolution) + + return self._rows_of_excl + + @property + def cols_of_excl(self): + """List representing the supply curve points columns and which + exclusions columns belong to each supply curve column. 
+ + Returns + ------- + _cols_of_excl : list + List representing the supply curve points columns. Each list entry + contains the exclusion column indices that are included in the sc + point. + """ + if self._cols_of_excl is None: + self._cols_of_excl = self._chunk_excl(self.excl_cols, + self.resolution) + + return self._cols_of_excl + + @property + def excl_row_slices(self): + """ + List representing the supply curve points rows and which + exclusions rows belong to each supply curve row. + + Returns + ------- + _excl_row_slices : list + List representing the supply curve points rows. Each list entry + contains the exclusion row slice that are included in the sc + point. + """ + if self._excl_row_slices is None: + self._excl_row_slices = self._excl_slices(self.excl_rows, + self.resolution) + + return self._excl_row_slices + + @property + def excl_col_slices(self): + """ + List representing the supply curve points cols and which + exclusions cols belong to each supply curve col. + + Returns + ------- + _excl_col_slices : list + List representing the supply curve points cols. Each list entry + contains the exclusion col slice that are included in the sc + point. + """ + if self._excl_col_slices is None: + self._excl_col_slices = self._excl_slices(self.excl_cols, + self.resolution) + + return self._excl_col_slices + + @property + def n_rows(self): + """Get the number of supply curve grid rows. + + Returns + ------- + n_rows : int + Number of row entries in the full supply curve grid. + """ + if self._n_rows is None: + self._n_rows = int(np.ceil(self.excl_shape[0] / self.resolution)) + + return self._n_rows + + @property + def n_cols(self): + """Get the number of supply curve grid columns. + + Returns + ------- + n_cols : int + Number of column entries in the full supply curve grid. 
+ """ + if self._n_cols is None: + self._n_cols = int(np.ceil(self.excl_shape[1] / self.resolution)) + + return self._n_cols + + @property + def latitude(self): + """ + Get supply curve point latitudes + + Returns + ------- + ndarray + """ + if self._latitude is None: + lats = [] + lons = [] + + sc_cols, sc_rows = np.meshgrid(np.arange(self.n_cols), + np.arange(self.n_rows)) + for r, c in zip(sc_rows.flatten(), sc_cols.flatten()): + r = self.excl_row_slices[r] + c = self.excl_col_slices[c] + lats.append(self.exclusions['latitude', r, c].mean()) + lons.append(self.exclusions['longitude', r, c].mean()) + + self._latitude = np.array(lats, dtype='float32') + self._longitude = np.array(lons, dtype='float32') + + return self._latitude + + @property + def longitude(self): + """ + Get supply curve point longitudes + + Returns + ------- + ndarray + """ + if self._longitude is None: + lats = [] + lons = [] + + sc_cols, sc_rows = np.meshgrid(np.arange(self.n_cols), + np.arange(self.n_rows)) + for r, c in zip(sc_rows.flatten(), sc_cols.flatten()): + r = self.excl_row_slices[r] + c = self.excl_col_slices[c] + lats.append(self.exclusions['latitude', r, c].mean()) + lons.append(self.exclusions['longitude', r, c].mean()) + + self._latitude = np.array(lats, dtype='float32') + self._longitude = np.array(lons, dtype='float32') + + return self._longitude + + @property + def lat_lon(self): + """ + 2D array of lat, lon coordinates for all sc points + + Returns + ------- + ndarray + """ + return np.dstack((self.latitude, self.longitude))[0] + + @property + def row_indices(self): + """Get a 1D array of row indices for every gid. That is, this property + has length == len(gids) and row_indices[sc_gid] yields the row index of + the target supply curve gid + + Returns + ------- + ndarray + """ + return self._sc_row_ind + + @property + def col_indices(self): + """Get a 1D array of col indices for every gid. 
That is, this property + has length == len(gids) and col_indices[sc_gid] yields the col index of + the target supply curve gid + + Returns + ------- + ndarray + """ + return self._sc_col_ind + + @property + def points(self): + """Get the summary dataframe of supply curve points. + + Returns + ------- + _points : pd.DataFrame + Supply curve points with columns for attributes of each sc point. + """ + + if self._points is None: + self._points = pd.DataFrame({'row_ind': self.row_indices.copy(), + 'col_ind': self.col_indices.copy()}) + + self._points.index.name = 'gid' # sc_point_gid + + return self._points + + @staticmethod + def _chunk_excl(arr, resolution): + """Split an array into a list of arrays with len == resolution. + + Parameters + ---------- + arr : np.ndarray + 1D array to be split into chunks. + resolution : int + Resolution of the chunks. + + Returns + ------- + chunks : list + List of arrays, each with length equal to self.resolution + (except for the last array in the list which is the remainder). + """ + + chunks = get_chunk_ranges(len(arr), resolution) + chunks = list(map(lambda i: np.arange(*i), chunks)) + + return chunks + + @staticmethod + def _excl_slices(arr, resolution): + """Split row or col ind into slices of excl rows or slices + + Parameters + ---------- + arr : np.ndarray + 1D array to be split into slices + resolution : int + Resolution of the sc points + + Returns + ------- + slices : list + List of arr slices, each with length equal to self.resolution + (except for the last array in the list which is the remainder). + """ + + slices = get_chunk_ranges(len(arr), resolution) + slices = list(map(lambda i: slice(*i), slices)) + + return slices + +
[docs] def get_sc_row_col_ind(self, gid): + """Get the supply curve grid row and column index values corresponding + to a supply curve gid. + + Parameters + ---------- + gid : int + Supply curve point gid. + + Returns + ------- + row_ind : int + Row index that the gid is located at in the sc grid. + col_ind : int + Column index that the gid is located at in the sc grid. + """ + row_ind = self.points.loc[gid, 'row_ind'] + col_ind = self.points.loc[gid, 'col_ind'] + return row_ind, col_ind
+ +
[docs] def get_excl_slices(self, gid): + """Get the row and column slices of the exclusions grid corresponding + to the supply curve point gid. + + Parameters + ---------- + gid : int + Supply curve point gid. + + Returns + ------- + row_slice : slice + Exclusions grid row slice corresponding to the sc point gid. + col_slice : slice + Exclusions grid col slice corresponding to the sc point gid. + """ + + if gid >= len(self): + raise SupplyCurveError('Requested gid "{}" is out of bounds for ' + 'supply curve points with length "{}".' + .format(gid, len(self))) + + row_slice = self.excl_row_slices[self.row_indices[gid]] + col_slice = self.excl_col_slices[self.col_indices[gid]] + + return row_slice, col_slice
+ +
[docs] def get_flat_excl_ind(self, gid): + """Get the index values of the flattened exclusions grid corresponding + to the supply curve point gid. + + Parameters + ---------- + gid : int + Supply curve point gid. + + Returns + ------- + excl_ind : np.ndarray + Index values of the flattened exclusions grid corresponding to + the SC gid. + """ + + row_slice, col_slice = self.get_excl_slices(gid) + excl_ind = self.exclusions.iarr[row_slice, col_slice].flatten() + + return excl_ind
+ +
[docs] def get_excl_points(self, dset, gid): + """Get the exclusions data corresponding to a supply curve gid. + + Parameters + ---------- + dset : str | int + Used as the first arg in the exclusions __getitem__ slice. + String can be "meta", integer can be layer number. + gid : int + Supply curve point gid. + + Returns + ------- + excl_points : pd.DataFrame + Exclusions data reduced to just the exclusion points associated + with the requested supply curve gid. + """ + + row_slice, col_slice = self.get_excl_slices(gid) + + return self.exclusions[dset, row_slice, col_slice]
+ +
[docs] def get_coord(self, gid): + """Get the centroid coordinate for the supply curve gid point. + + Parameters + ---------- + gid : int + Supply curve point gid. + + Returns + ------- + coord : tuple + Two entry coordinate tuple: (latitude, longitude) + """ + + lat = self.latitude[gid] + lon = self.longitude[gid] + + return (lat, lon)
+ +
[docs] def valid_sc_points(self, tm_dset): + """ + Determine which sc_point_gids contain resource gids and are thus + valid supply curve points + + Parameters + ---------- + tm_dset : str + Techmap dataset name + + Returns + ------- + valid_gids : ndarray + Vector of valid sc_point_gids that contain resource gis + """ + + logger.info('Getting valid SC points from "{}"...'.format(tm_dset)) + + valid_bool = np.zeros(self.n_rows * self.n_cols) + tm = self._excls[tm_dset] + + gid = 0 + for r in self.excl_row_slices: + for c in self.excl_col_slices: + if np.any(tm[r, c] != -1): + valid_bool[gid] = 1 + gid += 1 + + valid_gids = np.where(valid_bool == 1)[0].astype(np.uint32) + + logger.info('Found {} valid SC points out of {} total possible ' + '(valid SC points that map to valid resource gids)' + .format(len(valid_gids), len(valid_bool))) + + return valid_gids
+ +
[docs] def get_slice_lookup(self, sc_point_gids): + """ + Get exclusion slices for all requested supply curve point gids + + Parameters + ---------- + sc_point_gids : list | ndarray + List or 1D array of sc_point_gids to get exclusion slices for + + Returns + ------- + dict + lookup mapping sc_point_gid to exclusion slice + """ + return {g: self.get_excl_slices(g) for g in sc_point_gids}
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/supply_curve/points.html b/_modules/reV/supply_curve/points.html new file mode 100644 index 000000000..f94bde3b3 --- /dev/null +++ b/_modules/reV/supply_curve/points.html @@ -0,0 +1,2729 @@ + + + + + + reV.supply_curve.points — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.supply_curve.points

+# -*- coding: utf-8 -*-
+"""
+reV supply curve points frameworks.
+"""
+from abc import ABC
+import logging
+import numpy as np
+import pandas as pd
+from warnings import warn
+
+from reV.econ.economies_of_scale import EconomiesOfScale
+from reV.econ.utilities import lcoe_fcr
+from reV.handlers.exclusions import ExclusionLayers
+from reV.supply_curve.exclusions import ExclusionMask, ExclusionMaskFromDict
+from reV.utilities.exceptions import (SupplyCurveInputError,
+                                      EmptySupplyCurvePointError,
+                                      InputWarning,
+                                      FileInputError,
+                                      DataShapeError,
+                                      OutputWarning)
+
+from rex.resource import Resource, BaseResource
+from rex.multi_time_resource import MultiTimeResource
+from rex.utilities.utilities import jsonify_dict
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class AbstractSupplyCurvePoint(ABC): + """ + Abstract SC point based on only the point gid, SC shape, and resolution. + """ + + def __init__(self, gid, exclusion_shape, resolution=64): + """ + Parameters + ---------- + gid : int + gid for supply curve point to analyze. + exclusion_shape : tuple + Shape of the full exclusions extent (rows, cols). + resolution : int + Number of exclusion points per SC point along an axis. + This number**2 is the total number of exclusion points per + SC point. + """ + + self._gid = gid + self._resolution = resolution + self._rows, self._cols = self._parse_slices( + gid, resolution, exclusion_shape) + + def _parse_slices(self, gid, resolution, exclusion_shape): + """Parse inputs for the definition of this SC point. + + Parameters + ---------- + gid : int | None + gid for supply curve point to analyze. + resolution : int | None + SC resolution, must be input in combination with gid. + exclusion_shape : tuple + Shape of the exclusions extent (rows, cols). Inputing this will + speed things up considerably. + + Returns + ------- + rows : slice + Row slice to index the high-res layer (exclusions) for the gid in + the agg layer (supply curve). + cols : slice + Col slice to index the high-res layer (exclusions) for the gid in + the agg layer (supply curve). + """ + + rows, cols = self.get_agg_slices(gid, exclusion_shape, resolution) + + return rows, cols + + @property + def gid(self): + """supply curve point gid""" + return self._gid + + @property + def sc_point_gid(self): + """ + Supply curve point gid + + Returns + ------- + int + """ + return self._gid + + @property + def resolution(self): + """Get the supply curve grid aggregation resolution""" + return self._resolution + + @property + def rows(self): + """Get the rows of the exclusions layer associated with this SC point. + + Returns + ------- + rows : slice + Row slice to index the high-res layer (exclusions layer) for the + gid in the agg layer (supply curve layer). 
+ """ + return self._rows + + @property + def cols(self): + """Get the cols of the exclusions layer associated with this SC point. + + Returns + ------- + cols : slice + Column slice to index the high-res layer (exclusions layer) for the + gid in the agg layer (supply curve layer). + """ + return self._cols + +
[docs] @staticmethod + def get_agg_slices(gid, shape, resolution): + """Get the row, col slices of an aggregation gid. + + Parameters + ---------- + gid : int + Gid of interest in the aggregated layer. + shape : tuple + (row, col) shape tuple of the underlying high-res layer. + resolution : int + Resolution of the aggregation: number of pixels in 1D being + aggregated. + + Returns + ------- + row_slice : slice + Row slice to index the high-res layer for the gid in the agg layer. + col_slice : slice + Col slice to index the high-res layer for the gid in the agg layer. + """ + + nrows = int(np.ceil(shape[0] / resolution)) + ncols = int(np.ceil(shape[1] / resolution)) + super_shape = (nrows, ncols) + arr = np.arange(nrows * ncols).reshape(super_shape) + try: + loc = np.where(arr == gid) + row = loc[0][0] + col = loc[1][0] + except IndexError as exc: + msg = ('Gid {} out of bounds for extent shape {} and ' + 'resolution {}.'.format(gid, shape, resolution)) + raise IndexError(msg) from exc + + if row + 1 != nrows: + row_slice = slice(row * resolution, (row + 1) * resolution) + else: + row_slice = slice(row * resolution, shape[0]) + + if col + 1 != ncols: + col_slice = slice(col * resolution, (col + 1) * resolution) + else: + col_slice = slice(col * resolution, shape[1]) + + return row_slice, col_slice
+ + +
[docs]class SupplyCurvePoint(AbstractSupplyCurvePoint): + """Generic single SC point based on exclusions, resolution, and techmap""" + + def __init__(self, gid, excl, tm_dset, excl_dict=None, inclusion_mask=None, + resolution=64, excl_area=None, exclusion_shape=None, + close=True): + """ + Parameters + ---------- + gid : int + gid for supply curve point to analyze. + excl : str | list | tuple | ExclusionMask + Filepath(s) to exclusions h5 or ExclusionMask file handler. + tm_dset : str + Dataset name in the exclusions file containing the + exclusions-to-resource mapping data. + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + None if excl input is pre-initialized. + inclusion_mask : np.ndarray + 2D array pre-extracted inclusion mask where 1 is included and 0 is + excluded. The shape of this will be checked against the input + resolution. + resolution : int + Number of exclusion points per SC point along an axis. + This number**2 is the total number of exclusion points per + SC point. + excl_area : float | None, optional + Area of an exclusion pixel in km2. None will try to infer the area + from the profile transform attribute in excl_fpath, by default None + exclusion_shape : tuple + Shape of the full exclusions extent (rows, cols). Inputing this + will speed things up considerably. + close : bool + Flag to close object file handlers on exit. 
+ """ + + self._excl_dict = excl_dict + self._close = close + self._excl_fpath, self._excls = self._parse_excl_file(excl) + + if exclusion_shape is None: + exclusion_shape = self.exclusions.shape + + super().__init__(gid, exclusion_shape, resolution=resolution) + + self._gids = self._parse_techmap(tm_dset) + + self._incl_mask = inclusion_mask + self._incl_mask_flat = None + if inclusion_mask is not None: + msg = ('Bad inclusion mask input shape of {} with stated ' + 'resolution of {}'.format(inclusion_mask.shape, resolution)) + assert len(inclusion_mask.shape) == 2, msg + assert inclusion_mask.shape[0] <= resolution, msg + assert inclusion_mask.shape[1] <= resolution, msg + assert inclusion_mask.size == len(self._gids), msg + self._incl_mask = inclusion_mask.copy() + + self._centroid = None + self._excl_area = excl_area + self._check_excl() + + @staticmethod + def _parse_excl_file(excl): + """Parse excl filepath input or handler object and set to attrs. + + Parameters + ---------- + excl : str | ExclusionMask + Filepath to exclusions geotiff or ExclusionMask handler + + Returns + ------- + excl_fpath : str | list | tuple + Filepath(s) for exclusions file + exclusions : ExclusionMask | None + Exclusions mask if input is already an open handler or None if it + is to be lazy instantiated. + """ + + if isinstance(excl, (str, list, tuple)): + excl_fpath = excl + exclusions = None + elif isinstance(excl, ExclusionMask): + excl_fpath = excl.excl_h5.h5_file + exclusions = excl + else: + raise SupplyCurveInputError('SupplyCurvePoints needs an ' + 'exclusions file path, or ' + 'ExclusionMask handler but ' + 'received: {}' + .format(type(excl))) + + return excl_fpath, exclusions + + def _parse_techmap(self, tm_dset): + """Parse data from the tech map file (exclusions to resource mapping). + Raise EmptySupplyCurvePointError if there are no valid resource points + in this SC point. 
+ + Parameters + ---------- + tm_dset : str + Dataset name in the exclusions file containing the + exclusions-to-resource mapping data. + + Returns + ------- + res_gids : np.ndarray + 1D array with length == number of exclusion points. reV resource + gids (native resource index) from the original resource data + corresponding to the tech exclusions. + """ + res_gids = self.exclusions.excl_h5[tm_dset, self.rows, self.cols] + res_gids = res_gids.astype(np.int32).flatten() + + if (res_gids != -1).sum() == 0: + emsg = ('Supply curve point gid {} has no viable exclusion points ' + 'based on exclusions file: "{}"' + .format(self._gid, self._excl_fpath)) + raise EmptySupplyCurvePointError(emsg) + + return res_gids + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + if type is not None: + raise + +
[docs] def close(self): + """Close all file handlers.""" + if self._close: + if self._excls is not None: + self._excls.close()
+ + @property + def exclusions(self): + """Get the exclusions object. + + Returns + ------- + _excls : ExclusionMask + ExclusionMask h5 handler object. + """ + if self._excls is None: + self._excls = ExclusionMaskFromDict(self._excl_fpath, + layers_dict=self._excl_dict) + + return self._excls + + @property + def centroid(self): + """Get the supply curve point centroid coordinate. + + Returns + ------- + centroid : tuple + SC point centroid (lat, lon). + """ + decimals = 3 + + if self._centroid is None: + lats = self.exclusions.excl_h5['latitude', self.rows, self.cols] + lons = self.exclusions.excl_h5['longitude', self.rows, self.cols] + self._centroid = (np.round(lats.mean(), decimals=decimals), + np.round(lons.mean(), decimals=decimals)) + + return self._centroid + + @property + def pixel_area(self): + """The area in km2 of a single exclusion pixel. If this value was not + provided on initialization, it is determined from the profile of the + exclusion file. + + Returns + ------- + float + """ + if self._excl_area is None: + with ExclusionLayers(self._excl_fpath) as f: + self._excl_area = f.pixel_area + return self._excl_area + + @property + def area(self): + """Get the non-excluded resource area of the supply curve point in the + current resource class. + + Returns + ------- + area : float + Non-excluded resource/generation area in square km. + """ + mask = self._gids != -1 + area = np.sum(self.include_mask_flat[mask]) * self.pixel_area + + return area + + @property + def latitude(self): + """Get the SC point latitude""" + return self.centroid[0] + + @property + def longitude(self): + """Get the SC point longitude""" + return self.centroid[1] + + @property + def n_gids(self): + """ + Get the total number of not fully excluded pixels associated with the + available resource/generation gids at the given sc gid. 
+ + Returns + ------- + n_gids : list + """ + mask = self._gids != -1 + n_gids = np.sum(self.include_mask_flat[mask] > 0) + + return n_gids + + @property + def include_mask(self): + """Get the 2D inclusion mask (normalized with expected range: [0, 1] + where 1 is included and 0 is excluded). + + Returns + ------- + np.ndarray + """ + + if self._incl_mask is None: + self._incl_mask = self.exclusions[self.rows, self.cols] + + # make sure exclusion pixels outside resource extent are excluded + out_of_extent = self._gids.reshape(self._incl_mask.shape) == -1 + self._incl_mask[out_of_extent] = 0.0 + + if self._incl_mask.max() > 1: + w = ('Exclusions data max value is > 1: {}' + .format(self._incl_mask.max()), InputWarning) + logger.warning(w) + warn(w) + + return self._incl_mask + + @property + def include_mask_flat(self): + """Get the flattened inclusion mask (normalized with expected + range: [0, 1] where 1 is included and 0 is excluded). + + Returns + ------- + np.ndarray + """ + + if self._incl_mask_flat is None: + self._incl_mask_flat = self.include_mask.flatten() + + return self._incl_mask_flat + + @property + def bool_mask(self): + """Get a boolean inclusion mask (True if excl point is not excluded). + + Returns + ------- + mask : np.ndarray + Mask with length equal to the flattened exclusion shape + """ + return self._gids != -1 + + @property + def h5(self): + """ + placeholder for h5 Resource handler object + """ + + @property + def summary(self): + """ + Placeholder for Supply curve point's meta data summary + """ + + def _check_excl(self): + """ + Check to see if supply curve point is fully excluded + """ + + if all(self.include_mask_flat[self.bool_mask] == 0): + msg = ('Supply curve point gid {} is completely excluded!' + .format(self._gid)) + raise EmptySupplyCurvePointError(msg) + +
[docs] def exclusion_weighted_mean(self, arr, drop_nan=True): + """ + Calc the exclusions-weighted mean value of an array of resource data. + + Parameters + ---------- + arr : np.ndarray + Array of resource data. + drop_nan : bool + Flag to drop nan values from the mean calculation (only works for + 1D arr input, profiles should not have NaN's) + + Returns + ------- + mean : float | np.ndarray + Mean of arr masked by the binary exclusions then weighted by + the non-zero exclusions. This will be a 1D numpy array if the + input data is a 2D numpy array (averaged along axis=1) + """ + + if len(arr.shape) == 2: + x = arr[:, self._gids[self.bool_mask]].astype('float32') + incl = self.include_mask_flat[self.bool_mask] + x *= incl + mean = x.sum(axis=1) / incl.sum() + + else: + x = arr[self._gids[self.bool_mask]].astype('float32') + incl = self.include_mask_flat[self.bool_mask] + + if np.isnan(x).all(): + return np.nan + elif drop_nan and np.isnan(x).any(): + nan_mask = np.isnan(x) + x = x[~nan_mask] + incl = incl[~nan_mask] + + x *= incl + mean = x.sum() / incl.sum() + + return mean
+ +
[docs] def mean_wind_dirs(self, arr): + """ + Calc the mean wind directions at every time-step + + Parameters + ---------- + arr : np.ndarray + Array of wind direction data. + + Returns + ------- + mean_wind_dirs : np.ndarray | float + Mean wind direction of arr masked by the binary exclusions + """ + incl = self.include_mask_flat[self.bool_mask] + gids = self._gids[self.bool_mask] + if len(arr.shape) == 2: + arr_slice = (slice(None), gids) + ax = 1 + + else: + arr_slice = gids + ax = 0 + + angle = np.radians(arr[arr_slice], dtype=np.float32) + sin = np.mean(np.sin(angle) * incl, axis=ax) + cos = np.mean(np.cos(angle) * incl, axis=ax) + + mean_wind_dirs = np.degrees(np.arctan2(sin, cos)) + mask = mean_wind_dirs < 0 + mean_wind_dirs[mask] += 360 + + return mean_wind_dirs
+ +
[docs] def aggregate(self, arr): + """ + Calc sum (aggregation) of the resource data. + + Parameters + ---------- + arr : np.ndarray + Array of resource data. + + Returns + ------- + agg : float + Sum of arr masked by the binary exclusions + """ + if len(arr.shape) == 2: + x = arr[:, self._gids[self.bool_mask]].astype('float32') + ax = 1 + else: + x = arr[self._gids[self.bool_mask]].astype('float32') + ax = 0 + + x *= self.include_mask_flat[self.bool_mask] + agg = x.sum(axis=ax) + + return agg
+ +
[docs] @classmethod + def sc_mean(cls, gid, excl, tm_dset, data, excl_dict=None, resolution=64, + exclusion_shape=None, close=True): + """ + Compute exclusions weight mean for the sc point from data + + Parameters + ---------- + gid : int + gid for supply curve point to analyze. + excl : str | ExclusionMask + Filepath to exclusions h5 or ExclusionMask file handler. + tm_dset : str + Dataset name in the exclusions file containing the + exclusions-to-resource mapping data. + data : ndarray | ResourceDataset + Array of data or open dataset handler to apply exclusions too + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + None if excl input is pre-initialized. + resolution : int + Number of exclusion points per SC point along an axis. + This number**2 is the total number of exclusion points per + SC point. + exclusion_shape : tuple + Shape of the full exclusions extent (rows, cols). Inputing this + will speed things up considerably. + close : bool + Flag to close object file handlers on exit + + Returns + ------- + ndarray + Exclusions weighted means of data for supply curve point + """ + kwargs = {"excl_dict": excl_dict, "resolution": resolution, + "exclusion_shape": exclusion_shape, "close": close} + with cls(gid, excl, tm_dset, **kwargs) as point: + means = point.exclusion_weighted_mean(data) + + return means
+ +
[docs] @classmethod + def sc_sum(cls, gid, excl, tm_dset, data, excl_dict=None, resolution=64, + exclusion_shape=None, close=True): + """ + Compute the aggregate (sum) of data for the sc point + + Parameters + ---------- + gid : int + gid for supply curve point to analyze. + excl : str | ExclusionMask + Filepath to exclusions h5 or ExclusionMask file handler. + tm_dset : str + Dataset name in the exclusions file containing the + exclusions-to-resource mapping data. + data : ndarray | ResourceDataset + Array of data or open dataset handler to apply exclusions too + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + None if excl input is pre-initialized. + resolution : int + Number of exclusion points per SC point along an axis. + This number**2 is the total number of exclusion points per + SC point. + exclusion_shape : tuple + Shape of the full exclusions extent (rows, cols). Inputing this + will speed things up considerably. + close : bool + Flag to close object file handlers on exit. + + Returns + ------- + ndarray + Sum / aggregation of data for supply curve point + """ + kwargs = {"excl_dict": excl_dict, "resolution": resolution, + "exclusion_shape": exclusion_shape, "close": close} + with cls(gid, excl, tm_dset, **kwargs) as point: + agg = point.aggregate(data) + + return agg
+ + @staticmethod + def _mode(data): + """ + Compute the mode of the data vector and return a single value + + Parameters + ---------- + data : ndarray + data layer vector to compute mode for + + Returns + ------- + float | int + Mode of data + """ + if not data.size: + return None + else: + # pd series is more flexible with non-numeric than stats mode + return pd.Series(data).mode().values[0] + + @staticmethod + def _categorize(data, incl_mult): + """ + Extract the sum of inclusion scalar values (where 1 is + included, 0 is excluded, and 0.7 is included with 70 percent of + available land) for each unique (categorical value) in data + + Parameters + ---------- + data : ndarray + Vector of categorical values + incl_mult : ndarray + Vector of inclusion values + + Returns + ------- + str + Jsonified string of the dictionary mapping categorical values to + total inclusions + """ + + data = {category: float(incl_mult[(data == category)].sum()) + for category in np.unique(data)} + data = jsonify_dict(data) + + return data + + @classmethod + def _agg_data_layer_method(cls, data, incl_mult, method): + """Aggregate the data array using specified method. + + Parameters + ---------- + data : np.ndarray | None + Data array that will be flattened and operated on using method. + This must be the included data. Exclusions should be applied + before this method. + incl_mult : np.ndarray | None + Scalar exclusion data for methods with exclusion-weighted + aggregation methods. Shape must match input data. + method : str + Aggregation method (mode, mean, max, min, sum, category) + + Returns + ------- + data : float | int | str | None + Result of applying method to data. + """ + method_func = {'mode': cls._mode, + 'mean': np.mean, + 'max': np.max, + 'min': np.min, + 'sum': np.sum, + 'category': cls._categorize} + + if data is not None: + method = method.lower() + if method not in method_func: + e = ('Cannot recognize data layer agg method: ' + '"{}". 
Can only {}'.format(method, list(method_func))) + logger.error(e) + raise ValueError(e) + + if len(data.shape) > 1: + data = data.flatten() + + if data.shape != incl_mult.shape: + e = ('Cannot aggregate data with shape that doesnt ' + 'match excl mult!') + logger.error(e) + raise DataShapeError(e) + + if method == 'category': + data = method_func['category'](data, incl_mult) + elif method in ['mean', 'sum']: + data = data * incl_mult + data = method_func[method](data) + else: + data = method_func[method](data) + + return data + +
[docs] def agg_data_layers(self, summary, data_layers): + """Perform additional data layer aggregation. If there is no valid data + in the included area, the data layer will be taken from the full SC + point extent (ignoring exclusions). If there is still no valid data, + a warning will be raised and the data layer will have a NaN/None value. + + Parameters + ---------- + summary : dict + Dictionary of summary outputs for this sc point. + data_layers : None | dict + Aggregation data layers. Must be a dictionary keyed by data label + name. Each value must be another dictionary with "dset", "method", + and "fpath". + + Returns + ------- + summary : dict + Dictionary of summary outputs for this sc point. A new entry for + each data layer is added. + """ + + if data_layers is not None: + for name, attrs in data_layers.items(): + excl_fp = attrs.get('fpath', self._excl_fpath) + if excl_fp != self._excl_fpath: + fh = ExclusionLayers(attrs['fpath']) + else: + fh = self.exclusions.excl_h5 + + raw = fh[attrs['dset'], self.rows, self.cols] + nodata = fh.get_nodata_value(attrs['dset']) + + data = raw.flatten()[self.bool_mask] + incl_mult = self.include_mask_flat[self.bool_mask].copy() + + if nodata is not None: + valid_data_mask = (data != nodata) + data = data[valid_data_mask] + incl_mult = incl_mult[valid_data_mask] + + if not data.size: + m = ('Data layer "{}" has no valid data for ' + 'SC point gid {} because of exclusions ' + 'and/or nodata values in the data layer.' + .format(name, self._gid)) + logger.debug(m) + + data = self._agg_data_layer_method(data, incl_mult, + attrs['method']) + summary[name] = data + + if excl_fp != self._excl_fpath: + fh.close() + + return summary
+ + +
[docs]class AggregationSupplyCurvePoint(SupplyCurvePoint): + """Generic single SC point to aggregate data from an h5 file.""" + + def __init__(self, gid, excl, agg_h5, tm_dset, + excl_dict=None, inclusion_mask=None, + resolution=64, excl_area=None, exclusion_shape=None, + close=True, gen_index=None, apply_exclusions=True): + """ + Parameters + ---------- + gid : int + gid for supply curve point to analyze. + excl : str | ExclusionMask + Filepath to exclusions h5 or ExclusionMask file handler. + agg_h5 : str | Resource + Filepath to .h5 file to aggregate or Resource handler + tm_dset : str + Dataset name in the exclusions file containing the + exclusions-to-resource mapping data. + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + None if excl input is pre-initialized. + inclusion_mask : np.ndarray + 2D array pre-extracted inclusion mask where 1 is included and 0 is + excluded. The shape of this will be checked against the input + resolution. + resolution : int + Number of exclusion points per SC point along an axis. + This number**2 is the total number of exclusion points per + SC point. + excl_area : float | None, optional + Area of an exclusion pixel in km2. None will try to infer the area + from the profile transform attribute in excl_fpath, by default None + exclusion_shape : tuple + Shape of the full exclusions extent (rows, cols). Inputing this + will speed things up considerably. + close : bool + Flag to close object file handlers on exit. + gen_index : np.ndarray + Array of generation gids with array index equal to resource gid. + Array value is -1 if the resource index was not used in the + generation run. + apply_exclusions : bool + Flag to apply exclusions to the resource / generation gid's on + initialization. 
+ """ + super().__init__(gid, excl, tm_dset, + excl_dict=excl_dict, + inclusion_mask=inclusion_mask, + resolution=resolution, + excl_area=excl_area, + exclusion_shape=exclusion_shape, + close=close) + + self._h5_gid_set = None + self._h5_fpath, self._h5 = self._parse_h5_file(agg_h5) + + if gen_index is not None: + self._gids, _ = self._map_gen_gids(self._gids, gen_index) + + self._h5_gids = self._gids + + if (self._h5_gids != -1).sum() == 0: + emsg = ('Supply curve point gid {} has no viable exclusion ' + 'points based on exclusions file: "{}"' + .format(self._gid, self._excl_fpath)) + raise EmptySupplyCurvePointError(emsg) + + if apply_exclusions: + self._apply_exclusions() + + @staticmethod + def _parse_h5_file(h5): + """ + Parse .h5 filepath input or handler object and set to attrs. + + Parameters + ---------- + h5 : str | Resource + Filepath to .h5 file to aggregate or Resource handler + + Returns + ------- + h5_fpath : str + Filepath for .h5 file to aggregate + h5 : Resource | None + Resource if input is already an open handler or None if it + is to be lazy instantiated. + """ + + if isinstance(h5, str): + h5_fpath = h5 + h5 = None + elif issubclass(h5.__class__, BaseResource): + h5_fpath = h5.h5_file + elif issubclass(h5.__class__, MultiTimeResource): + h5_fpath = h5.h5_files + else: + raise SupplyCurveInputError('SupplyCurvePoints needs a ' + '.h5 file path, or ' + 'Resource handler but ' + 'received: {}' + .format(type(h5))) + + return h5_fpath, h5 + + def _apply_exclusions(self): + """Apply exclusions by masking the generation and resource gid arrays. + This removes all res/gen entries that are masked by the exclusions or + resource bin.""" + + # exclusions mask is False where excluded + exclude = self.include_mask_flat == 0 + + self._gids[exclude] = -1 + self._h5_gids[exclude] = -1 + + if (self._gids != -1).sum() == 0: + msg = ('Supply curve point gid {} is completely excluded!' + .format(self._gid)) + raise EmptySupplyCurvePointError(msg) + +
[docs] def close(self): + """Close all file handlers.""" + if self._close: + if self._excls is not None: + self._excls.close() + + if self._h5 is not None: + self._h5.close()
+ + @staticmethod + def _map_gen_gids(res_gids, gen_index): + """ + Map resource gids from techmap to gen gids in .h5 source file + + Parameters + ---------- + res_gids : ndarray + resource gids from techmap + gen_index : ndarray + Equivalent gen gids to resource gids + + Returns + ------- + gen_gids : ndarray + gen gid to excl mapping + res_gids : ndarray + updated resource gid to excl mapping + """ + mask = (res_gids >= len(gen_index)) | (res_gids == -1) + res_gids[mask] = -1 + gen_gids = gen_index[res_gids] + gen_gids[mask] = -1 + res_gids[(gen_gids == -1)] = -1 + + return gen_gids, res_gids + + @staticmethod + def _ordered_unique(seq): + """Get a list of unique values in the same order as the input sequence. + + Parameters + ---------- + seq : list | tuple + Sequence of values. + + Returns + ------- + seq : list + List of unique values in seq input with original order. + """ + + seen = set() + + return [x for x in seq if not (x in seen or seen.add(x))] + + @property + def h5(self): + """ + h5 Resource handler object + + Returns + ------- + _h5 : Resource + Resource h5 handler object. 
+ """ + if self._h5 is None and '*' in self._h5_fpath: + self._h5 = MultiTimeResource(self._h5_fpath) + elif self._h5 is None: + self._h5 = Resource(self._h5_fpath) + + return self._h5 + + @property + def country(self): + """Get the SC point country based on the resource meta data.""" + country = None + if 'country' in self.h5.meta and self.county is not None: + # make sure country and county are coincident + counties = self.h5.meta.loc[self.h5_gid_set, 'county'].values + iloc = np.where(counties == self.county)[0][0] + country = self.h5.meta.loc[self.h5_gid_set, 'country'].values + country = country[iloc] + + elif 'country' in self.h5.meta: + country = self.h5.meta.loc[self.h5_gid_set, 'country'].mode() + country = country.values[0] + + return country + + @property + def state(self): + """Get the SC point state based on the resource meta data.""" + state = None + if 'state' in self.h5.meta and self.county is not None: + # make sure state and county are coincident + counties = self.h5.meta.loc[self.h5_gid_set, 'county'].values + iloc = np.where(counties == self.county)[0][0] + state = self.h5.meta.loc[self.h5_gid_set, 'state'].values + state = state[iloc] + + elif 'state' in self.h5.meta: + state = self.h5.meta.loc[self.h5_gid_set, 'state'].mode() + state = state.values[0] + + return state + + @property + def county(self): + """Get the SC point county based on the resource meta data.""" + county = None + if 'county' in self.h5.meta: + county = self.h5.meta.loc[self.h5_gid_set, 'county'].mode() + county = county.values[0] + + return county + + @property + def elevation(self): + """Get the SC point elevation based on the resource meta data.""" + elevation = None + if 'elevation' in self.h5.meta: + elevation = self.h5.meta.loc[self.h5_gid_set, 'elevation'].mean() + + return elevation + + @property + def timezone(self): + """Get the SC point timezone based on the resource meta data.""" + timezone = None + if 'timezone' in self.h5.meta and self.county is not None: + # 
make sure timezone flag and county are coincident + counties = self.h5.meta.loc[self.h5_gid_set, 'county'].values + iloc = np.where(counties == self.county)[0][0] + timezone = self.h5.meta.loc[self.h5_gid_set, 'timezone'].values + timezone = timezone[iloc] + + elif 'timezone' in self.h5.meta: + timezone = self.h5.meta.loc[self.h5_gid_set, 'timezone'].mode() + timezone = timezone.values[0] + + return timezone + + @property + def offshore(self): + """Get the SC point offshore flag based on the resource meta data + (if offshore column is present).""" + offshore = None + if 'offshore' in self.h5.meta and self.county is not None: + # make sure offshore flag and county are coincident + counties = self.h5.meta.loc[self.h5_gid_set, 'county'].values + iloc = np.where(counties == self.county)[0][0] + offshore = self.h5.meta.loc[self.h5_gid_set, 'offshore'].values + offshore = offshore[iloc] + + elif 'offshore' in self.h5.meta: + offshore = self.h5.meta.loc[self.h5_gid_set, 'offshore'].mode() + offshore = offshore.values[0] + + return offshore + + @property + def h5_gid_set(self): + """Get list of unique h5 gids corresponding to this sc point. + + Returns + ------- + h5_gids : list + List of h5 gids. + """ + if self._h5_gid_set is None: + self._h5_gid_set = self._ordered_unique(self._h5_gids) + if -1 in self._h5_gid_set: + self._h5_gid_set.remove(-1) + + return self._h5_gid_set + + @property + def gid_counts(self): + """Get the sum of the inclusion values in each resource/generation gid + corresponding to this sc point. The sum of the gid counts can be less + than the value provided by n_gids if fractional exclusion/inclusions + are provided. 
+ + Returns + ------- + gid_counts : list + """ + gid_counts = [self.include_mask_flat[(self._h5_gids == gid)].sum() + for gid in self.h5_gid_set] + + return gid_counts + + @property + def summary(self): + """ + Supply curve point's meta data summary + + Returns + ------- + pandas.Series + List of supply curve point's meta data + """ + meta = {'sc_point_gid': self.sc_point_gid, + 'source_gids': self.h5_gid_set, + 'gid_counts': self.gid_counts, + 'n_gids': self.n_gids, + 'area_sq_km': self.area, + 'latitude': self.latitude, + 'longitude': self.longitude, + 'country': self.country, + 'state': self.state, + 'county': self.county, + 'elevation': self.elevation, + 'timezone': self.timezone, + } + meta = pd.Series(meta) + + return meta + +
[docs] @classmethod + def run(cls, gid, excl, agg_h5, tm_dset, *agg_dset, agg_method='mean', + excl_dict=None, inclusion_mask=None, + resolution=64, excl_area=None, + exclusion_shape=None, close=True, gen_index=None): + """ + Compute exclusions weight mean for the sc point from data + + Parameters + ---------- + gid : int + gid for supply curve point to analyze. + excl : str | ExclusionMask + Filepath to exclusions h5 or ExclusionMask file handler. + agg_h5 : str | Resource + Filepath to .h5 file to aggregate or Resource handler + tm_dset : str + Dataset name in the exclusions file containing the + exclusions-to-resource mapping data. + agg_dset : str + Dataset to aggreate, can supply multiple datasets or no datasets. + The datasets should be scalar values for each site. This method + cannot aggregate timeseries data. + agg_method : str + Aggregation method, either mean or sum/aggregate + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + None if excl input is pre-initialized. + inclusion_mask : np.ndarray + 2D array pre-extracted inclusion mask where 1 is included and 0 is + excluded. The shape of this will be checked against the input + resolution. + resolution : int + Number of exclusion points per SC point along an axis. + This number**2 is the total number of exclusion points per + SC point. + excl_area : float | None, optional + Area of an exclusion pixel in km2. None will try to infer the area + from the profile transform attribute in excl_fpath, by default None + exclusion_shape : tuple + Shape of the full exclusions extent (rows, cols). Inputing this + will speed things up considerably. + close : bool + Flag to close object file handlers on exit. + gen_index : np.ndarray + Array of generation gids with array index equal to resource gid. 
+ Array value is -1 if the resource index was not used in the + generation run. + + Returns + ------- + out : dict + Given datasets and meta data aggregated to supply curve points + """ + if isinstance(agg_dset, str): + agg_dset = (agg_dset, ) + + kwargs = {"excl_dict": excl_dict, + "inclusion_mask": inclusion_mask, + "resolution": resolution, + "excl_area": excl_area, + "exclusion_shape": exclusion_shape, + "close": close, + "gen_index": gen_index} + + with cls(gid, excl, agg_h5, tm_dset, **kwargs) as point: + if agg_method.lower().startswith('mean'): + agg_method = point.exclusion_weighted_mean + elif agg_method.lower().startswith(('sum', 'agg')): + agg_method = point.aggregate + elif 'wind_dir' in agg_method.lower(): + agg_method = point.mean_wind_dirs + else: + msg = ('Aggregation method must be either mean, ' + 'sum/aggregate, or wind_dir') + logger.error(msg) + raise ValueError(msg) + + out = {'meta': point.summary} + + for dset in agg_dset: + ds = point.h5.open_dataset(dset) + out[dset] = agg_method(ds) + + return out
+ + +
[docs]class GenerationSupplyCurvePoint(AggregationSupplyCurvePoint): + """Supply curve point summary framework that ties a reV SC point to its + respective generation and resource data.""" + + # technology-dependent power density estimates in MW/km2 + POWER_DENSITY = {'pv': 36, 'wind': 3} + + def __init__(self, gid, excl, gen, tm_dset, gen_index, + excl_dict=None, inclusion_mask=None, + res_class_dset=None, res_class_bin=None, excl_area=None, + power_density=None, cf_dset='cf_mean-means', + lcoe_dset='lcoe_fcr-means', h5_dsets=None, resolution=64, + exclusion_shape=None, close=False, friction_layer=None, + recalc_lcoe=True, apply_exclusions=True): + """ + Parameters + ---------- + gid : int + gid for supply curve point to analyze. + excl : str | ExclusionMask + Filepath to exclusions h5 or ExclusionMask file handler. + gen : str | reV.handlers.Outputs + Filepath to .h5 reV generation output results or reV Outputs file + handler. + tm_dset : str + Dataset name in the techmap file containing the + exclusions-to-resource mapping data. + gen_index : np.ndarray + Array of generation gids with array index equal to resource gid. + Array value is -1 if the resource index was not used in the + generation run. + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + None if excl input is pre-initialized. + inclusion_mask : np.ndarray + 2D array pre-extracted inclusion mask where 1 is included and 0 is + excluded. The shape of this will be checked against the input + resolution. + res_class_dset : str | np.ndarray | None + Dataset in the generation file dictating resource classes. + Can be pre-extracted resource data in np.ndarray. + None if no resource classes. + res_class_bin : list | None + Two-entry lists dictating the single resource class bin. 
+ None if no resource classes. + excl_area : float | None, optional + Area of an exclusion pixel in km2. None will try to infer the area + from the profile transform attribute in excl_fpath, by default None + power_density : float | None | pd.DataFrame + Constant power density float, None, or opened dataframe with + (resource) "gid" and "power_density columns". + cf_dset : str | np.ndarray + Dataset name from gen containing capacity factor mean values. + Can be pre-extracted generation output data in np.ndarray. + lcoe_dset : str | np.ndarray + Dataset name from gen containing LCOE mean values. + Can be pre-extracted generation output data in np.ndarray. + h5_dsets : None | list | dict + Optional list of dataset names to summarize from the gen/econ h5 + files. Can also be pre-extracted data dictionary where keys are + the dataset names and values are the arrays of data from the + h5 files. + resolution : int | None + SC resolution, must be input in combination with gid. + exclusion_shape : tuple + Shape of the exclusions extent (rows, cols). Inputing this will + speed things up considerably. + close : bool + Flag to close object file handlers on exit. + friction_layer : None | FrictionMask + Friction layer with scalar friction values if valid friction inputs + were entered. Otherwise, None to not apply friction layer. + recalc_lcoe : bool + Flag to re-calculate the LCOE from the multi-year mean capacity + factor and annual energy production data. This requires several + datasets to be aggregated in the h5_dsets input: system_capacity, + fixed_charge_rate, capital_cost, fixed_operating_cost, + and variable_operating_cost. + apply_exclusions : bool + Flag to apply exclusions to the resource / generation gid's on + initialization. 
+ """ + + self._res_class_dset = res_class_dset + self._res_class_bin = res_class_bin + self._cf_dset = cf_dset + self._lcoe_dset = lcoe_dset + self._h5_dsets = h5_dsets + self._mean_res = None + self._res_data = None + self._gen_data = None + self._lcoe_data = None + self._pd_obj = None + self._power_density = self._power_density_ac = power_density + self._friction_layer = friction_layer + self._recalc_lcoe = recalc_lcoe + + super().__init__(gid, excl, gen, tm_dset, + excl_dict=excl_dict, + inclusion_mask=inclusion_mask, + resolution=resolution, + excl_area=excl_area, + exclusion_shape=exclusion_shape, + close=close, apply_exclusions=False) + + self._res_gid_set = None + self._gen_gid_set = None + + self._gen_fpath, self._gen = self._h5_fpath, self._h5 + + self._gen_gids, self._res_gids = self._map_gen_gids(self._gids, + gen_index) + self._gids = self._gen_gids + if (self._gen_gids != -1).sum() == 0: + emsg = ('Supply curve point gid {} has no viable exclusion ' + 'points based on exclusions file: "{}"' + .format(self._gid, self._excl_fpath)) + raise EmptySupplyCurvePointError(emsg) + + if apply_exclusions: + self._apply_exclusions() + +
[docs] def exclusion_weighted_mean(self, flat_arr): + """Calc the exclusions-weighted mean value of a flat array of gen data. + + Parameters + ---------- + flat_arr : np.ndarray + Flattened array of resource/generation/econ data. Must be + index-able with the self._gen_gids array (must be a 1D array with + an entry for every site in the generation extent). + + Returns + ------- + mean : float + Mean of flat_arr masked by the binary exclusions then weighted by + the non-zero exclusions. + """ + x = flat_arr[self._gen_gids[self.bool_mask]].astype('float32') + incl = self.include_mask_flat[self.bool_mask] + x *= incl + mean = x.sum() / incl.sum() + + return mean
+ + @property + def gen(self): + """Get the generation output object. + + Returns + ------- + _gen : Resource + reV generation Resource object + """ + if self._gen is None: + self._gen = Resource(self._gen_fpath, str_decode=False) + + return self._gen + + @property + def res_gid_set(self): + """Get list of unique resource gids corresponding to this sc point. + + Returns + ------- + res_gids : list + List of resource gids. + """ + if self._res_gid_set is None: + self._res_gid_set = self._ordered_unique(self._res_gids) + if -1 in self._res_gid_set: + self._res_gid_set.remove(-1) + + return self._res_gid_set + + @property + def gen_gid_set(self): + """Get list of unique generation gids corresponding to this sc point. + + Returns + ------- + gen_gids : list + List of generation gids. + """ + if self._gen_gid_set is None: + self._gen_gid_set = self._ordered_unique(self._gen_gids) + if -1 in self._gen_gid_set: + self._gen_gid_set.remove(-1) + + return self._gen_gid_set + + @property + def h5_gid_set(self): + """Get list of unique h5 gids corresponding to this sc point. + Same as gen_gid_set + + Returns + ------- + h5_gids : list + List of h5 gids. + """ + return self.gen_gid_set + + @property + def gid_counts(self): + """Get the number of exclusion pixels in each resource/generation gid + corresponding to this sc point. + + Returns + ------- + gid_counts : list + List of exclusion pixels in each resource/generation gid. + """ + gid_counts = [self.include_mask_flat[(self._res_gids == gid)].sum() + for gid in self.res_gid_set] + + return gid_counts + + @property + def res_data(self): + """Get the resource data array. + + Returns + ------- + _res_data : np.ndarray + Multi-year-mean resource data array for all sites in the + generation data output file. 
+ """ + + if isinstance(self._res_class_dset, np.ndarray): + return self._res_class_dset + + else: + if self._res_data is None: + if self._res_class_dset in self.gen.datasets: + self._res_data = self.gen[self._res_class_dset] + + return self._res_data + + @property + def gen_data(self): + """Get the generation capacity factor data array. + + Returns + ------- + _gen_data : np.ndarray + Multi-year-mean capacity factor data array for all sites in the + generation data output file. + """ + + if isinstance(self._cf_dset, np.ndarray): + return self._cf_dset + + else: + if self._gen_data is None: + if self._cf_dset in self.gen.datasets: + self._gen_data = self.gen[self._cf_dset] + + return self._gen_data + + @property + def lcoe_data(self): + """Get the LCOE data array. + + Returns + ------- + _lcoe_data : np.ndarray + Multi-year-mean LCOE data array for all sites in the + generation data output file. + """ + + if isinstance(self._lcoe_dset, np.ndarray): + return self._lcoe_dset + + else: + if self._lcoe_data is None: + if self._lcoe_dset in self.gen.datasets: + self._lcoe_data = self.gen[self._lcoe_dset] + + return self._lcoe_data + + @property + def mean_cf(self): + """Get the mean capacity factor for the non-excluded data. Capacity + factor is weighted by the exclusions (usually 0 or 1, but 0.5 + exclusions will weight appropriately). + + Returns + ------- + mean_cf : float | None + Mean capacity factor value for the non-excluded data. + """ + mean_cf = None + if self.gen_data is not None: + mean_cf = self.exclusion_weighted_mean(self.gen_data) + + return mean_cf + + @property + def mean_lcoe(self): + """Get the mean LCOE for the non-excluded data. + + Returns + ------- + mean_lcoe : float | None + Mean LCOE value for the non-excluded data. 
+ """ + + mean_lcoe = None + + # prioritize the calculation of lcoe explicitly from the multi year + # mean CF (the lcoe re-calc will still happen if mean_cf is a single + # year CF, but the output should be identical to the original LCOE and + # so is not consequential). + if self._recalc_lcoe: + required = ('fixed_charge_rate', 'capital_cost', + 'fixed_operating_cost', 'variable_operating_cost', + 'system_capacity') + if self.mean_h5_dsets_data is not None: + if all(k in self.mean_h5_dsets_data for k in required): + aep = (self.mean_h5_dsets_data['system_capacity'] + * self.mean_cf * 8760) + mean_lcoe = lcoe_fcr( + self.mean_h5_dsets_data['fixed_charge_rate'], + self.mean_h5_dsets_data['capital_cost'], + self.mean_h5_dsets_data['fixed_operating_cost'], + aep, + self.mean_h5_dsets_data['variable_operating_cost']) + + # alternative if lcoe was not able to be re-calculated from + # multi year mean CF + if mean_lcoe is None and self.lcoe_data is not None: + mean_lcoe = self.exclusion_weighted_mean(self.lcoe_data) + + return mean_lcoe + + @property + def mean_res(self): + """Get the mean resource for the non-excluded data. + + Returns + ------- + mean_res : float | None + Mean resource for the non-excluded data. + """ + mean_res = None + if self._res_class_dset is not None: + mean_res = self.exclusion_weighted_mean(self.res_data) + + return mean_res + + @property + def mean_lcoe_friction(self): + """Get the mean LCOE for the non-excluded data, multiplied by the + mean_friction scalar value. + + Returns + ------- + mean_lcoe_friction : float | None + Mean LCOE value for the non-excluded data multiplied by the + mean friction scalar value. + """ + mean_lcoe_friction = None + if self.mean_lcoe is not None and self.mean_friction is not None: + mean_lcoe_friction = self.mean_lcoe * self.mean_friction + + return mean_lcoe_friction + + @property + def mean_friction(self): + """Get the mean friction scalar for the non-excluded data. 
+ + Returns + ------- + friction : None | float + Mean value of the friction data layer for the non-excluded data. + If friction layer is not input to this class, None is returned. + """ + friction = None + if self._friction_layer is not None: + friction = self.friction_data.flatten()[self.bool_mask].mean() + + return friction + + @property + def friction_data(self): + """Get the friction data for the full SC point (no exclusions) + + Returns + ------- + friction_data : None | np.ndarray + 2D friction data layer corresponding to the exclusions grid in + the SC domain. If friction layer is not input to this class, + None is returned. + """ + friction_data = None + if self._friction_layer is not None: + friction_data = self._friction_layer[self.rows, self.cols] + + return friction_data + + @property + def power_density(self): + """Get the estimated power density either from input or infered from + generation output meta. + + Returns + ------- + _power_density : float + Estimated power density in MW/km2 + """ + + if self._power_density is None: + tech = self.gen.meta['reV_tech'][0] + if tech in self.POWER_DENSITY: + self._power_density = self.POWER_DENSITY[tech] + else: + warn('Could not recognize reV technology in generation meta ' + 'data: "{}". 
Cannot lookup an appropriate power density ' + 'to calculate SC point capacity.'.format(tech)) + + elif isinstance(self._power_density, pd.DataFrame): + self._pd_obj = self._power_density + + missing = set(self.res_gid_set) - set(self._pd_obj.index.values) + if any(missing): + msg = ('Variable power density input is missing the ' + 'following resource GIDs: {}'.format(missing)) + logger.error(msg) + raise FileInputError(msg) + + pds = self._pd_obj.loc[self._res_gids[self.bool_mask], + 'power_density'].values + pds = pds.astype(np.float32) + pds *= self.include_mask_flat[self.bool_mask] + denom = self.include_mask_flat[self.bool_mask].sum() + self._power_density = pds.sum() / denom + + return self._power_density + + @property + def power_density_ac(self): + """Get the estimated AC power density either from input or + inferred from generation output meta. + + This value is only available for solar runs with a "dc_ac_ratio" + dataset in the generation file. If these conditions are not met, + this value is `None`. + + Returns + ------- + _power_density_ac : float | None + Estimated AC power density in MW/km2 + """ + if "dc_ac_ratio" not in self.gen.datasets: + return None + + ilr = self.gen["dc_ac_ratio", self._gen_gids[self.bool_mask]] + ilr = ilr.astype('float32') + weights = self.include_mask_flat[self.bool_mask] + if self._power_density_ac is None: + tech = self.gen.meta['reV_tech'][0] + if tech in self.POWER_DENSITY: + power_density_ac = self.POWER_DENSITY[tech] / ilr + power_density_ac *= weights + power_density_ac = power_density_ac.sum() / weights.sum() + else: + warn('Could not recognize reV technology in generation meta ' + 'data: "{}". 
Cannot lookup an appropriate power density ' + 'to calculate SC point capacity.'.format(tech)) + power_density_ac = None + + elif isinstance(self._power_density_ac, pd.DataFrame): + self._pd_obj = self._power_density_ac + + missing = set(self.res_gid_set) - set(self._pd_obj.index.values) + if any(missing): + msg = ('Variable power density input is missing the ' + 'following resource GIDs: {}'.format(missing)) + logger.error(msg) + raise FileInputError(msg) + + pds = self._pd_obj.loc[self._res_gids[self.bool_mask], + 'power_density'].values + power_density_ac = pds.astype(np.float32) / ilr + power_density_ac *= weights + power_density_ac = power_density_ac.sum() / weights.sum() + else: + power_density_ac = self._power_density_ac * weights / ilr + power_density_ac = power_density_ac.sum() / weights.sum() + + return power_density_ac + + @property + def capacity(self): + """Get the estimated capacity in MW of the supply curve point in the + current resource class with the applied exclusions. + + Returns + ------- + capacity : float + Estimated capacity in MW of the supply curve point in the + current resource class with the applied exclusions. + """ + + capacity = None + if self.power_density is not None: + capacity = self.area * self.power_density + + return capacity + + @property + def capacity_ac(self): + """Get the AC estimated capacity in MW of the supply curve point in the + current resource class with the applied exclusions. + + This values is provided only for solar inputs that have + the "dc_ac_ratio" dataset in the generation file. If these + conditions are not met, this value is `None`. + + Returns + ------- + capacity : float | None + Estimated AC capacity in MW of the supply curve point in the + current resource class with the applied exclusions. 
Only not + `None` for solar runs with "dc_ac_ratio" dataset in the + generation file + """ + if self.power_density_ac is None: + return None + + return self.area * self.power_density_ac + + @property + def h5_dsets_data(self): + """Get any additional/supplemental h5 dataset data to summarize. + + Returns + ------- + h5_dsets_data : dict | None + + """ + + _h5_dsets_data = None + + if isinstance(self._h5_dsets, (list, tuple)): + _h5_dsets_data = {} + for dset in self._h5_dsets: + if dset in self.gen.datasets: + _h5_dsets_data[dset] = self.gen[dset] + + elif isinstance(self._h5_dsets, dict): + _h5_dsets_data = self._h5_dsets + + elif self._h5_dsets is not None: + e = ('Cannot recognize h5_dsets input type, should be None, ' + 'a list of dataset names, or a dictionary or ' + 'pre-extracted data. Received: {} {}' + .format(type(self._h5_dsets), self._h5_dsets)) + logger.error(e) + raise TypeError(e) + + return _h5_dsets_data + + @property + def mean_h5_dsets_data(self): + """Get the mean supplemental h5 datasets data (optional) + + Returns + ------- + mean_h5_dsets_data : dict | None + Mean dataset values for the non-excluded data for the optional + h5_dsets input. + """ + _mean_h5_dsets_data = None + if self.h5_dsets_data is not None: + _mean_h5_dsets_data = {} + for dset, arr in self.h5_dsets_data.items(): + _mean_h5_dsets_data[dset] = self.exclusion_weighted_mean(arr) + + return _mean_h5_dsets_data + + def _apply_exclusions(self): + """Apply exclusions by masking the generation and resource gid arrays. + This removes all res/gen entries that are masked by the exclusions or + resource bin.""" + + # exclusions mask is False where excluded + exclude = self.include_mask_flat == 0 + exclude = self._resource_exclusion(exclude) + + self._gen_gids[exclude] = -1 + self._res_gids[exclude] = -1 + + # ensure that excluded pixels (including resource exclusions!) 
+ # has an exclusions multiplier of 0 + exclude = exclude.reshape(self.include_mask.shape) + self._incl_mask[exclude] = 0.0 + self._incl_mask = self._incl_mask.flatten() + + if (self._gen_gids != -1).sum() == 0: + msg = ('Supply curve point gid {} is completely excluded for res ' + 'bin: {}'.format(self._gid, self._res_class_bin)) + raise EmptySupplyCurvePointError(msg) + + def _resource_exclusion(self, boolean_exclude): + """Include the resource exclusion into a pre-existing bool exclusion. + + Parameters + ---------- + boolean_exclude : np.ndarray + Boolean exclusion array (True is exclude). + + Returns + ------- + boolean_exclude : np.ndarray + Same as input but includes additional exclusions for resource + outside of current resource class bin. + """ + + if (self._res_class_dset is not None + and self._res_class_bin is not None): + + rex = self.res_data[self._gen_gids] + rex = ((rex < np.min(self._res_class_bin)) + | (rex >= np.max(self._res_class_bin))) + + boolean_exclude = (boolean_exclude | rex) + + return boolean_exclude + +
[docs] def point_summary(self, args=None): + """ + Get a summary dictionary of a single supply curve point. + + Parameters + ---------- + args : tuple | list | None + List of summary arguments to include. None defaults to all + available args defined in the class attr. + + Returns + ------- + summary : dict + Dictionary of summary outputs for this sc point. + """ + + ARGS = {'res_gids': self.res_gid_set, + 'gen_gids': self.gen_gid_set, + 'gid_counts': self.gid_counts, + 'n_gids': self.n_gids, + 'mean_cf': self.mean_cf, + 'mean_lcoe': self.mean_lcoe, + 'mean_res': self.mean_res, + 'capacity': self.capacity, + 'area_sq_km': self.area, + 'latitude': self.latitude, + 'longitude': self.longitude, + 'country': self.country, + 'state': self.state, + 'county': self.county, + 'elevation': self.elevation, + 'timezone': self.timezone, + } + + if self.capacity_ac is not None: + ARGS['capacity_ac'] = self.capacity_ac + + if self.offshore is not None: + ARGS['offshore'] = self.offshore + + if self._friction_layer is not None: + ARGS['mean_friction'] = self.mean_friction + ARGS['mean_lcoe_friction'] = self.mean_lcoe_friction + + if self._h5_dsets is not None: + for dset, data in self.mean_h5_dsets_data.items(): + ARGS['mean_{}'.format(dset)] = data + + if args is None: + args = list(ARGS.keys()) + + summary = {} + for arg in args: + if arg in ARGS: + summary[arg] = ARGS[arg] + else: + warn('Cannot find "{}" as an available SC self summary ' + 'output', OutputWarning) + + return summary
+ +
[docs] @staticmethod + def economies_of_scale(cap_cost_scale, summary): + """Apply economies of scale to this point summary + + Parameters + ---------- + cap_cost_scale : str + LCOE scaling equation to implement "economies of scale". + Equation must be in python string format and return a scalar + value to multiply the capital cost by. Independent variables in + the equation should match the names of the columns in the reV + supply curve aggregation table. + summary : dict + Dictionary of summary outputs for this sc point. + + Returns + ------- + summary : dict + Dictionary of summary outputs for this sc point. + """ + + eos = EconomiesOfScale(cap_cost_scale, summary) + summary['raw_lcoe'] = eos.raw_lcoe + summary['mean_lcoe'] = eos.scaled_lcoe + summary['capital_cost_scalar'] = eos.capital_cost_scalar + + return summary
+ +
[docs] @classmethod + def summarize(cls, gid, excl_fpath, gen_fpath, tm_dset, gen_index, + excl_dict=None, inclusion_mask=None, + res_class_dset=None, res_class_bin=None, + excl_area=None, power_density=None, + cf_dset='cf_mean-means', lcoe_dset='lcoe_fcr-means', + h5_dsets=None, resolution=64, exclusion_shape=None, + close=False, friction_layer=None, args=None, + data_layers=None, cap_cost_scale=None, recalc_lcoe=True): + """Get a summary dictionary of a single supply curve point. + + Parameters + ---------- + gid : int + gid for supply curve point to analyze. + excl_fpath : str + Filepath to exclusions h5. + gen_fpath : str + Filepath to .h5 reV generation output results. + tm_dset : str + Dataset name in the techmap file containing the + exclusions-to-resource mapping data. + gen_index : np.ndarray + Array of generation gids with array index equal to resource gid. + Array value is -1 if the resource index was not used in the + generation run. + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + None if excl input is pre-initialized. + inclusion_mask : np.ndarray + 2D array pre-extracted inclusion mask where 1 is included and 0 is + excluded. The shape of this will be checked against the input + resolution. + res_class_dset : str | np.ndarray | None + Dataset in the generation file dictating resource classes. + Can be pre-extracted resource data in np.ndarray. + None if no resource classes. + res_class_bin : list | None + Two-entry lists dictating the single resource class bin. + None if no resource classes. + excl_area : float | None, optional + Area of an exclusion pixel in km2. 
None will try to infer the area + from the profile transform attribute in excl_fpath, by default None + power_density : float | None | pd.DataFrame + Constant power density float, None, or opened dataframe with + (resource) "gid" and "power_density columns". + cf_dset : str | np.ndarray + Dataset name from gen containing capacity factor mean values. + Can be pre-extracted generation output data in np.ndarray. + lcoe_dset : str | np.ndarray + Dataset name from gen containing LCOE mean values. + Can be pre-extracted generation output data in np.ndarray. + h5_dsets : None | list | dict + Optional list of dataset names to summarize from the gen/econ h5 + files. Can also be pre-extracted data dictionary where keys are + the dataset names and values are the arrays of data from the + h5 files. + resolution : int | None + SC resolution, must be input in combination with gid. + exclusion_shape : tuple + Shape of the exclusions extent (rows, cols). Inputing this will + speed things up considerably. + close : bool + Flag to close object file handlers on exit. + friction_layer : None | FrictionMask + Friction layer with scalar friction values if valid friction inputs + were entered. Otherwise, None to not apply friction layer. + args : tuple | list, optional + List of summary arguments to include. None defaults to all + available args defined in the class attr, by default None + data_layers : dict, optional + Aggregation data layers. Must be a dictionary keyed by data label + name. Each value must be another dictionary with "dset", "method", + and "fpath", by default None + cap_cost_scale : str | None + Optional LCOE scaling equation to implement "economies of scale". + Equations must be in python string format and return a scalar + value to multiply the capital cost by. Independent variables in + the equation should match the names of the columns in the reV + supply curve aggregation table. 
+ recalc_lcoe : bool + Flag to re-calculate the LCOE from the multi-year mean capacity + factor and annual energy production data. This requires several + datasets to be aggregated in the h5_dsets input: system_capacity, + fixed_charge_rate, capital_cost, fixed_operating_cost, + and variable_operating_cost. + + Returns + ------- + summary : dict + Dictionary of summary outputs for this sc point. + """ + kwargs = {"excl_dict": excl_dict, + "inclusion_mask": inclusion_mask, + "res_class_dset": res_class_dset, + "res_class_bin": res_class_bin, + "excl_area": excl_area, + "power_density": power_density, + "cf_dset": cf_dset, + "lcoe_dset": lcoe_dset, + "h5_dsets": h5_dsets, + "resolution": resolution, + "exclusion_shape": exclusion_shape, + "close": close, + 'friction_layer': friction_layer, + 'recalc_lcoe': recalc_lcoe, + } + + with cls(gid, excl_fpath, gen_fpath, tm_dset, gen_index, + **kwargs) as point: + summary = point.point_summary(args=args) + + if data_layers is not None: + summary = point.agg_data_layers(summary, data_layers) + + if cap_cost_scale is not None: + summary = point.economies_of_scale(cap_cost_scale, summary) + + return summary
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/supply_curve/sc_aggregation.html b/_modules/reV/supply_curve/sc_aggregation.html new file mode 100644 index 000000000..bbf7013c3 --- /dev/null +++ b/_modules/reV/supply_curve/sc_aggregation.html @@ -0,0 +1,1998 @@ + + + + + + reV.supply_curve.sc_aggregation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for reV.supply_curve.sc_aggregation

+# -*- coding: utf-8 -*-
+# pylint: disable=anomalous-backslash-in-string
+"""reV supply curve aggregation framework.
+
+Created on Fri Jun 21 13:24:31 2019
+
+@author: gbuster
+"""
+from concurrent.futures import as_completed
+import logging
+import numpy as np
+import psutil
+import os
+import pandas as pd
+from warnings import warn
+
+from reV.generation.base import BaseGen
+from reV.handlers.exclusions import ExclusionLayers
+from reV.supply_curve.aggregation import (AbstractAggFileHandler,
+                                          BaseAggregation, Aggregation)
+from reV.supply_curve.exclusions import FrictionMask
+from reV.supply_curve.extent import SupplyCurveExtent
+from reV.supply_curve.points import GenerationSupplyCurvePoint
+from reV.supply_curve.tech_mapping import TechMapping
+from reV.utilities.exceptions import (EmptySupplyCurvePointError,
+                                      OutputWarning, FileInputError,
+                                      InputWarning)
+from reV.utilities import log_versions
+
+from rex.resource import Resource
+from rex.multi_file_resource import MultiFileResource
+from rex.utilities.execution import SpawnProcessPool
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class SupplyCurveAggFileHandler(AbstractAggFileHandler): + """ + Framework to handle aggregation summary context managers: + - exclusions .h5 file + - generation .h5 file + - econ .h5 file (optional) + - friction surface .h5 file (optional) + - variable power density .csv (optional) + """ + + def __init__(self, excl_fpath, gen_fpath, econ_fpath=None, + data_layers=None, power_density=None, excl_dict=None, + friction_fpath=None, friction_dset=None, + area_filter_kernel='queen', min_area=None): + """ + Parameters + ---------- + excl_fpath : str | list | tuple + Filepath to exclusions h5 with techmap dataset + (can be one or more filepaths). + gen_fpath : str + Filepath to .h5 reV generation output results. + econ_fpath : str | None + Filepath to .h5 reV econ output results. This is optional and only + used if the lcoe_dset is not present in the gen_fpath file. + data_layers : None | dict + Aggregation data layers. Must be a dictionary keyed by data label + name. Each value must be another dictionary with "dset", "method", + and "fpath". + power_density : float | str | None + Power density in MW/km2 or filepath to variable power + density file. None will attempt to infer a constant + power density from the generation meta data technology. + Variable power density csvs must have "gid" and "power_density" + columns where gid is the resource gid (typically wtk or nsrdb gid) + and the power_density column is in MW/km2. + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + friction_fpath : str | None + Filepath to friction surface data (cost based exclusions). + Must be paired with friction_dset. The friction data must be the + same shape as the exclusions. 
Friction input creates a new output + "mean_lcoe_friction" which is the nominal LCOE multiplied by the + friction data. + friction_dset : str | None + Dataset name in friction_fpath for the friction surface data. + Must be paired with friction_fpath. Must be same shape as + exclusions. + area_filter_kernel : str + Contiguous area filter method to use on final exclusions mask + min_area : float | None + Minimum required contiguous area filter in sq-km + """ + super().__init__(excl_fpath, excl_dict=excl_dict, + area_filter_kernel=area_filter_kernel, + min_area=min_area) + + self._gen = self._open_gen_econ_resource(gen_fpath, econ_fpath) + # pre-initialize the resource meta data + _ = self._gen.meta + + self._data_layers = data_layers + self._power_density = power_density + self._parse_power_density() + + self._friction_layer = None + if friction_fpath is not None and friction_dset is not None: + self._friction_layer = FrictionMask(friction_fpath, friction_dset) + + if not np.all(self._friction_layer.shape == self._excl.shape): + e = ('Friction layer shape {} must match exclusions shape {}!' + .format(self._friction_layer.shape, self._excl.shape)) + logger.error(e) + raise FileInputError(e) + + @staticmethod + def _open_gen_econ_resource(gen_fpath, econ_fpath): + """Open a rex resource file handler for the reV generation and + (optionally) the reV econ output(s). + + Parameters + ---------- + gen_fpath : str + Filepath to .h5 reV generation output results. + econ_fpath : str | None + Filepath to .h5 reV econ output results. This is optional and only + used if the lcoe_dset is not present in the gen_fpath file. + + Returns + ------- + handler : Resource | MultiFileResource + Open resource handler initialized with gen_fpath and (optionally) + econ_fpath. 
+ """ + + handler = None + is_gen_h5 = isinstance(gen_fpath, str) and gen_fpath.endswith('.h5') + is_econ_h5 = isinstance(econ_fpath, str) and econ_fpath.endswith('.h5') + + if is_gen_h5 and not is_econ_h5: + handler = Resource(gen_fpath) + elif is_gen_h5 and is_econ_h5: + handler = MultiFileResource([gen_fpath, econ_fpath], + check_files=True) + + return handler + + def _parse_power_density(self): + """Parse the power density input. If file, open file handler.""" + + if isinstance(self._power_density, str): + self._pdf = self._power_density + + if self._pdf.endswith('.csv'): + self._power_density = pd.read_csv(self._pdf) + if ('gid' in self._power_density + and 'power_density' in self._power_density): + self._power_density = self._power_density.set_index('gid') + else: + msg = ('Variable power density file must include "gid" ' + 'and "power_density" columns, but received: {}' + .format(self._power_density.columns.values)) + logger.error(msg) + raise FileInputError(msg) + else: + msg = ('Variable power density file must be csv but received: ' + '{}'.format(self._pdf)) + logger.error(msg) + raise FileInputError(msg) + +
[docs] def close(self): + """Close all file handlers.""" + self._excl.close() + self._gen.close() + if self._friction_layer is not None: + self._friction_layer.close()
+ + @property + def gen(self): + """Get the gen file handler object. + + Returns + ------- + _gen : Outputs + reV gen outputs handler object. + """ + return self._gen + + @property + def data_layers(self): + """Get the data layers object. + + Returns + ------- + _data_layers : dict + Data layers namespace. + """ + return self._data_layers + + @property + def power_density(self): + """Get the power density object. + + Returns + ------- + _power_density : float | None | pd.DataFrame + Constant power density float, None, or opened dataframe with + (resource) "gid" and "power_density columns". + """ + return self._power_density + + @property + def friction_layer(self): + """Get the friction layer (cost based exclusions). + + Returns + ------- + friction_layer : None | FrictionMask + Friction layer with scalar friction values if valid friction inputs + were entered. Otherwise, None to not apply friction layer. + """ + return self._friction_layer
+ + +
[docs]class SupplyCurveAggregation(BaseAggregation): + """SupplyCurveAggregation""" + + def __init__(self, excl_fpath, tm_dset, econ_fpath=None, + excl_dict=None, area_filter_kernel='queen', min_area=None, + resolution=64, excl_area=None, gids=None, + pre_extract_inclusions=False, res_class_dset=None, + res_class_bins=None, cf_dset='cf_mean-means', + lcoe_dset='lcoe_fcr-means', h5_dsets=None, data_layers=None, + power_density=None, friction_fpath=None, friction_dset=None, + cap_cost_scale=None, recalc_lcoe=True): + """reV supply curve points aggregation framework. + + ``reV`` supply curve aggregation combines a high-resolution + (e.g. 90m) exclusion dataset with a (typically) lower resolution + (e.g. 2km) generation dataset by mapping all data onto the high- + resolution grid and aggregating it by a large factor (e.g. 64 or + 128). The result is coarsely-gridded data that summarizes + capacity and generation potential as well as associated + economics under a particular land access scenario. This module + can also summarize extra data layers during the aggregation + process, allowing for complementary land characterization + analysis. + + Parameters + ---------- + excl_fpath : str | list | tuple + Filepath to exclusions data HDF5 file. The exclusions HDF5 + file should contain the layers specified in `excl_dict` + and `data_layers`. These layers may also be spread out + across multiple HDF5 files, in which case this input should + be a list or tuple of filepaths pointing to the files + containing the layers. Note that each data layer must be + uniquely defined (i.e.only appear once and in a single + input file). + tm_dset : str + Dataset name in the `excl_fpath` file containing the + techmap (exclusions-to-resource mapping data). This data + layer links the supply curve GID's to the generation GID's + that are used to evaluate performance metrics such as + ``mean_cf``. + + .. 
Important:: This dataset uniquely couples the (typically + high-resolution) exclusion layers to the (typically + lower-resolution) resource data. Therefore, a separate + techmap must be used for every unique combination of + resource and exclusion coordinates. + + .. Note:: If executing ``reV`` from the command line, you + can specify a name that is not in the exclusions HDF5 + file, and ``reV`` will calculate the techmap for you. Note + however that computing the techmap and writing it to the + exclusion HDF5 file is a blocking operation, so you may + only run a single ``reV`` aggregation step at a time this + way. + + econ_fpath : str, optional + Filepath to HDF5 file with ``reV`` econ output results + containing an `lcoe_dset` dataset. If ``None``, `lcoe_dset` + should be a dataset in the `gen_fpath` HDF5 file that + aggregation is executed on. + + .. Note:: If executing ``reV`` from the command line, this + input can be set to ``"PIPELINE"`` to parse the output + from one of these preceding pipeline steps: + ``multi-year``, ``collect``, or ``generation``. However, + note that duplicate executions of any of these commands + within the pipeline may invalidate this parsing, meaning + the `econ_fpath` input will have to be specified manually. + + By default, ``None``. + excl_dict : dict | None + Dictionary of exclusion keyword arguments of the format + ``{layer_dset_name: {kwarg: value}}``, where + ``layer_dset_name`` is a dataset in the exclusion h5 file + and the ``kwarg: value`` pair is a keyword argument to + the :class:`reV.supply_curve.exclusions.LayerMask` class. 
+ For example:: + + excl_dict = { + "typical_exclusion": { + "exclude_values": 255, + }, + "another_exclusion": { + "exclude_values": [2, 3], + "weight": 0.5 + }, + "exclusion_with_nodata": { + "exclude_range": [10, 100], + "exclude_nodata": True, + "nodata_value": -1 + }, + "partial_setback": { + "use_as_weights": True + }, + "height_limit": { + "exclude_range": [0, 200] + }, + "slope": { + "include_range": [0, 20] + }, + "developable_land": { + "force_include_values": 42 + }, + "more_developable_land": { + "force_include_range": [5, 10] + }, + ... + } + + Note that all the keys given in this dictionary should be + datasets of the `excl_fpath` file. If ``None`` or empty + dictionary, no exclusions are applied. By default, ``None``. + area_filter_kernel : {"queen", "rook"}, optional + Contiguous area filter method to use on final exclusions + mask. The filters are defined as:: + + # Queen: # Rook: + [[1,1,1], [[0,1,0], + [1,1,1], [1,1,1], + [1,1,1]] [0,1,0]] + + These filters define how neighboring pixels are "connected". + Once pixels in the final exclusion layer are connected, the + area of each resulting cluster is computed and compared + against the `min_area` input. Any cluster with an area + less than `min_area` is excluded from the final mask. + This argument has no effect if `min_area` is ``None``. + By default, ``"queen"``. + min_area : float, optional + Minimum area (in km\ :sup:`2`) required to keep an isolated + cluster of (included) land within the resulting exclusions + mask. Any clusters of land with areas less than this value + will be marked as exclusions. See the documentation for + `area_filter_kernel` for an explanation of how the area of + each land cluster is computed. If ``None``, no area + filtering is performed. By default, ``None``. + resolution : int, optional + Supply Curve resolution. This value defines how many pixels + are in a single side of a supply curve cell. 
For example, + a value of ``64`` would generate a supply curve where the + side of each supply curve cell is ``64x64`` exclusion + pixels. By default, ``64``. + excl_area : float, optional + Area of a single exclusion mask pixel (in km\ :sup:`2`). + If ``None``, this value will be inferred from the profile + transform attribute in `excl_fpath`. By default, ``None``. + gids : list, optional + List of supply curve point gids to get summary for. If you + would like to obtain all available ``reV`` supply curve + points to run, you can use the + :class:`reV.supply_curve.extent.SupplyCurveExtent` class + like so:: + + import pandas as pd + from reV.supply_curve.extent import SupplyCurveExtent + + excl_fpath = "..." + resolution = ... + with SupplyCurveExtent(excl_fpath, resolution) as sc: + gids = sc.valid_sc_points(tm_dset).tolist() + ... + + If ``None``, supply curve aggregation is computed for all + gids in the supply curve extent. By default, ``None``. + pre_extract_inclusions : bool, optional + Optional flag to pre-extract/compute the inclusion mask from + the `excl_dict` input. It is typically faster to compute + the inclusion mask on the fly with parallel workers. + By default, ``False``. + res_class_dset : str, optional + Name of dataset in the ``reV`` generation HDF5 output file + containing resource data. If ``None``, no aggregated + resource classification is performed (i.e. no ``mean_res`` + output), and the `res_class_bins` is ignored. + By default, ``None``. + res_class_bins : list, optional + Optional input to perform separate aggregations for various + resource data ranges. If ``None``, only a single aggregation + per supply curve point is performed. Otherwise, this input + should be a list of floats or ints representing the resource + bin boundaries. One aggregation per resource value range is + computed, and only pixels within the given resource range + are aggregated. By default, ``None``. 
+ cf_dset : str, optional + Dataset name from the ``reV`` generation HDF5 output file + containing capacity factor mean values. + By default, ``"cf_mean-means"``. + lcoe_dset : str, optional + Dataset name from the ``reV`` generation HDF5 output file + containing LCOE mean values. + By default, ``"lcoe_fcr-means"``. + h5_dsets : list, optional + Optional list of additional datasets from the ``reV`` + generation/econ HDF5 output file to aggregate. If ``None``, + no extra datasets are aggregated. By default, ``None``. + data_layers : dict, optional + Dictionary of aggregation data layers of the format:: + + data_layers = { + "output_layer_name": { + "dset": "layer_name", + "method": "mean", + "fpath": "/path/to/data.h5" + }, + "another_output_layer_name": { + "dset": "input_layer_name", + "method": "mode", + # optional "fpath" key omitted + }, + ... + } + + The ``"output_layer_name"`` is the column name under which + the aggregated data will appear in the output CSV file. The + ``"output_layer_name"`` does not have to match the ``dset`` + input value. The latter should match the layer name in the + HDF5 from which the data to aggregate should be pulled. The + ``method`` should be one of + ``{"mode", "mean", "min", "max", "sum", "category"}``, + describing how the high-resolution data should be aggregated + for each supply curve point. ``fpath`` is an optional key + that can point to an HDF5 file containing the layer data. If + left out, the data is assumed to exist in the file(s) + specified by the `excl_fpath` input. If ``None``, no data + layer aggregation is performed. 
By default, ``None`` + power_density : float | str, optional + Power density value (in MW/km\ :sup:`2`) or filepath to + variable power density CSV file containing the following + columns: + + - ``gid`` : resource gid (typically wtk or nsrdb gid) + - ``power_density`` : power density value (in + MW/km\ :sup:`2`) + + If ``None``, a constant power density is inferred from the + generation meta data technology. By default, ``None``. + friction_fpath : str, optional + Filepath to friction surface data (cost based exclusions). + Must be paired with the `friction_dset` input below. The + friction data must be the same shape as the exclusions. + Friction input creates a new output column + ``"mean_lcoe_friction"`` which is the nominal LCOE + multiplied by the friction data. If ``None``, no friction + data is aggregated. By default, ``None``. + friction_dset : str, optional + Dataset name in friction_fpath for the friction surface + data. Must be paired with the `friction_fpath` above. If + ``None``, no friction data is aggregated. + By default, ``None``. + cap_cost_scale : str, optional + Optional LCOE scaling equation to implement "economies of + scale". Equations must be in python string format and must + return a scalar value to multiply the capital cost by. + Independent variables in the equation should match the names + of the columns in the ``reV`` supply curve aggregation + output table (see the documentation of + :class:`~reV.supply_curve.sc_aggregation.SupplyCurveAggregation` + for details on available outputs). If ``None``, no economies + of scale are applied. By default, ``None``. + recalc_lcoe : bool, optional + Flag to re-calculate the LCOE from the multi-year mean + capacity factor and annual energy production data. This + requires several datasets to be aggregated in the h5_dsets + input: + + - ``system_capacity`` + - ``fixed_charge_rate`` + - ``capital_cost`` + - ``fixed_operating_cost`` + - ``variable_operating_cost`` + + By default, ``True``. 
+ + Examples + -------- + Standard outputs: + + sc_gid : int + Unique supply curve gid. This is the enumerated supply curve + points, which can have overlapping geographic locations due + to different resource bins at the same geographic SC point. + res_gids : list + Stringified list of resource gids (e.g. original WTK or + NSRDB resource GIDs) corresponding to each SC point. + gen_gids : list + Stringified list of generation gids (e.g. GID in the reV + generation output, which corresponds to the reV project + points and not necessarily the resource GIDs). + gid_counts : list + Stringified list of the sum of inclusion scalar values + corresponding to each `gen_gid` and `res_gid`, where 1 is + included, 0 is excluded, and 0.7 is included with 70 percent + of available land. Each entry in this list is associated + with the corresponding entry in the `gen_gids` and + `res_gids` lists. + n_gids : int + Total number of included pixels. This is a boolean sum and + considers partial inclusions to be included (e.g. 1). + mean_cf : float + Mean capacity factor of each supply curve point (the + arithmetic mean is weighted by the inclusion layer) + (unitless). + mean_lcoe : float + Mean LCOE of each supply curve point (the arithmetic mean is + weighted by the inclusion layer). Units match the reV econ + output ($/MWh). By default, the LCOE is re-calculated using + the multi-year mean capacity factor and annual energy + production. This requires several datasets to be aggregated + in the h5_dsets input: ``fixed_charge_rate``, + ``capital_cost``, + ``fixed_operating_cost``, ``annual_energy_production``, and + ``variable_operating_cost``. This recalc behavior can be + disabled by setting ``recalc_lcoe=False``. + mean_res : float + Mean resource, the resource dataset to average is provided + by the user in `res_class_dset`. The arithmetic mean is + weighted by the inclusion layer. + capacity : float + Total capacity of each supply curve point (MW). 
Units are + contingent on the `power_density` input units of MW/km2. + area_sq_km : float + Total included area for each supply curve point in km2. This + is based on the nominal area of each exclusion pixel which + by default is calculated from the exclusion profile + attributes. The NREL reV default is 0.0081 km2 pixels + (90m x 90m). The area sum considers partial inclusions. + latitude : float + Supply curve point centroid latitude coordinate, in degrees + (does not consider exclusions). + longitude : float + Supply curve point centroid longitude coordinate, in degrees + (does not consider exclusions). + country : str + Country of the supply curve point based on the most common + country of the associated resource meta data. Does not + consider exclusions. + state : str + State of the supply curve point based on the most common + state of the associated resource meta data. Does not + consider exclusions. + county : str + County of the supply curve point based on the most common + county of the associated resource meta data. Does not + consider exclusions. + elevation : float + Mean elevation of the supply curve point based on the mean + elevation of the associated resource meta data. Does not + consider exclusions. + timezone : int + UTC offset of local timezone based on the most common + timezone of the associated resource meta data. Does not + consider exclusions. + sc_point_gid : int + Spatially deterministic supply curve point gid. Duplicate + `sc_point_gid` values can exist due to resource binning. + sc_row_ind : int + Row index of the supply curve point in the aggregated + exclusion grid. + sc_col_ind : int + Column index of the supply curve point in the aggregated + exclusion grid + res_class : int + Resource class for the supply curve gid. 
Each geographic + supply curve point (`sc_point_gid`) can have multiple + resource classes associated with it, resulting in multiple + supply curve gids (`sc_gid`) associated with the same + spatially deterministic supply curve point. + + + Optional outputs: + + mean_friction : float + Mean of the friction data provided in 'friction_fpath' and + 'friction_dset'. The arithmetic mean is weighted by boolean + inclusions and considers partial inclusions to be included. + mean_lcoe_friction : float + Mean of the nominal LCOE multiplied by mean_friction value. + mean_{dset} : float + Mean input h5 dataset(s) provided by the user in 'h5_dsets'. + These mean calculations are weighted by the partial + inclusion layer. + data_layers : float | int | str | dict + Requested data layer aggregations, each data layer must be + the same shape as the exclusion layers. + + - mode: int | str + Most common value of a given data layer after + applying the boolean inclusion mask. + - mean : float + Arithmetic mean value of a given data layer weighted + by the scalar inclusion mask (considers partial + inclusions). + - min : float | int + Minimum value of a given data layer after applying + the boolean inclusion mask. + - max : float | int + Maximum value of a given data layer after applying + the boolean inclusion mask. + - sum : float + Sum of a given data layer weighted by the scalar + inclusion mask (considers partial inclusions). + - category : dict + Dictionary mapping the unique values in the + `data_layer` to the sum of inclusion scalar values + associated with all pixels with that unique value. 
+ """ + log_versions(logger) + logger.info('Initializing SupplyCurveAggregation...') + logger.debug('Exclusion filepath: {}'.format(excl_fpath)) + logger.debug('Exclusion dict: {}'.format(excl_dict)) + + super().__init__(excl_fpath, tm_dset, excl_dict=excl_dict, + area_filter_kernel=area_filter_kernel, + min_area=min_area, resolution=resolution, + excl_area=excl_area, gids=gids, + pre_extract_inclusions=pre_extract_inclusions) + + self._econ_fpath = econ_fpath + self._res_class_dset = res_class_dset + self._res_class_bins = self._convert_bins(res_class_bins) + self._cf_dset = cf_dset + self._lcoe_dset = lcoe_dset + self._h5_dsets = h5_dsets + self._cap_cost_scale = cap_cost_scale + self._power_density = power_density + self._friction_fpath = friction_fpath + self._friction_dset = friction_dset + self._data_layers = data_layers + self._recalc_lcoe = recalc_lcoe + + logger.debug('Resource class bins: {}'.format(self._res_class_bins)) + + if self._cap_cost_scale is not None: + if self._h5_dsets is None: + self._h5_dsets = [] + + self._h5_dsets += list(BaseGen.LCOE_ARGS) + self._h5_dsets = list(set(self._h5_dsets)) + + if self._power_density is None: + msg = ('Supply curve aggregation power density not specified. ' + 'Will try to infer based on lookup table: {}' + .format(GenerationSupplyCurvePoint.POWER_DENSITY)) + logger.warning(msg) + warn(msg, InputWarning) + + self._check_data_layers() + + def _check_data_layers(self, methods=('mean', 'max', 'min', + 'mode', 'sum', 'category')): + """Run pre-flight checks on requested aggregation data layers. + + Parameters + ---------- + methods : list | tuple + Data layer aggregation methods that are available to the user. 
+ """ + + if self._data_layers is not None: + logger.debug('Checking data layers...') + + with ExclusionLayers(self._excl_fpath) as f: + shape_base = f.shape + + for k, v in self._data_layers.items(): + if 'dset' not in v: + raise KeyError('Data aggregation "dset" data layer "{}" ' + 'must be specified.'.format(k)) + if 'method' not in v: + raise KeyError('Data aggregation "method" data layer "{}" ' + 'must be specified.'.format(k)) + elif v['method'].lower() not in methods: + raise ValueError('Cannot recognize data layer agg method: ' + '"{}". Can only do: {}.' + .format(v['method'], methods)) + if 'fpath' in v: + with ExclusionLayers(v['fpath']) as f: + try: + mismatched_shapes = any(f.shape != shape_base) + except TypeError: + mismatched_shapes = f.shape != shape_base + if mismatched_shapes: + msg = ('Data shape of data layer "{}" is {}, ' + 'which does not match the baseline ' + 'exclusions shape {}.' + .format(k, f.shape, shape_base)) + raise FileInputError(msg) + + logger.debug('Finished checking data layers.') + + @staticmethod + def _get_res_gen_lcoe_data(gen, res_class_dset, res_class_bins, + cf_dset, lcoe_dset): + """Extract the basic resource / generation / lcoe data to be used in + the aggregation process. + + Parameters + ---------- + gen : Resource | MultiFileResource + Open rex resource handler initialized from gen_fpath and + (optionally) econ_fpath. + res_class_dset : str | None + Dataset in the generation file dictating resource classes. + None if no resource classes. + res_class_bins : list | None + List of two-entry lists dictating the resource class bins. + None if no resource classes. + cf_dset : str + Dataset name from f_gen containing capacity factor mean values. + lcoe_dset : str + Dataset name from f_gen containing LCOE mean values. + + Returns + ------- + res_data : np.ndarray | None + Extracted resource data from res_class_dset + res_class_bins : list + List of resouce class bin ranges. 
+ cf_data : np.ndarray | None + Capacity factor data extracted from cf_dset in gen + lcoe_data : np.ndarray | None + LCOE data extracted from lcoe_dset in gen + """ + + dset_list = (res_class_dset, cf_dset, lcoe_dset) + gen_dsets = [] if gen is None else gen.datasets + labels = ('res_class_dset', 'cf_dset', 'lcoe_dset') + temp = [None, None, None] + + if isinstance(gen, Resource): + source_fps = [gen.h5_file] + elif isinstance(gen, MultiFileResource): + source_fps = gen._h5_files + else: + msg = ('Did not recognize gen object input of type "{}": {}' + .format(type(gen), gen)) + logger.error(msg) + raise TypeError(msg) + + for i, dset in enumerate(dset_list): + if dset in gen_dsets: + temp[i] = gen[dset] + elif dset not in gen_dsets and dset is not None: + w = ('Could not find "{}" input as "{}" in source files: {}. ' + 'Available datasets: {}' + .format(labels[i], dset, source_fps, gen_dsets)) + logger.warning(w) + warn(w, OutputWarning) + + res_data, cf_data, lcoe_data = temp + + if res_class_dset is None or res_class_bins is None: + res_class_bins = [None] + + return res_data, res_class_bins, cf_data, lcoe_data + + @staticmethod + def _get_extra_dsets(gen, h5_dsets): + """Extract extra ancillary datasets to be used in the aggregation + process + + Parameters + ---------- + gen : Resource | MultiFileResource + Open rex resource handler initialized from gen_fpath and + (optionally) econ_fpath. + h5_dsets : list | None + Optional list of additional datasets from the source h5 gen/econ + files to aggregate. + + Returns + ------- + h5_dsets_data : dict | None + If additional h5_dsets are requested, this will be a dictionary + keyed by the h5 dataset names. The corresponding values will be + the extracted arrays from the h5 files. 
+ """ + + # look for the datasets required by the LCOE re-calculation and make + # lists of the missing datasets + gen_dsets = [] if gen is None else gen.datasets + lcoe_recalc_req = ('fixed_charge_rate', 'capital_cost', + 'fixed_operating_cost', 'variable_operating_cost', + 'system_capacity') + missing_lcoe_source = [k for k in lcoe_recalc_req + if k not in gen_dsets] + missing_lcoe_request = [] + + if isinstance(gen, Resource): + source_fps = [gen.h5_file] + elif isinstance(gen, MultiFileResource): + source_fps = gen._h5_files + else: + msg = ('Did not recognize gen object input of type "{}": {}' + .format(type(gen), gen)) + logger.error(msg) + raise TypeError(msg) + + h5_dsets_data = None + if h5_dsets is not None: + missing_lcoe_request = [k for k in lcoe_recalc_req + if k not in h5_dsets] + + if not isinstance(h5_dsets, (list, tuple)): + e = ('Additional h5_dsets argument must be a list or tuple ' + 'but received: {} {}'.format(type(h5_dsets), h5_dsets)) + logger.error(e) + raise TypeError(e) + + missing_h5_dsets = [k for k in h5_dsets if k not in gen_dsets] + if any(missing_h5_dsets): + msg = ('Could not find requested h5_dsets "{}" in ' + 'source files: {}. Available datasets: {}' + .format(missing_h5_dsets, source_fps, gen_dsets)) + logger.error(msg) + raise FileInputError(msg) + + h5_dsets_data = {dset: gen[dset] for dset in h5_dsets} + + if any(missing_lcoe_source): + msg = ('Could not find the datasets in the gen source file that ' + 'are required to re-calculate the multi-year LCOE. 
If you ' + 'are running a multi-year job, it is strongly suggested ' + 'you pass through these datasets to re-calculate the LCOE ' + 'from the multi-year mean CF: {}' + .format(missing_lcoe_source)) + logger.warning(msg) + warn(msg, InputWarning) + + if any(missing_lcoe_request): + msg = ('It is strongly advised that you include the following ' + 'datasets in the h5_dsets request in order to re-calculate ' + 'the LCOE from the multi-year mean CF and AEP: {}' + .format(missing_lcoe_request)) + logger.warning(msg) + warn(msg, InputWarning) + + return h5_dsets_data + +
[docs] @classmethod + def run_serial(cls, excl_fpath, gen_fpath, tm_dset, gen_index, + econ_fpath=None, excl_dict=None, inclusion_mask=None, + area_filter_kernel='queen', min_area=None, + resolution=64, gids=None, args=None, res_class_dset=None, + res_class_bins=None, cf_dset='cf_mean-means', + lcoe_dset='lcoe_fcr-means', h5_dsets=None, data_layers=None, + power_density=None, friction_fpath=None, friction_dset=None, + excl_area=None, cap_cost_scale=None, recalc_lcoe=True): + """Standalone method to create agg summary - can be parallelized. + + Parameters + ---------- + excl_fpath : str | list | tuple + Filepath to exclusions h5 with techmap dataset + (can be one or more filepaths). + gen_fpath : str + Filepath to .h5 reV generation output results. + tm_dset : str + Dataset name in the exclusions file containing the + exclusions-to-resource mapping data. + gen_index : np.ndarray + Array of generation gids with array index equal to resource gid. + Array value is -1 if the resource index was not used in the + generation run. + econ_fpath : str | None + Filepath to .h5 reV econ output results. This is optional and only + used if the lcoe_dset is not present in the gen_fpath file. + excl_dict : dict | None + Dictionary of exclusion keyword arugments of the format + {layer_dset_name: {kwarg: value}} where layer_dset_name is a + dataset in the exclusion h5 file and kwarg is a keyword argument to + the reV.supply_curve.exclusions.LayerMask class. + inclusion_mask : np.ndarray | dict | optional + 2D array pre-extracted inclusion mask where 1 is included and 0 is + excluded. This must be either match the full exclusion shape or + be a dict lookup of single-sc-point exclusion masks corresponding + to the gids input and keyed by gids, by default None which will + calculate exclusions on the fly for each sc point. 
+ area_filter_kernel : str + Contiguous area filter method to use on final exclusions mask + min_area : float | None + Minimum required contiguous area filter in sq-km + resolution : int | None + SC resolution, must be input in combination with gid. Prefered + option is to use the row/col slices to define the SC point instead. + gids : list | None + List of supply curve point gids to get summary for (can use to + subset if running in parallel), or None for all gids in the SC + extent, by default None + args : list | None + List of positional args for sc_point_method + res_class_dset : str | None + Dataset in the generation file dictating resource classes. + None if no resource classes. + res_class_bins : list | None + List of two-entry lists dictating the resource class bins. + None if no resource classes. + cf_dset : str + Dataset name from f_gen containing capacity factor mean values. + lcoe_dset : str + Dataset name from f_gen containing LCOE mean values. + h5_dsets : list | None + Optional list of additional datasets from the source h5 gen/econ + files to aggregate. + data_layers : None | dict + Aggregation data layers. Must be a dictionary keyed by data label + name. Each value must be another dictionary with "dset", "method", + and "fpath". + power_density : float | str | None + Power density in MW/km2 or filepath to variable power + density file. None will attempt to infer a constant + power density from the generation meta data technology. + Variable power density csvs must have "gid" and "power_density" + columns where gid is the resource gid (typically wtk or nsrdb gid) + and the power_density column is in MW/km2. + friction_fpath : str | None + Filepath to friction surface data (cost based exclusions). + Must be paired with friction_dset. The friction data must be the + same shape as the exclusions. Friction input creates a new output + "mean_lcoe_friction" which is the nominal LCOE multiplied by the + friction data. 
+ friction_dset : str | None + Dataset name in friction_fpath for the friction surface data. + Must be paired with friction_fpath. Must be same shape as + exclusions. + excl_area : float | None, optional + Area of an exclusion pixel in km2. None will try to infer the area + from the profile transform attribute in excl_fpath, by default None + cap_cost_scale : str | None + Optional LCOE scaling equation to implement "economies of scale". + Equations must be in python string format and return a scalar + value to multiply the capital cost by. Independent variables in + the equation should match the names of the columns in the reV + supply curve aggregation table. + recalc_lcoe : bool + Flag to re-calculate the LCOE from the multi-year mean capacity + factor and annual energy production data. This requires several + datasets to be aggregated in the h5_dsets input: system_capacity, + fixed_charge_rate, capital_cost, fixed_operating_cost, + and variable_operating_cost. + + Returns + ------- + summary : list + List of dictionaries, each being an SC point summary. 
+ """ + summary = [] + + with SupplyCurveExtent(excl_fpath, resolution=resolution) as sc: + points = sc.points + exclusion_shape = sc.exclusions.shape + if gids is None: + gids = sc.valid_sc_points(tm_dset) + elif np.issubdtype(type(gids), np.number): + gids = [gids] + + slice_lookup = sc.get_slice_lookup(gids) + + logger.debug('Starting SupplyCurveAggregation serial with ' + 'supply curve {} gids'.format(len(gids))) + + cls._check_inclusion_mask(inclusion_mask, gids, exclusion_shape) + + # pre-extract handlers so they are not repeatedly initialized + file_kwargs = {'econ_fpath': econ_fpath, + 'data_layers': data_layers, + 'power_density': power_density, + 'excl_dict': excl_dict, + 'area_filter_kernel': area_filter_kernel, + 'min_area': min_area, + 'friction_fpath': friction_fpath, + 'friction_dset': friction_dset} + with SupplyCurveAggFileHandler(excl_fpath, gen_fpath, + **file_kwargs) as fh: + + temp = cls._get_res_gen_lcoe_data(fh.gen, res_class_dset, + res_class_bins, cf_dset, + lcoe_dset) + res_data, res_class_bins, cf_data, lcoe_data = temp + h5_dsets_data = cls._get_extra_dsets(fh.gen, h5_dsets) + + n_finished = 0 + for gid in gids: + gid_inclusions = cls._get_gid_inclusion_mask( + inclusion_mask, gid, slice_lookup, + resolution=resolution) + + for ri, res_bin in enumerate(res_class_bins): + try: + pointsum = GenerationSupplyCurvePoint.summarize( + gid, + fh.exclusions, + fh.gen, + tm_dset, + gen_index, + res_class_dset=res_data, + res_class_bin=res_bin, + cf_dset=cf_data, + lcoe_dset=lcoe_data, + h5_dsets=h5_dsets_data, + data_layers=fh.data_layers, + resolution=resolution, + exclusion_shape=exclusion_shape, + power_density=fh.power_density, + args=args, + excl_dict=excl_dict, + inclusion_mask=gid_inclusions, + excl_area=excl_area, + close=False, + friction_layer=fh.friction_layer, + cap_cost_scale=cap_cost_scale, + recalc_lcoe=recalc_lcoe) + + except EmptySupplyCurvePointError: + logger.debug('SC point {} is empty'.format(gid)) + else: + 
pointsum['sc_point_gid'] = gid + pointsum['sc_row_ind'] = points.loc[gid, 'row_ind'] + pointsum['sc_col_ind'] = points.loc[gid, 'col_ind'] + pointsum['res_class'] = ri + + summary.append(pointsum) + logger.debug('Serial aggregation completed gid {}: ' + '{} out of {} points complete' + .format(gid, n_finished, len(gids))) + + n_finished += 1 + + return summary
+ +
[docs] def run_parallel(self, gen_fpath, args=None, max_workers=None, + sites_per_worker=100): + """Get the supply curve points aggregation summary using futures. + + Parameters + ---------- + gen_fpath : str + Filepath to .h5 reV generation output results. + args : tuple | list | None + List of summary arguments to include. None defaults to all + available args defined in the class attr. + max_workers : int | None, optional + Number of cores to run summary on. None is all + available cpus, by default None + sites_per_worker : int + Number of sc_points to summarize on each worker, by default 100 + + Returns + ------- + summary : list + List of dictionaries, each being an SC point summary. + """ + + gen_index = self._parse_gen_index(gen_fpath) + chunks = int(np.ceil(len(self.gids) / sites_per_worker)) + chunks = np.array_split(self.gids, chunks) + + logger.info('Running supply curve point aggregation for ' + 'points {} through {} at a resolution of {} ' + 'on {} cores in {} chunks.' + .format(self.gids[0], self.gids[-1], self._resolution, + max_workers, len(chunks))) + + slice_lookup = None + if self._inclusion_mask is not None: + with SupplyCurveExtent(self._excl_fpath, + resolution=self._resolution) as sc: + assert sc.exclusions.shape == self._inclusion_mask.shape + slice_lookup = sc.get_slice_lookup(self.gids) + + futures = [] + summary = [] + n_finished = 0 + loggers = [__name__, 'reV.supply_curve.point_summary', 'reV'] + with SpawnProcessPool(max_workers=max_workers, loggers=loggers) as exe: + + # iterate through split executions, submitting each to worker + for gid_set in chunks: + # submit executions and append to futures list + chunk_incl_masks = None + if self._inclusion_mask is not None: + chunk_incl_masks = {} + for gid in gid_set: + rs, cs = slice_lookup[gid] + chunk_incl_masks[gid] = self._inclusion_mask[rs, cs] + + futures.append(exe.submit( + self.run_serial, + self._excl_fpath, gen_fpath, + self._tm_dset, gen_index, + econ_fpath=self._econ_fpath, + 
excl_dict=self._excl_dict, + inclusion_mask=chunk_incl_masks, + res_class_dset=self._res_class_dset, + res_class_bins=self._res_class_bins, + cf_dset=self._cf_dset, + lcoe_dset=self._lcoe_dset, + h5_dsets=self._h5_dsets, + data_layers=self._data_layers, + resolution=self._resolution, + power_density=self._power_density, + friction_fpath=self._friction_fpath, + friction_dset=self._friction_dset, + area_filter_kernel=self._area_filter_kernel, + min_area=self._min_area, + gids=gid_set, + args=args, + excl_area=self._excl_area, + cap_cost_scale=self._cap_cost_scale, + recalc_lcoe=self._recalc_lcoe)) + + # gather results + for future in as_completed(futures): + n_finished += 1 + summary += future.result() + if n_finished % 10 == 0: + mem = psutil.virtual_memory() + logger.info('Parallel aggregation futures collected: ' + '{} out of {}. Memory usage is {:.3f} GB out ' + 'of {:.3f} GB ({:.2f}% utilized).' + .format(n_finished, len(chunks), + mem.used / 1e9, mem.total / 1e9, + 100 * mem.used / mem.total)) + + return summary
+ + @staticmethod + def _convert_bins(bins): + """Convert a list of floats or ints to a list of two-entry bin bounds. + + Parameters + ---------- + bins : list | None + List of floats or ints (bin edges) to convert to list of two-entry + bin boundaries or list of two-entry bind boundaries in final format + + Returns + ------- + bins : list + List of two-entry bin boundaries + """ + + if bins is None: + return None + + type_check = [isinstance(x, (list, tuple)) for x in bins] + + if all(type_check): + return bins + + elif any(type_check): + raise TypeError('Resource class bins has inconsistent ' + 'entry type: {}'.format(bins)) + + else: + bbins = [] + for i, b in enumerate(sorted(bins)): + if i < len(bins) - 1: + bbins.append([b, bins[i + 1]]) + + return bbins + + @staticmethod + def _summary_to_df(summary): + """Convert the agg summary list to a DataFrame. + + Parameters + ---------- + summary : list + List of dictionaries, each being an SC point summary. + + Returns + ------- + summary : DataFrame + Summary of the SC points. + """ + summary = pd.DataFrame(summary) + sort_by = [x for x in ('sc_point_gid', 'res_class') if x in summary] + summary = summary.sort_values(sort_by) + summary = summary.reset_index(drop=True) + summary.index.name = 'sc_gid' + + return summary + + def _validate_tech_mapping(self, res_fpath): + """Check that tech mapping exists and create it if it doesn't""" + + with ExclusionLayers(self._excl_fpath) as f: + dsets = f.h5.dsets + + excl_fp_is_str = isinstance(self._excl_fpath, str) + tm_in_excl = self._tm_dset in dsets + if tm_in_excl: + logger.info('Found techmap "{}".'.format(self._tm_dset)) + elif not tm_in_excl and not excl_fp_is_str: + msg = ('Could not find techmap dataset "{}" and cannot run ' + 'techmap with arbitrary multiple exclusion filepaths ' + 'to write to: {}'.format(self._tm_dset, self._excl_fpath)) + logger.error(msg) + raise RuntimeError(msg) + else: + logger.info('Could not find techmap "{}". Running techmap module.' 
+ .format(self._tm_dset)) + try: + TechMapping.run(self._excl_fpath, res_fpath, + dset=self._tm_dset) + except Exception as e: + msg = ('TechMapping process failed. Received the ' + 'following error:\n{}'.format(e)) + logger.exception(msg) + raise RuntimeError(msg) from e + +
[docs] def summarize(self, gen_fpath, args=None, max_workers=None, + sites_per_worker=100): + """ + Get the supply curve points aggregation summary + + Parameters + ---------- + gen_fpath : str + Filepath to .h5 reV generation output results. + args : tuple | list | None + List of summary arguments to include. None defaults to all + available args defined in the class attr. + max_workers : int | None, optional + Number of cores to run summary on. None is all + available cpus, by default None + sites_per_worker : int + Number of sc_points to summarize on each worker, by default 100 + + Returns + ------- + summary : list + List of dictionaries, each being an SC point summary. + """ + if max_workers is None: + max_workers = os.cpu_count() + + if max_workers == 1: + gen_index = self._parse_gen_index(gen_fpath) + afk = self._area_filter_kernel + summary = self.run_serial(self._excl_fpath, gen_fpath, + self._tm_dset, gen_index, + econ_fpath=self._econ_fpath, + excl_dict=self._excl_dict, + inclusion_mask=self._inclusion_mask, + res_class_dset=self._res_class_dset, + res_class_bins=self._res_class_bins, + cf_dset=self._cf_dset, + lcoe_dset=self._lcoe_dset, + h5_dsets=self._h5_dsets, + data_layers=self._data_layers, + resolution=self._resolution, + power_density=self._power_density, + friction_fpath=self._friction_fpath, + friction_dset=self._friction_dset, + area_filter_kernel=afk, + min_area=self._min_area, + gids=self.gids, args=args, + excl_area=self._excl_area, + cap_cost_scale=self._cap_cost_scale, + recalc_lcoe=self._recalc_lcoe) + else: + summary = self.run_parallel(gen_fpath=gen_fpath, args=args, + max_workers=max_workers, + sites_per_worker=sites_per_worker) + + if not any(summary): + e = ('Supply curve aggregation found no non-excluded SC points. ' + 'Please check your exclusions or subset SC GID selection.') + logger.error(e) + raise EmptySupplyCurvePointError(e) + + summary = self._summary_to_df(summary) + + return summary
+ +
[docs] def run(self, out_fpath, gen_fpath=None, res_fpath=None, args=None, + max_workers=None, sites_per_worker=100): + """Run a supply curve aggregation. + + Parameters + ---------- + gen_fpath : str, optional + Filepath to HDF5 file with ``reV`` generation output + results. If ``None``, a simple aggregation without any + generation, resource, or cost data is performed. + + .. Note:: If executing ``reV`` from the command line, this + input can be set to ``"PIPELINE"`` to parse the output + from one of these preceding pipeline steps: + ``multi-year``, ``collect``, or ``econ``. However, note + that duplicate executions of any of these commands within + the pipeline may invalidate this parsing, meaning the + `econ_fpath` input will have to be specified manually. + + By default, ``None``. + res_fpath : str, optional + Filepath to HDF5 resource file (e.g. WTK or NSRDB). This + input is required if techmap dset is to be created or if + ``gen_fpath is`` is ``None``. By default, ``None``. + args : tuple | list, optional + List of columns to include in summary output table. ``None`` + defaults to all available args defined in the + :class:`~reV.supply_curve.sc_aggregation.SupplyCurveAggregation` + documentation. By default, ``None``. + max_workers : int, optional + Number of cores to run summary on. ``None`` is all available + CPUs. By default, ``None``. + sites_per_worker : int, optional + Number of sc_points to summarize on each worker. + By default, ``100``. + + Returns + ------- + str + Path to output CSV file containing supply curve aggregation. 
+ """ + + self._validate_tech_mapping(res_fpath) + + if gen_fpath is None: + out = Aggregation.run( + self._excl_fpath, res_fpath, self._tm_dset, + excl_dict=self._excl_dict, + resolution=self._resolution, + excl_area=self._excl_area, + area_filter_kernel=self._area_filter_kernel, + min_area=self._min_area, + pre_extract_inclusions=self._pre_extract_inclusions, + max_workers=max_workers, + sites_per_worker=sites_per_worker) + summary = out['meta'] + else: + summary = self.summarize(gen_fpath=gen_fpath, args=args, + max_workers=max_workers, + sites_per_worker=sites_per_worker) + + out_fpath = _format_sc_agg_out_fpath(out_fpath) + summary.to_csv(out_fpath) + + return out_fpath
+ + +def _format_sc_agg_out_fpath(out_fpath): + """Add CSV file ending and replace underscore, if necessary.""" + if not out_fpath.endswith(".csv"): + out_fpath = '{}.csv'.format(out_fpath) + + project_dir, out_fn = os.path.split(out_fpath) + out_fn = out_fn.replace("supply_curve_aggregation", + "supply-curve-aggregation") + return os.path.join(project_dir, out_fn) +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/supply_curve/supply_curve.html b/_modules/reV/supply_curve/supply_curve.html new file mode 100644 index 000000000..ddc1e1f66 --- /dev/null +++ b/_modules/reV/supply_curve/supply_curve.html @@ -0,0 +1,2085 @@ + + + + + + reV.supply_curve.supply_curve — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for reV.supply_curve.supply_curve

+# -*- coding: utf-8 -*-
+"""
+reV supply curve module
+- Calculation of LCOT
+- Supply Curve creation
+"""
+from copy import deepcopy
+import json
+import logging
+import numpy as np
+import os
+import pandas as pd
+from warnings import warn
+
+from reV.handlers.transmission import TransmissionCosts as TC
+from reV.handlers.transmission import TransmissionFeatures as TF
+from reV.supply_curve.competitive_wind_farms import CompetitiveWindFarms
+from reV.utilities.exceptions import SupplyCurveInputError, SupplyCurveError
+from reV.utilities import log_versions
+
+from rex import Resource
+from rex.utilities import parse_table, SpawnProcessPool
+
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class SupplyCurve: + """SupplyCurve""" + + def __init__(self, sc_points, trans_table, sc_features=None, + sc_capacity_col='capacity'): + """reV LCOT calculation and SupplyCurve sorting class. + + ``reV`` supply curve computes the transmission costs associated + with each supply curve point output by ``reV`` supply curve + aggregation. Transmission costs can either be computed + competitively (where total capacity remaining on the + transmission grid is tracked and updated after each new + connection) or non-competitively (where the cheapest connections + for each supply curve point are allowed regardless of the + remaining transmission grid capacity). In both cases, the + permutation of transmission costs between supply curve points + and transmission grid features should be computed using the + `reVX Least Cost Transmission Paths + <https://github.com/NREL/reVX/tree/main/reVX/least_cost_xmission>`_ + utility. + + Parameters + ---------- + sc_points : str | pandas.DataFrame + Path to CSV or JSON or DataFrame containing supply curve + point summary. Can also be a filepath to a ``reV`` bespoke + HDF5 output file where the ``meta`` dataset has the same + format as the supply curve aggregation output. + + .. Note:: If executing ``reV`` from the command line, this + input can also be ``"PIPELINE"`` to parse the output of + the previous pipeline step and use it as input to this + call. However, note that duplicate executions of any + preceding commands within the pipeline may invalidate this + parsing, meaning the `sc_points` input will have to be + specified manually. + + trans_table : str | pandas.DataFrame | list + Path to CSV or JSON or DataFrame containing supply curve + transmission mapping. This can also be a list of + transmission tables with different line voltage (capacity) + ratings. See the `reVX Least Cost Transmission Paths + <https://github.com/NREL/reVX/tree/main/reVX/least_cost_xmission>`_ + utility to generate these input tables. 
+ sc_features : str | pandas.DataFrame, optional + Path to CSV or JSON or DataFrame containing additional + supply curve features (e.g. transmission multipliers, + regions, etc.). These features will be merged to the + `sc_points` input table on ALL columns that both have in + common. If ``None``, no extra supply curve features are + added. By default, ``None``. + sc_capacity_col : str, optional + Name of capacity column in `trans_sc_table`. The values in + this column determine the size of transmission lines built. + The transmission capital costs per MW and the reinforcement + costs per MW will be returned in terms of these capacity + values. Note that if this column != "capacity", then + "capacity" must also be included in `trans_sc_table` since + those values match the "mean_cf" data (which is used to + calculate LCOT and Total LCOE). This input can be used to, + e.g., size transmission lines based on solar AC capacity ( + ``sc_capacity_col="capacity_ac"``). By default, + ``"capacity"``. + + Examples + -------- + Standard outputs in addition to the values provided in + `sc_points`, produced by + :class:`reV.supply_curve.sc_aggregation.SupplyCurveAggregation`: + + - transmission_multiplier : int | float + Transmission cost multiplier that scales the line cost + but not the tie-in cost in the calculation of LCOT. + - trans_gid : int + Unique transmission feature identifier that each supply + curve point was connected to. + - trans_capacity : float + Total capacity (not available capacity) of the + transmission feature that each supply curve point was + connected to. Default units are MW. + - trans_type : str + Tranmission feature type that each supply curve point + was connected to (e.g. Transline, Substation). + - trans_cap_cost_per_mw : float + Capital cost of connecting each supply curve point to + their respective transmission feature. This value + includes line cost with transmission_multiplier and the + tie-in cost. Default units are $/MW. 
+ - dist_km : float + Distance in km from supply curve point to transmission + connection. + - lcot : float + Levelized cost of connecting to transmission ($/MWh). + - total_lcoe : float + Total LCOE of each supply curve point (mean_lcoe + lcot) + ($/MWh). + - total_lcoe_friction : float + Total LCOE of each supply curve point considering the + LCOE friction scalar from the aggregation step + (mean_lcoe_friction + lcot) ($/MWh). + """ + log_versions(logger) + logger.info('Supply curve points input: {}'.format(sc_points)) + logger.info('Transmission table input: {}'.format(trans_table)) + logger.info('Supply curve capacity column: {}'.format(sc_capacity_col)) + + self._sc_capacity_col = sc_capacity_col + self._sc_points = self._parse_sc_points(sc_points, + sc_features=sc_features) + self._trans_table = self._map_tables(self._sc_points, trans_table, + sc_capacity_col=sc_capacity_col) + self._sc_gids, self._mask = self._parse_sc_gids(self._trans_table) + + def __repr__(self): + msg = "{} with {} points".format(self.__class__.__name__, len(self)) + + return msg + + def __len__(self): + return len(self._sc_gids) + + def __getitem__(self, gid): + if gid not in self._sc_gids: + msg = "Invalid supply curve gid {}".format(gid) + logger.error(msg) + raise KeyError(msg) + + i = self._sc_gids.index(gid) + + return self._sc_points.iloc[i] + + @staticmethod + def _parse_sc_points(sc_points, sc_features=None): + """ + Import supply curve point summary and add any additional features + + Parameters + ---------- + sc_points : str | pandas.DataFrame + Path to .csv or .json or DataFrame containing supply curve point + summary. Can also now be a filepath to a bespoke h5 where the + "meta" dataset has the same format as the sc aggregation output. + sc_features : str | pandas.DataFrame + Path to .csv or .json or DataFrame containing additional supply + curve features, e.g. 
transmission multipliers, regions + + Returns + ------- + sc_points : pandas.DataFrame + DataFrame of supply curve point summary with additional features + added if supplied + """ + if isinstance(sc_points, str) and sc_points.endswith('.h5'): + with Resource(sc_points) as res: + sc_points = res.meta + sc_points.index.name = 'sc_gid' + sc_points = sc_points.reset_index() + else: + sc_points = parse_table(sc_points) + + logger.debug('Supply curve points table imported with columns: {}' + .format(sc_points.columns.values.tolist())) + + if sc_features is not None: + sc_features = parse_table(sc_features) + merge_cols = [c for c in sc_features + if c in sc_points] + sc_points = sc_points.merge(sc_features, on=merge_cols, how='left') + logger.debug('Adding Supply Curve Features table with columns: {}' + .format(sc_features.columns.values.tolist())) + + if 'transmission_multiplier' in sc_points: + col = 'transmission_multiplier' + sc_points.loc[:, col] = sc_points.loc[:, col].fillna(1) + + logger.debug('Final supply curve points table has columns: {}' + .format(sc_points.columns.values.tolist())) + + return sc_points + + @staticmethod + def _get_merge_cols(sc_columns, trans_columns): + """ + Get columns with 'row' or 'col' in them to use for merging + + Parameters + ---------- + sc_columns : list + Columns to search + trans_cols + + Returns + ------- + merge_cols : dict + Columns to merge on which maps the sc columns (keys) to the + corresponding trans table columns (values) + """ + sc_columns = [c for c in sc_columns if c.startswith('sc_')] + trans_columns = [c for c in trans_columns if c.startswith('sc_')] + merge_cols = {} + for c_val in ['row', 'col']: + trans_col = [c for c in trans_columns if c_val in c] + sc_col = [c for c in sc_columns if c_val in c] + if trans_col and sc_col: + merge_cols[sc_col[0]] = trans_col[0] + + if len(merge_cols) != 2: + msg = ('Did not find a unique set of sc row and column ids to ' + 'merge on: {}'.format(merge_cols)) + logger.error(msg) 
+ raise RuntimeError(msg) + + return merge_cols + + @staticmethod + def _parse_trans_table(trans_table): + """ + Import transmission features table + + Parameters + ---------- + trans_table : pd.DataFrame | str + Table mapping supply curve points to transmission features + (either str filepath to table file or pre-loaded dataframe). + + Returns + ------- + trans_table : pd.DataFrame + Loaded transmission feature table. + """ + + trans_table = parse_table(trans_table) + + # Update legacy transmission table columns to match new less ambiguous + # column names: + # trans_gid -> the transmission feature id, legacy name: trans_line_gid + # trans_line_gids -> gids of transmission lines connected to the given + # transmission feature (only used for Substations), + # legacy name: trans_gids + # also xformer_cost_p_mw -> xformer_cost_per_mw (not sure why there + # would be a *_p_mw but here we are...) + rename_map = {'trans_line_gid': 'trans_gid', + 'trans_gids': 'trans_line_gids', + 'xformer_cost_p_mw': 'xformer_cost_per_mw'} + trans_table = trans_table.rename(columns=rename_map) + + if 'dist_mi' in trans_table and 'dist_km' not in trans_table: + trans_table = trans_table.rename(columns={'dist_mi': 'dist_km'}) + trans_table['dist_km'] *= 1.60934 + + drop_cols = ['sc_gid', 'cap_left', 'sc_point_gid'] + drop_cols = [c for c in drop_cols if c in trans_table] + if drop_cols: + trans_table = trans_table.drop(columns=drop_cols) + + return trans_table + + @staticmethod + def _map_trans_capacity(trans_sc_table, sc_capacity_col='capacity'): + """ + Map SC gids to transmission features based on capacity. For any SC + gids with capacity > the maximum transmission feature capacity, map + SC gids to the feature with the largest capacity + + Parameters + ---------- + trans_sc_table : pandas.DataFrame + Table mapping supply curve points to transmission features. + sc_capacity_col : str, optional + Name of capacity column in `trans_sc_table`. 
The values in + this column determine the size of transmission lines built. + The transmission capital costs per MW and the reinforcement + costs per MW will be returned in terms of these capacity + values. Note that if this column != "capacity", then + "capacity" must also be included in `trans_sc_table` since + those values match the "mean_cf" data (which is used to + calculate LCOT and Total LCOE). By default, ``"capacity"``. + + Returns + ------- + trans_sc_table : pandas.DataFrame + Updated table mapping supply curve points to transmission features + based on maximum capacity + """ + + nx = trans_sc_table[sc_capacity_col] / trans_sc_table['max_cap'] + nx = np.ceil(nx).astype(int) + trans_sc_table['n_parallel_trans'] = nx + + if (nx > 1).any(): + mask = nx > 1 + tie_line_cost = (trans_sc_table.loc[mask, 'tie_line_cost'] + * nx[mask]) + + xformer_cost = (trans_sc_table.loc[mask, 'xformer_cost_per_mw'] + * trans_sc_table.loc[mask, 'max_cap'] * nx[mask]) + + conn_cost = (xformer_cost + + trans_sc_table.loc[mask, 'sub_upgrade_cost'] + + trans_sc_table.loc[mask, 'new_sub_cost']) + + trans_cap_cost = tie_line_cost + conn_cost + + trans_sc_table.loc[mask, 'tie_line_cost'] = tie_line_cost + trans_sc_table.loc[mask, 'xformer_cost'] = xformer_cost + trans_sc_table.loc[mask, 'connection_cost'] = conn_cost + trans_sc_table.loc[mask, 'trans_cap_cost'] = trans_cap_cost + + msg = ("{} SC points have a capacity that exceeds the maximum " + "transmission feature capacity and will be connected with " + "multiple parallel transmission features." 
+ .format((nx > 1).sum())) + logger.info(msg) + + return trans_sc_table + + @staticmethod + def _parse_trans_line_gids(trans_line_gids): + """ + Parse json string of trans_line_gids if needed + + Parameters + ---------- + trans_line_gids : str | list + list of transmission line 'trans_gid's, if a json string, convert + to list + + Returns + ------- + trans_line_gids : list + list of transmission line 'trans_gid's + """ + if isinstance(trans_line_gids, str): + trans_line_gids = json.loads(trans_line_gids) + + return trans_line_gids + + @classmethod + def _check_sub_trans_lines(cls, features): + """ + Check to make sure all trans-lines are available for all sub-stations + + Parameters + ---------- + features : pandas.DataFrame + Table of transmission feature to check substation to transmission + line gid connections + + Returns + ------- + line_gids : list + List of missing transmission line 'trans_gid's for all substations + in features table + """ + features = features.rename(columns={'trans_line_gid': 'trans_gid', + 'trans_gids': 'trans_line_gids'}) + mask = features['category'].str.lower() == 'substation' + + if not any(mask): + return [] + + line_gids = (features.loc[mask, 'trans_line_gids'] + .apply(cls._parse_trans_line_gids)) + + line_gids = np.unique(np.concatenate(line_gids.values)) + + test = np.isin(line_gids, features['trans_gid'].values) + + return line_gids[~test].tolist() + + @classmethod + def _check_substation_conns(cls, trans_table, sc_cols='sc_gid'): + """ + Run checks on substation transmission features to make sure that + every sc point connecting to a substation can also connect to its + respective transmission lines + + Parameters + ---------- + trans_table : pd.DataFrame + Table mapping supply curve points to transmission features + (should already be merged with SC points). 
+ sc_cols : str | list, optional + Column(s) in trans_table with unique supply curve id, + by default 'sc_gid' + """ + missing = {} + for sc_point, sc_table in trans_table.groupby(sc_cols): + tl_gids = cls._check_sub_trans_lines(sc_table) + if tl_gids: + missing[sc_point] = tl_gids + + if any(missing): + msg = ('The following sc_gid (keys) were connected to substations ' + 'but were not connected to the respective transmission line' + ' gids (values) which is required for full SC sort: {}' + .format(missing)) + logger.error(msg) + raise SupplyCurveInputError(msg) + + @classmethod + def _check_sc_trans_table(cls, sc_points, trans_table): + """Run self checks on sc_points table and the merged trans_table + + Parameters + ---------- + sc_points : pd.DataFrame + Table of supply curve point summary + trans_table : pd.DataFrame + Table mapping supply curve points to transmission features + (should already be merged with SC points). + """ + sc_gids = set(sc_points['sc_gid'].unique()) + trans_sc_gids = set(trans_table['sc_gid'].unique()) + missing = sorted(list(sc_gids - trans_sc_gids)) + if any(missing): + msg = ("There are {} Supply Curve points with missing " + "transmission mappings. Supply curve points with no " + "transmission features will not be connected! " + "Missing sc_gid's: {}" + .format(len(missing), missing)) + logger.warning(msg) + warn(msg) + + if not any(trans_sc_gids) or not any(sc_gids): + msg = ('Merging of sc points table and transmission features ' + 'table failed with {} original sc gids and {} transmission ' + 'sc gids after table merge.' + .format(len(sc_gids), len(trans_sc_gids))) + logger.error(msg) + raise SupplyCurveError(msg) + + logger.debug('There are {} original SC gids and {} sc gids in the ' + 'merged transmission table.' 
+ .format(len(sc_gids), len(trans_sc_gids))) + logger.debug('Transmission Table created with columns: {}' + .format(trans_table.columns.values.tolist())) + + @classmethod + def _merge_sc_trans_tables(cls, sc_points, trans_table, + sc_cols=('sc_gid', 'capacity', 'mean_cf', + 'mean_lcoe'), + sc_capacity_col='capacity'): + """ + Merge the supply curve table with the transmission features table. + + Parameters + ---------- + sc_points : pd.DataFrame + Table of supply curve point summary + trans_table : pd.DataFrame | str + Table mapping supply curve points to transmission features + (either str filepath to table file, list of filepaths to tables by + line voltage (capacity) or pre-loaded dataframe). + sc_cols : tuple | list, optional + List of column from sc_points to transfer into the trans table, + If the `sc_capacity_col` is not included, it will get added. + by default ('sc_gid', 'capacity', 'mean_cf', 'mean_lcoe') + sc_capacity_col : str, optional + Name of capacity column in `trans_sc_table`. The values in + this column determine the size of transmission lines built. + The transmission capital costs per MW and the reinforcement + costs per MW will be returned in terms of these capacity + values. Note that if this column != "capacity", then + "capacity" must also be included in `trans_sc_table` since + those values match the "mean_cf" data (which is used to + calculate LCOT and Total LCOE). By default, ``"capacity"``. + + Returns + ------- + trans_sc_table : pd.DataFrame + Updated table mapping supply curve points to transmission features. 
+ This is performed by an inner merging with trans_table + """ + if sc_capacity_col not in sc_cols: + sc_cols = tuple([sc_capacity_col] + list(sc_cols)) + + if isinstance(trans_table, (list, tuple)): + trans_sc_table = [] + for table in trans_table: + trans_sc_table.append(cls._merge_sc_trans_tables( + sc_points, table, sc_cols=sc_cols, + sc_capacity_col=sc_capacity_col)) + + trans_sc_table = pd.concat(trans_sc_table) + else: + trans_table = cls._parse_trans_table(trans_table) + + merge_cols = cls._get_merge_cols(sc_points.columns, + trans_table.columns) + logger.info('Merging SC table and Trans Table with ' + '{} mapping: {}' + .format('sc_table_col: trans_table_col', merge_cols)) + sc_points = sc_points.rename(columns=merge_cols) + merge_cols = list(merge_cols.values()) + + if isinstance(sc_cols, tuple): + sc_cols = list(sc_cols) + + if 'mean_lcoe_friction' in sc_points: + sc_cols.append('mean_lcoe_friction') + + if 'transmission_multiplier' in sc_points: + sc_cols.append('transmission_multiplier') + + sc_cols += merge_cols + sc_points = sc_points[sc_cols].copy() + trans_sc_table = trans_table.merge(sc_points, on=merge_cols, + how='inner') + + return trans_sc_table + + @classmethod + def _map_tables(cls, sc_points, trans_table, + sc_cols=('sc_gid', 'capacity', 'mean_cf', 'mean_lcoe'), + sc_capacity_col='capacity'): + """ + Map supply curve points to transmission features + + Parameters + ---------- + sc_points : pd.DataFrame + Table of supply curve point summary + trans_table : pd.DataFrame | str + Table mapping supply curve points to transmission features + (either str filepath to table file, list of filepaths to tables by + line voltage (capacity) or pre-loaded DataFrame). + sc_cols : tuple | list, optional + List of column from sc_points to transfer into the trans table, + If the `sc_capacity_col` is not included, it will get added. 
+ by default ('sc_gid', 'capacity', 'mean_cf', 'mean_lcoe') + sc_capacity_col : str, optional + Name of capacity column in `trans_sc_table`. The values in + this column determine the size of transmission lines built. + The transmission capital costs per MW and the reinforcement + costs per MW will be returned in terms of these capacity + values. Note that if this column != "capacity", then + "capacity" must also be included in `trans_sc_table` since + those values match the "mean_cf" data (which is used to + calculate LCOT and Total LCOE). By default, ``"capacity"``. + + Returns + ------- + trans_sc_table : pd.DataFrame + Updated table mapping supply curve points to transmission features. + This is performed by an inner merging with trans_table + """ + scc = sc_capacity_col + trans_sc_table = cls._merge_sc_trans_tables(sc_points, trans_table, + sc_cols=sc_cols, + sc_capacity_col=scc) + + if 'max_cap' in trans_sc_table: + trans_sc_table = cls._map_trans_capacity(trans_sc_table, + sc_capacity_col=scc) + + trans_sc_table = \ + trans_sc_table.sort_values( + ['sc_gid', 'trans_gid']).reset_index(drop=True) + + cls._check_sc_trans_table(sc_points, trans_sc_table) + + return trans_sc_table + + @staticmethod + def _create_handler(trans_table, trans_costs=None, avail_cap_frac=1): + """ + Create TransmissionFeatures handler from supply curve transmission + mapping table. Update connection costs if given. 
+ + Parameters + ---------- + trans_table : str | pandas.DataFrame + Path to .csv or .json or DataFrame containing supply curve + transmission mapping + trans_costs : str | dict + Transmission feature costs to use with TransmissionFeatures + handler: line_tie_in_cost, line_cost, station_tie_in_cost, + center_tie_in_cost, sink_tie_in_cost + avail_cap_frac: int, optional + Fraction of transmissions features capacity 'ac_cap' to make + available for connection to supply curve points, by default 1 + + Returns + ------- + trans_features : TransmissionFeatures + TransmissionFeatures or TransmissionCosts instance initilized + with specified transmission costs + """ + if trans_costs is not None: + kwargs = TF._parse_dictionary(trans_costs) + else: + kwargs = {} + + trans_features = TF(trans_table, avail_cap_frac=avail_cap_frac, + **kwargs) + + return trans_features + + @staticmethod + def _parse_sc_gids(trans_table, gid_key='sc_gid'): + """Extract unique sc gids, make bool mask from tranmission table + + Parameters + ---------- + trans_table : pd.DataFrame + reV Supply Curve table joined with transmission features table. + gid_key : str + Column label in trans_table containing the supply curve points + primary key. + + Returns + ------- + sc_gids : list + List of unique integer supply curve gids (non-nan) + mask : np.ndarray + Boolean array initialized as true. Length is equal to the maximum + SC gid so that the SC gids can be used to index the mask directly. 
+ """ + sc_gids = list(np.sort(trans_table[gid_key].unique())) + sc_gids = [int(gid) for gid in sc_gids] + mask = np.ones(int(1 + max(sc_gids)), dtype=bool) + + return sc_gids, mask + + @staticmethod + def _get_capacity(sc_gid, sc_table, connectable=True, + sc_capacity_col='capacity'): + """ + Get capacity of supply curve point + + Parameters + ---------- + sc_gid : int + Supply curve gid + sc_table : pandas.DataFrame + DataFrame of sc point to transmission features mapping for given + sc_gid + connectable : bool, optional + Flag to ensure SC point can connect to transmission features, + by default True + sc_capacity_col : str, optional + Name of capacity column in `trans_sc_table`. The values in + this column determine the size of transmission lines built. + The transmission capital costs per MW and the reinforcement + costs per MW will be returned in terms of these capacity + values. Note that if this column != "capacity", then + "capacity" must also be included in `trans_sc_table` since + those values match the "mean_cf" data (which is used to + calculate LCOT and Total LCOE). By default, ``"capacity"``. 
+ + Returns + ------- + capacity : float + Capacity of supply curve point + """ + if connectable: + capacity = sc_table[sc_capacity_col].unique() + if len(capacity) == 1: + capacity = capacity[0] + else: + msg = ('Each supply curve point should only have ' + 'a single capacity, but {} has {}' + .format(sc_gid, capacity)) + logger.error(msg) + raise RuntimeError(msg) + else: + capacity = None + + return capacity + + @classmethod + def _compute_trans_cap_cost(cls, trans_table, trans_costs=None, + avail_cap_frac=1, max_workers=None, + connectable=True, line_limited=False, + sc_capacity_col='capacity'): + """ + Compute levelized cost of transmission for all combinations of + supply curve points and tranmission features in trans_table + + Parameters + ---------- + trans_table : pd.DataFrame + Table mapping supply curve points to transmission features + MUST contain `sc_capacity_col` column. + fcr : float + Fixed charge rate needed to compute LCOT + trans_costs : str | dict + Transmission feature costs to use with TransmissionFeatures + handler: line_tie_in_cost, line_cost, station_tie_in_cost, + center_tie_in_cost, sink_tie_in_cost + avail_cap_frac: int, optional + Fraction of transmissions features capacity 'ac_cap' to make + available for connection to supply curve points, by default 1 + max_workers : int | NoneType + Number of workers to use to compute lcot, if > 1 run in parallel. + None uses all available cpu's. + connectable : bool, optional + Flag to only compute tranmission capital cost if transmission + feature has enough available capacity, by default True + line_limited : bool + Substation connection is limited by maximum capacity of the + attached lines, legacy method + sc_capacity_col : str, optional + Name of capacity column in `trans_sc_table`. The values in + this column determine the size of transmission lines built. + The transmission capital costs per MW and the reinforcement + costs per MW will be returned in terms of these capacity + values. 
Note that if this column != "capacity", then + "capacity" must also be included in `trans_sc_table` since + those values match the "mean_cf" data (which is used to + calculate LCOT and Total LCOE). By default, ``"capacity"``. + + Returns + ------- + lcot : list + Levelized cost of transmission for all supply curve - + tranmission feature connections + cost : list + Capital cost of tramsmission for all supply curve - transmission + feature connections + """ + scc = sc_capacity_col + if scc not in trans_table: + raise SupplyCurveInputError('Supply curve table must have ' + 'supply curve point capacity column' + '({}) to compute lcot'.format(scc)) + + if trans_costs is not None: + trans_costs = TF._parse_dictionary(trans_costs) + else: + trans_costs = {} + + if max_workers is None: + max_workers = os.cpu_count() + + logger.info('Computing LCOT costs for all possible connections...') + groups = trans_table.groupby('sc_gid') + if max_workers > 1: + loggers = [__name__, 'reV.handlers.transmission', 'reV'] + with SpawnProcessPool(max_workers=max_workers, + loggers=loggers) as exe: + futures = [] + for sc_gid, sc_table in groups: + capacity = cls._get_capacity(sc_gid, sc_table, + connectable=connectable, + sc_capacity_col=scc) + futures.append(exe.submit(TC.feature_costs, sc_table, + capacity=capacity, + avail_cap_frac=avail_cap_frac, + line_limited=line_limited, + **trans_costs)) + + cost = [future.result() for future in futures] + else: + cost = [] + for sc_gid, sc_table in groups: + capacity = cls._get_capacity(sc_gid, sc_table, + connectable=connectable, + sc_capacity_col=scc) + cost.append(TC.feature_costs(sc_table, + capacity=capacity, + avail_cap_frac=avail_cap_frac, + line_limited=line_limited, + **trans_costs)) + + cost = np.hstack(cost).astype('float32') + logger.info('LCOT cost calculation is complete.') + + return cost + +
[docs] def compute_total_lcoe(self, fcr, transmission_costs=None, + avail_cap_frac=1, line_limited=False, + connectable=True, max_workers=None, + consider_friction=True): + """ + Compute LCOT and total LCOE for all sc point to transmission feature + connections + + Parameters + ---------- + fcr : float + Fixed charge rate, used to compute LCOT + transmission_costs : str | dict, optional + Transmission feature costs to use with TransmissionFeatures + handler: line_tie_in_cost, line_cost, station_tie_in_cost, + center_tie_in_cost, sink_tie_in_cost, by default None + avail_cap_frac : int, optional + Fraction of transmissions features capacity 'ac_cap' to make + available for connection to supply curve points, by default 1 + line_limited : bool, optional + Flag to have substation connection is limited by maximum capacity + of the attached lines, legacy method, by default False + connectable : bool, optional + Flag to only compute tranmission capital cost if transmission + feature has enough available capacity, by default True + max_workers : int | NoneType, optional + Number of workers to use to compute lcot, if > 1 run in parallel. + None uses all available cpu's. 
by default None + consider_friction : bool, optional + Flag to consider friction layer on LCOE when "mean_lcoe_friction" + is in the sc points input, by default True + """ + if 'trans_cap_cost' not in self._trans_table: + scc = self._sc_capacity_col + cost = self._compute_trans_cap_cost(self._trans_table, + trans_costs=transmission_costs, + avail_cap_frac=avail_cap_frac, + line_limited=line_limited, + connectable=connectable, + max_workers=max_workers, + sc_capacity_col=scc) + self._trans_table['trans_cap_cost_per_mw'] = cost # $/MW + else: + cost = self._trans_table['trans_cap_cost'].values.copy() # $ + cost /= self._trans_table[self._sc_capacity_col] # $/MW + self._trans_table['trans_cap_cost_per_mw'] = cost + + cost *= self._trans_table[self._sc_capacity_col] + cost /= self._trans_table['capacity'] # align with "mean_cf" + + if 'reinforcement_cost_per_mw' in self._trans_table: + logger.info("'reinforcement_cost_per_mw' column found in " + "transmission table. Adding reinforcement costs " + "to total LCOE.") + cf_mean_arr = self._trans_table['mean_cf'].values + lcot = (cost * fcr) / (cf_mean_arr * 8760) + lcoe = lcot + self._trans_table['mean_lcoe'] + self._trans_table['lcot_no_reinforcement'] = lcot + self._trans_table['lcoe_no_reinforcement'] = lcoe + r_cost = (self._trans_table['reinforcement_cost_per_mw'] + .values.copy()) + r_cost *= self._trans_table[self._sc_capacity_col] + r_cost /= self._trans_table['capacity'] # align with "mean_cf" + cost += r_cost # $/MW + + cf_mean_arr = self._trans_table['mean_cf'].values + lcot = (cost * fcr) / (cf_mean_arr * 8760) + + self._trans_table['lcot'] = lcot + self._trans_table['total_lcoe'] = (self._trans_table['lcot'] + + self._trans_table['mean_lcoe']) + + if consider_friction: + self._calculate_total_lcoe_friction()
+ + def _calculate_total_lcoe_friction(self): + """Look for site mean LCOE with friction in the trans table and if + found make a total LCOE column with friction.""" + + if 'mean_lcoe_friction' in self._trans_table: + lcoe_friction = (self._trans_table['lcot'] + + self._trans_table['mean_lcoe_friction']) + self._trans_table['total_lcoe_friction'] = lcoe_friction + logger.info('Found mean LCOE with friction. Adding key ' + '"total_lcoe_friction" to trans table.') + + def _exclude_noncompetitive_wind_farms(self, comp_wind_dirs, sc_gid, + downwind=False): + """ + Exclude non-competitive wind farms for given sc_gid + + Parameters + ---------- + comp_wind_dirs : CompetitiveWindFarms + Pre-initilized CompetitiveWindFarms instance + sc_gid : int + Supply curve gid to exclude non-competitive wind farms around + downwind : bool, optional + Flag to remove downwind neighbors as well as upwind neighbors, + by default False + + Returns + ------- + comp_wind_dirs : CompetitiveWindFarms + updated CompetitiveWindFarms instance + """ + gid = comp_wind_dirs.check_sc_gid(sc_gid) + if gid is not None: + if comp_wind_dirs.mask[gid]: + exclude_gids = comp_wind_dirs['upwind', gid] + if downwind: + exclude_gids = np.append(exclude_gids, + comp_wind_dirs['downwind', gid]) + for n in exclude_gids: + check = comp_wind_dirs.exclude_sc_point_gid(n) + if check: + sc_gids = comp_wind_dirs['sc_gid', n] + for sc_id in sc_gids: + if self._mask[sc_id]: + logger.debug('Excluding sc_gid {}' + .format(sc_id)) + self._mask[sc_id] = False + + return comp_wind_dirs + +
[docs] @staticmethod + def add_sum_cols(table, sum_cols): + """Add a summation column to table. + + Parameters + ---------- + table : pd.DataFrame + Supply curve table. + sum_cols : dict + Mapping of new column label(s) to multiple column labels to sum. + Example: sum_col={'total_cap_cost': ['cap_cost1', 'cap_cost2']} + Which would add a new 'total_cap_cost' column which would be the + sum of 'cap_cost1' and 'cap_cost2' if they are present in table. + + Returns + ------- + table : pd.DataFrame + Supply curve table with additional summation columns. + """ + + for new_label, sum_labels in sum_cols.items(): + missing = [s for s in sum_labels if s not in table] + + if any(missing): + logger.info('Could not make sum column "{}", missing: {}' + .format(new_label, missing)) + else: + sum_arr = np.zeros(len(table)) + for s in sum_labels: + temp = table[s].values + temp[np.isnan(temp)] = 0 + sum_arr += temp + + table[new_label] = sum_arr + + return table
+ + def _full_sort(self, trans_table, trans_costs=None, + avail_cap_frac=1, comp_wind_dirs=None, + total_lcoe_fric=None, sort_on='total_lcoe', + columns=('trans_gid', 'trans_capacity', 'trans_type', + 'trans_cap_cost_per_mw', 'dist_km', 'lcot', + 'total_lcoe'), + downwind=False): + """ + Internal method to handle full supply curve sorting + + Parameters + ---------- + trans_table : pandas.DataFrame + Supply Curve Tranmission table to sort on + trans_costs : str | dict, optional + Transmission feature costs to use with TransmissionFeatures + handler: line_tie_in_cost, line_cost, station_tie_in_cost, + center_tie_in_cost, sink_tie_in_cost, by default None + avail_cap_frac : int, optional + Fraction of transmissions features capacity 'ac_cap' to make + available for connection to supply curve points, by default 1 + comp_wind_dirs : CompetitiveWindFarms, optional + Pre-initilized CompetitiveWindFarms instance, by default None + total_lcoe_fric : ndarray, optional + Vector of lcoe friction values, by default None + sort_on : str, optional + Column label to sort the Supply Curve table on. 
This affects the + build priority - connections with the lowest value in this column + will be built first, by default 'total_lcoe' + columns : tuple, optional + Columns to preserve in output connections dataframe, + by default ('trans_gid', 'trans_capacity', 'trans_type', + 'trans_cap_cost_per_mw', 'dist_km', 'lcot', + 'total_lcoe') + downwind : bool, optional + Flag to remove downwind neighbors as well as upwind neighbors, + by default False + + Returns + ------- + supply_curve : pandas.DataFrame + Updated sc_points table with transmission connections, LCOT + and LCOE+LCOT based on full supply curve connections + """ + trans_features = self._create_handler(self._trans_table, + trans_costs=trans_costs, + avail_cap_frac=avail_cap_frac) + init_list = [np.nan] * int(1 + np.max(self._sc_gids)) + columns = list(columns) + if sort_on not in columns: + columns.append(sort_on) + + conn_lists = {k: deepcopy(init_list) for k in columns} + + trans_sc_gids = trans_table['sc_gid'].values.astype(int) + + # syntax is final_key: source_key (source from trans_table) + all_cols = {k: k for k in columns} + essentials = {'trans_gid': 'trans_gid', + 'trans_capacity': 'avail_cap', + 'trans_type': 'category', + 'dist_km': 'dist_km', + 'trans_cap_cost_per_mw': 'trans_cap_cost_per_mw', + 'lcot': 'lcot', + 'total_lcoe': 'total_lcoe', + } + all_cols.update(essentials) + + arrays = {final_key: trans_table[source_key].values + for final_key, source_key in all_cols.items()} + + sc_capacities = trans_table[self._sc_capacity_col].values + + connected = 0 + progress = 0 + for i in range(len(trans_table)): + sc_gid = trans_sc_gids[i] + if self._mask[sc_gid]: + connect = trans_features.connect(arrays['trans_gid'][i], + sc_capacities[i]) + if connect: + connected += 1 + logger.debug('Connecting sc gid {}'.format(sc_gid)) + self._mask[sc_gid] = False + + for col_name, data_arr in arrays.items(): + conn_lists[col_name][sc_gid] = data_arr[i] + + if total_lcoe_fric is not None: + 
conn_lists['total_lcoe_friction'][sc_gid] = \ + total_lcoe_fric[i] + + current_prog = connected // (len(self) / 100) + if current_prog > progress: + progress = current_prog + logger.info('{} % of supply curve points connected' + .format(progress)) + + if comp_wind_dirs is not None: + comp_wind_dirs = \ + self._exclude_noncompetitive_wind_farms( + comp_wind_dirs, sc_gid, downwind=downwind) + + index = range(0, int(1 + np.max(self._sc_gids))) + connections = pd.DataFrame(conn_lists, index=index) + connections.index.name = 'sc_gid' + connections = connections.dropna(subset=[sort_on]) + connections = connections[columns].reset_index() + + sc_gids = self._sc_points['sc_gid'].values + connected = connections['sc_gid'].values + logger.debug('Connected gids {} out of total supply curve gids {}' + .format(len(connected), len(sc_gids))) + unconnected = ~np.isin(sc_gids, connected) + unconnected = sc_gids[unconnected].tolist() + + if unconnected: + msg = ("{} supply curve points were not connected to tranmission! " + "Unconnected sc_gid's: {}" + .format(len(unconnected), unconnected)) + logger.warning(msg) + warn(msg) + + supply_curve = self._sc_points.merge(connections, on='sc_gid') + + return supply_curve.reset_index(drop=True) + + def _check_feature_capacity(self, avail_cap_frac=1): + """ + Add the transmission connection feature capacity to the trans table if + needed + """ + if 'avail_cap' not in self._trans_table: + kwargs = {'avail_cap_frac': avail_cap_frac} + fc = TF.feature_capacity(self._trans_table, **kwargs) + self._trans_table = self._trans_table.merge(fc, on='trans_gid') + + def _adjust_output_columns(self, columns, consider_friction): + """Add extra output columns, if needed. 
""" + # These are essentially should-be-defaults that are not + # backwards-compatible, so have to explicitly check for them + extra_cols = ['ba_str', 'poi_lat', 'poi_lon', 'reinforcement_poi_lat', + 'reinforcement_poi_lon', 'eos_mult', 'reg_mult', + 'reinforcement_cost_per_mw', 'reinforcement_dist_km', + 'n_parallel_trans', 'total_lcoe_friction'] + if not consider_friction: + extra_cols -= {'total_lcoe_friction'} + + extra_cols = [col for col in extra_cols + if col in self._trans_table and col not in columns] + + return columns + extra_cols + + def _determine_sort_on(self, sort_on): + """Determine the `sort_on` column from user input and trans table""" + if 'reinforcement_cost_per_mw' in self._trans_table: + sort_on = sort_on or "lcoe_no_reinforcement" + return sort_on or 'total_lcoe' + +
[docs] def full_sort(self, fcr, transmission_costs=None, + avail_cap_frac=1, line_limited=False, + connectable=True, max_workers=None, + consider_friction=True, sort_on=None, + columns=('trans_gid', 'trans_capacity', 'trans_type', + 'trans_cap_cost_per_mw', 'dist_km', 'lcot', + 'total_lcoe'), + wind_dirs=None, n_dirs=2, downwind=False, + offshore_compete=False): + """ + run full supply curve sorting + + Parameters + ---------- + fcr : float + Fixed charge rate, used to compute LCOT + transmission_costs : str | dict, optional + Transmission feature costs to use with TransmissionFeatures + handler: line_tie_in_cost, line_cost, station_tie_in_cost, + center_tie_in_cost, sink_tie_in_cost, by default None + avail_cap_frac : int, optional + Fraction of transmissions features capacity 'ac_cap' to make + available for connection to supply curve points, by default 1 + line_limited : bool, optional + Flag to have substation connection is limited by maximum capacity + of the attached lines, legacy method, by default False + connectable : bool, optional + Flag to only compute tranmission capital cost if transmission + feature has enough available capacity, by default True + max_workers : int | NoneType, optional + Number of workers to use to compute lcot, if > 1 run in parallel. + None uses all available cpu's. by default None + consider_friction : bool, optional + Flag to consider friction layer on LCOE when "mean_lcoe_friction" + is in the sc points input, by default True + sort_on : str, optional + Column label to sort the Supply Curve table on. This affects the + build priority - connections with the lowest value in this column + will be built first, by default `None`, which will use + total LCOE without any reinforcement costs as the sort value. 
+ columns : list | tuple, optional + Columns to preserve in output connections dataframe, + by default ('trans_gid', 'trans_capacity', 'trans_type', + 'trans_cap_cost_per_mw', 'dist_km', 'lcot', 'total_lcoe') + wind_dirs : pandas.DataFrame | str, optional + path to .csv or reVX.wind_dirs.wind_dirs.WindDirs output with + the neighboring supply curve point gids and power-rose value at + each cardinal direction, by default None + n_dirs : int, optional + Number of prominent directions to use, by default 2 + downwind : bool, optional + Flag to remove downwind neighbors as well as upwind neighbors, + by default False + offshore_compete : bool, default + Flag as to whether offshore farms should be included during + CompetitiveWindFarms, by default False + + Returns + ------- + supply_curve : pandas.DataFrame + Updated sc_points table with transmission connections, LCOT + and LCOE+LCOT based on full supply curve connections + """ + logger.info('Starting full competitive supply curve sort.') + self._check_substation_conns(self._trans_table) + self.compute_total_lcoe(fcr, transmission_costs=transmission_costs, + avail_cap_frac=avail_cap_frac, + line_limited=line_limited, + connectable=connectable, + max_workers=max_workers, + consider_friction=consider_friction) + self._check_feature_capacity(avail_cap_frac=avail_cap_frac) + + if isinstance(columns, tuple): + columns = list(columns) + + columns = self._adjust_output_columns(columns, consider_friction) + sort_on = self._determine_sort_on(sort_on) + + trans_table = self._trans_table.copy() + pos = trans_table['lcot'].isnull() + trans_table = trans_table.loc[~pos].sort_values([sort_on, 'trans_gid']) + + total_lcoe_fric = None + if consider_friction and 'mean_lcoe_friction' in trans_table: + total_lcoe_fric = trans_table['total_lcoe_friction'].values + + comp_wind_dirs = None + if wind_dirs is not None: + msg = "Excluding {} upwind".format(n_dirs) + if downwind: + msg += " and downwind" + + msg += " onshore" + if 
offshore_compete: + msg += " and offshore" + + msg += " windfarms" + logger.info(msg) + comp_wind_dirs = CompetitiveWindFarms(wind_dirs, + self._sc_points, + n_dirs=n_dirs, + offshore=offshore_compete) + + supply_curve = self._full_sort(trans_table, + trans_costs=transmission_costs, + avail_cap_frac=avail_cap_frac, + comp_wind_dirs=comp_wind_dirs, + total_lcoe_fric=total_lcoe_fric, + sort_on=sort_on, columns=columns, + downwind=downwind) + + return supply_curve
+ +
[docs] def simple_sort(self, fcr, transmission_costs=None, + avail_cap_frac=1, max_workers=None, + consider_friction=True, sort_on=None, + columns=('trans_gid', 'trans_type', 'lcot', 'total_lcoe', + 'dist_km', 'trans_cap_cost_per_mw'), + wind_dirs=None, n_dirs=2, downwind=False, + offshore_compete=False): + """ + Run simple supply curve sorting that does not take into account + available capacity + + Parameters + ---------- + fcr : float + Fixed charge rate, used to compute LCOT + transmission_costs : str | dict, optional + Transmission feature costs to use with TransmissionFeatures + handler: line_tie_in_cost, line_cost, station_tie_in_cost, + center_tie_in_cost, sink_tie_in_cost, by default None + avail_cap_frac : int, optional + Fraction of transmissions features capacity 'ac_cap' to make + available for connection to supply curve points, by default 1 + line_limited : bool, optional + Flag to have substation connection is limited by maximum capacity + of the attached lines, legacy method, by default False + connectable : bool, optional + Flag to only compute tranmission capital cost if transmission + feature has enough available capacity, by default True + max_workers : int | NoneType, optional + Number of workers to use to compute lcot, if > 1 run in parallel. + None uses all available cpu's. by default None + consider_friction : bool, optional + Flag to consider friction layer on LCOE when "mean_lcoe_friction" + is in the sc points input, by default True + sort_on : str, optional + Column label to sort the Supply Curve table on. This affects the + build priority - connections with the lowest value in this column + will be built first, by default `None`, which will use + total LCOE without any reinforcement costs as the sort value. 
+ columns : list | tuple, optional + Columns to preserve in output connections dataframe, + by default ('trans_gid', 'trans_capacity', 'trans_type', + 'trans_cap_cost_per_mw', 'dist_km', 'lcot', 'total_lcoe') + wind_dirs : pandas.DataFrame | str, optional + path to .csv or reVX.wind_dirs.wind_dirs.WindDirs output with + the neighboring supply curve point gids and power-rose value at + each cardinal direction, by default None + n_dirs : int, optional + Number of prominent directions to use, by default 2 + downwind : bool, optional + Flag to remove downwind neighbors as well as upwind neighbors + offshore_compete : bool, default + Flag as to whether offshore farms should be included during + CompetitiveWindFarms, by default False + + Returns + ------- + supply_curve : pandas.DataFrame + Updated sc_points table with transmission connections, LCOT + and LCOE+LCOT based on simple supply curve connections + """ + logger.info('Starting simple supply curve sort (no capacity limits).') + self.compute_total_lcoe(fcr, transmission_costs=transmission_costs, + avail_cap_frac=avail_cap_frac, + connectable=False, + max_workers=max_workers, + consider_friction=consider_friction) + trans_table = self._trans_table.copy() + + if isinstance(columns, tuple): + columns = list(columns) + + columns = self._adjust_output_columns(columns, consider_friction) + sort_on = self._determine_sort_on(sort_on) + + connections = trans_table.sort_values([sort_on, 'trans_gid']) + connections = connections.groupby('sc_gid').first() + rename = {'trans_gid': 'trans_gid', + 'category': 'trans_type'} + connections = connections.rename(columns=rename) + connections = connections[columns].reset_index() + + supply_curve = self._sc_points.merge(connections, on='sc_gid') + if wind_dirs is not None: + supply_curve = \ + CompetitiveWindFarms.run(wind_dirs, + supply_curve, + n_dirs=n_dirs, + offshore=offshore_compete, + sort_on=sort_on, + downwind=downwind) + + supply_curve = supply_curve.reset_index(drop=True) + + 
return supply_curve
+ +
[docs] def run(self, out_fpath, fixed_charge_rate, simple=True, avail_cap_frac=1, + line_limited=False, transmission_costs=None, + consider_friction=True, sort_on=None, + columns=('trans_gid', 'trans_type', 'trans_cap_cost_per_mw', + 'dist_km', 'lcot', 'total_lcoe'), + max_workers=None, competition=None): + """Run Supply Curve Transmission calculations. + + Run full supply curve taking into account available capacity of + tranmission features when making connections. + + Parameters + ---------- + out_fpath : str + Full path to output CSV file. Does not need to include file + ending - it will be added automatically if missing. + fixed_charge_rate : float + Fixed charge rate, (in decimal form: 5% = 0.05). This value + is used to compute LCOT. + simple : bool, optional + Option to run the simple sort (does not keep track of + capacity available on the existing transmission grid). If + ``False``, a full transmission sort (where connections are + limited based on available transmission capacity) is run. + Note that the full transmission sort requires the + `avail_cap_frac` and `line_limited` inputs. + By default, ``True``. + avail_cap_frac : int, optional + This input has no effect if ``simple=True``. Fraction of + transmissions features capacity ``ac_cap`` to make available + for connection to supply curve points. By default, ``1``. + line_limited : bool, optional + This input has no effect if ``simple=True``. Flag to have + substation connection limited by maximum capacity + of the attached lines. This is a legacy method. + By default, ``False``. + transmission_costs : str | dict, optional + Dictionary of transmission feature costs or path to JSON + file containing a dictionary of transmission feature costs. + These costs are used to compute transmission capital cost + if the input transmission tables do not have a + ``"trans_cap_cost"`` column (this input is ignored + otherwise). 
The dictionary must include: + + - line_tie_in_cost + - line_cost + - station_tie_in_cost + - center_tie_in_cost + - sink_tie_in_cost + + By default, ``None``. + consider_friction : bool, optional + Flag to add a new ``"total_lcoe_friction"`` column to the + supply curve output that contains the sum of the computed + ``"total_lcoe"`` value and the input + ``"mean_lcoe_friction"`` values. If ``"mean_lcoe_friction"`` + is not in the `sc_points` input, this option is ignored. + By default, ``True``. + sort_on : str, optional + Column label to sort the supply curve table on. This affects + the build priority when doing a "full" sort - connections + with the lowest value in this column will be built first. + For a "simple" sort, only connections with the lowest value + in this column will be considered. If ``None``, the sort is + performed on the total LCOE *without* any reinforcement + costs added (this is typically what you want - it avoids + unrealistically long spur-line connections). + By default ``None``. + columns : list | tuple, optional + Columns to preserve in output supply curve dataframe. + By default, ``('trans_gid', 'trans_type', + 'trans_cap_cost_per_mw', 'dist_km', 'lcot', 'total_lcoe')``. + max_workers : int, optional + Number of workers to use to compute LCOT. If > 1, + computation is run in parallel. If ``None``, computation + uses all available CPU's. By default, ``None``. + competition : dict, optional + Optional dictionary of arguments for competitive wind farm + exclusions, which removes supply curve points upwind (and + optionally downwind) of the lowest LCOE supply curves. + If ``None``, no competition is applied. Otherwise, this + dictionary can have up to four keys: + + - ``wind_dirs`` (required) : A path to a CSV file or + :py:class:`reVX ProminentWindDirections + <reVX.wind_dirs.prominent_wind_dirs.ProminentWindDirections>` + output with the neighboring supply curve point gids + and power-rose values at each cardinal direction. 
+ - ``n_dirs`` (optional) : An integer representing the + number of prominent directions to use during wind farm + competition. By default, ``2``. + - ``downwind`` (optional) : A flag indicating that + downwind neighbors should be removed in addition to + upwind neighbors during wind farm competition. + By default, ``False``. + - ``offshore_compete`` (optional) : A flag indicating + that offshore farms should be included during wind + farm competition. By default, ``False``. + + By default ``None``. + + Returns + ------- + str + Path to output supply curve. + """ + kwargs = {"fcr": fixed_charge_rate, + "transmission_costs": transmission_costs, + "consider_friction": consider_friction, + "sort_on": sort_on, + "columns": columns, + "max_workers": max_workers} + kwargs.update(competition or {}) + + if simple: + supply_curve = self.simple_sort(**kwargs) + else: + kwargs["avail_cap_frac"] = avail_cap_frac + kwargs["line_limited"] = line_limited + supply_curve = self.full_sort(**kwargs) + + out_fpath = _format_sc_out_fpath(out_fpath) + supply_curve.to_csv(out_fpath, index=False) + + return out_fpath
+ + +def _format_sc_out_fpath(out_fpath): + """Add CSV file ending and replace underscore, if necessary.""" + if not out_fpath.endswith(".csv"): + out_fpath = '{}.csv'.format(out_fpath) + + project_dir, out_fn = os.path.split(out_fpath) + out_fn = out_fn.replace("supply_curve", "supply-curve") + return os.path.join(project_dir, out_fn) +
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/supply_curve/tech_mapping.html b/_modules/reV/supply_curve/tech_mapping.html new file mode 100644 index 000000000..f32b9caba --- /dev/null +++ b/_modules/reV/supply_curve/tech_mapping.html @@ -0,0 +1,1085 @@ + + + + + + reV.supply_curve.tech_mapping — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + + +
  • +
  • +
+
+
+
+
+ +

Source code for reV.supply_curve.tech_mapping

+# -*- coding: utf-8 -*-
+"""reV tech mapping framework.
+
+This module manages the exclusions-to-resource mapping.
+The core of this module is a parallel cKDTree.
+
+Created on Fri Jun 21 16:05:47 2019
+
+@author: gbuster
+"""
+from concurrent.futures import as_completed
+import h5py
+import logging
+from math import ceil
+import numpy as np
+import os
+from scipy.spatial import cKDTree
+from warnings import warn
+
+from reV.supply_curve.extent import SupplyCurveExtent
+from reV.utilities.exceptions import FileInputWarning, FileInputError
+
+from rex.resource import Resource
+from rex.utilities.execution import SpawnProcessPool
+from rex.utilities.utilities import res_dist_threshold
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]class TechMapping: + """Framework to create map between tech layer (exclusions), res, and gen""" + + def __init__(self, excl_fpath, res_fpath, sc_resolution=2560, + dist_margin=1.05): + """ + Parameters + ---------- + excl_fpath : str + Filepath to exclusions h5 file, must contain latitude and longitude + arrays to allow for mapping to resource points + res_fpath : str + Filepath to .h5 resource file that we're mapping to. + sc_resolution : int | None, optional + Supply curve resolution, does not affect the exclusion to resource + (tech) mapping, but defines how many exclusion pixels are mapped + at a time, by default 2560 + dist_margin : float, optional + Extra margin to multiply times the computed distance between + neighboring resource points, by default 1.05 + """ + self._excl_fpath = excl_fpath + self._check_fout() + + self._tree, self._dist_thresh = \ + self._build_tree(res_fpath, dist_margin=dist_margin) + + with SupplyCurveExtent(self._excl_fpath, + resolution=sc_resolution) as sc: + self._sc_resolution = sc.resolution + self._gids = np.array(list(range(len(sc))), dtype=np.uint32) + self._excl_shape = sc.exclusions.shape + self._n_excl = np.product(self._excl_shape) + self._sc_row_indices = sc.row_indices + self._sc_col_indices = sc.col_indices + self._excl_row_slices = sc.excl_row_slices + self._excl_col_slices = sc.excl_col_slices + logger.info('Initialized TechMapping object with {} calc chunks ' + 'for {} tech exclusion points' + .format(len(self._gids), self._n_excl)) + + @property + def distance_threshold(self): + """Get the upper bound on NN distance between excl and res points. + + Returns + ------- + float + Estimate the distance between resource points. Calculated as half + of the diagonal between closest resource points, with desired + extra margin + """ + return self._dist_thresh + + @staticmethod + def _build_tree(res_fpath, dist_margin=1.05): + """ + Build cKDTree from resource lat, lon coordinates. 
Compute minimum + intra point distance between resource gids with provided extra margin. + + Parameters + ---------- + res_fpath : str + Filepath to .h5 resource file that we're mapping to. + dist_margin : float, optional + Extra margin to multiply times the computed distance between + neighboring resource points, by default 1.05 + + Returns + ------- + tree : cKDTree + cKDTree built from resource lat, lon coordinates + dist_tresh : float + Estimate the distance between resource points. Calculated as half + of the diagonal between closest resource points, with desired + extra margin + """ + with Resource(res_fpath) as f: + lat_lons = f.lat_lon + + # pylint: disable=not-callable + tree = cKDTree(lat_lons) + + dist_thresh = res_dist_threshold(lat_lons, tree=tree, + margin=dist_margin) + + return tree, dist_thresh + + @staticmethod + def _make_excl_iarr(shape): + """ + Create 2D array of 1D index values for the flattened h5 excl extent + + Parameters + ---------- + shape : tuple + exclusion extent shape + + Returns + ------- + iarr : ndarray + 2D array of 1D index values for the flattened h5 excl extent + """ + iarr = np.arange(np.product(shape), dtype=np.uint32) + + return iarr.reshape(shape) + + @staticmethod + def _get_excl_slices(gid, sc_row_indices, sc_col_indices, excl_row_slices, + excl_col_slices): + """ + Get the row and column slices of the exclusions grid corresponding + to the supply curve point gid. + + Parameters + ---------- + gid : int + Supply curve point gid. + sc_row_indices : list + List of row indices in exclusion array for for every sc_point gid + sc_col_indices : list + List of column indices in exclusion array for for every sc_point + gid + excl_row_slices : list + List representing the supply curve points rows. Each list entry + contains the exclusion row slice that are included in the sc + point. + excl_col_slices : list + List representing the supply curve points columns. 
Each list entry + contains the exclusion columns slice that are included in the sc + point. + + Returns + ------- + row_slice : int + Exclusions grid row index slice corresponding to the sc point gid. + col_slice : int + Exclusions grid col index slice corresponding to the sc point gid. + """ + + row_slice = excl_row_slices[sc_row_indices[gid]] + col_slice = excl_col_slices[sc_col_indices[gid]] + + return row_slice, col_slice + + @classmethod + def _get_excl_coords(cls, excl_fpath, gids, sc_row_indices, sc_col_indices, + excl_row_slices, excl_col_slices, + coord_labels=('latitude', 'longitude')): + """ + Extract the exclusion coordinates for teh desired gids for TechMapping. + + Parameters + ---------- + gids : np.ndarray + Supply curve gids with tech exclusion points to map to the + resource meta points. + excl_fpath : str + Filepath to exclusions h5 file, must contain latitude and longitude + arrays to allow for mapping to resource points + sc_row_indices : list + List of row indices in exclusion array for for every sc_point gid + sc_col_indices : list + List of column indices in exclusion array for for every sc_point + gid + excl_row_slices : list + List representing the supply curve points rows. Each list entry + contains the exclusion row slice that are included in the sc + point. + excl_col_slices : list + List representing the supply curve points columns. Each list entry + contains the exclusion columns slice that are included in the sc + point. + coord_labels : tuple + Labels for the coordinate datasets. + + Returns + ------- + coords_out : list + List of arrays of the un-projected latitude, longitude array of + tech exclusion points. List entries correspond to input gids. 
+ """ + coords_out = [] + with h5py.File(excl_fpath, 'r') as f: + for gid in gids: + row_slice, col_slice = cls._get_excl_slices(gid, + sc_row_indices, + sc_col_indices, + excl_row_slices, + excl_col_slices) + try: + lats = f[coord_labels[0]][row_slice, col_slice] + lons = f[coord_labels[1]][row_slice, col_slice] + emeta = np.vstack((lats.flatten(), lons.flatten())).T + except Exception as e: + m = ('Could not unpack coordinates for gid {} with ' + 'row/col slice {}/{}. Received the following ' + 'error:\n{}'.format(gid, row_slice, col_slice, e)) + logger.error(m) + raise e + + coords_out.append(emeta) + + return coords_out + +
[docs] @classmethod + def map_resource_gids(cls, gids, excl_fpath, sc_row_indices, + sc_col_indices, excl_row_slices, excl_col_slices, + tree, dist_thresh): + """Map exclusion gids to the resource meta. + + Parameters + ---------- + gids : np.ndarray + Supply curve gids with tech exclusion points to map to the + resource meta points. + excl_fpath : str + Filepath to exclusions h5 file, must contain latitude and longitude + arrays to allow for mapping to resource points + sc_row_indices : list + List of row indices in exclusion array for for every sc_point gid + sc_col_indices : list + List of column indices in exclusion array for for every sc_point + gid + excl_row_slices : list + List representing the supply curve points rows. Each list entry + contains the exclusion row slice that are included in the sc + point. + excl_col_slices : list + List representing the supply curve points columns. Each list entry + contains the exclusion columns slice that are included in the sc + point. + tree : cKDTree + cKDTree built from resource lat, lon coordinates + dist_tresh : float + Estimate the distance between resource points. Calculated as half + of the diagonal between closest resource points, with an extra + 5% margin + + Returns + ------- + ind : list + List of arrays of index values from the NN. List entries correspond + to input gids. + """ + logger.debug('Getting tech map coordinates for chunks {} through {}' + .format(gids[0], gids[-1])) + ind_out = [] + coords_out = cls._get_excl_coords(excl_fpath, gids, sc_row_indices, + sc_col_indices, excl_row_slices, + excl_col_slices) + + logger.debug('Running tech mapping for chunks {} through {}' + .format(gids[0], gids[-1])) + for i, _ in enumerate(gids): + dist, ind = tree.query(coords_out[i]) + ind[(dist >= dist_thresh)] = -1 + ind_out.append(ind) + + return ind_out
+ +
[docs] @staticmethod + def save_tech_map(excl_fpath, dset, indices, distance_threshold=None, + res_fpath=None, chunks=(128, 128)): + """Save tech mapping indices and coordinates to an h5 output file. + + Parameters + ---------- + excl_fpath : str + Filepath to exclusions h5 file to add techmap to as 'dset' + dset : str + Dataset name in fpath_out to save mapping results to. + indices : np.ndarray + Index values of the NN resource point. -1 if no res point found. + 2D integer array with shape equal to the exclusions extent shape. + distance_threshold : float + Distance upper bound to save as attr. + res_fpath : str, optional + Filepath to .h5 resource file that we're mapping to, + by default None + chunks : tuple + Chunk shape of the 2D output datasets. + """ + logger.info('Writing tech map "{}" to {}'.format(dset, excl_fpath)) + + shape = indices.shape + chunks = (np.min((shape[0], chunks[0])), np.min((shape[1], chunks[1]))) + + with h5py.File(excl_fpath, 'a') as f: + if dset in list(f): + wmsg = ('TechMap results dataset "{}" is being replaced ' + 'in pre-existing Exclusions TechMapping file "{}"' + .format(dset, excl_fpath)) + logger.warning(wmsg) + warn(wmsg, FileInputWarning) + f[dset][...] = indices + else: + f.create_dataset(dset, shape=shape, dtype=indices.dtype, + data=indices, chunks=chunks) + + if distance_threshold: + f[dset].attrs['distance_threshold'] = distance_threshold + + if res_fpath: + f[dset].attrs['src_res_fpath'] = res_fpath + + logger.info('Successfully saved tech map "{}" to {}' + .format(dset, excl_fpath))
+ + def _check_fout(self): + """Check the TechMapping output file for cached data.""" + with h5py.File(self._excl_fpath, 'r') as f: + if 'latitude' not in f or 'longitude' not in f: + emsg = ('Datasets "latitude" and/or "longitude" not in ' + 'pre-existing Exclusions TechMapping file "{}". ' + 'Cannot proceed.' + .format(os.path.basename(self._excl_fpath))) + logger.exception(emsg) + raise FileInputError(emsg) + +
[docs] def map_resource(self, max_workers=None, points_per_worker=10): + """ + Map all resource gids to exclusion gids + + Parameters + ---------- + max_workers : int, optional + Number of cores to run mapping on. None uses all available cpus, + by default None + points_per_worker : int, optional + Number of supply curve points to map to resource gids on each + worker, by default 10 + + Returns + ------- + indices : np.ndarray + Index values of the NN resource point. -1 if no res point found. + 2D integer array with shape equal to the exclusions extent shape. + """ + gid_chunks = ceil(len(self._gids) / points_per_worker) + gid_chunks = np.array_split(self._gids, gid_chunks) + + # init full output arrays + indices = -1 * np.ones((self._n_excl, ), dtype=np.int32) + iarr = self._make_excl_iarr(self._excl_shape) + + futures = {} + loggers = [__name__, 'reV'] + with SpawnProcessPool(max_workers=max_workers, + loggers=loggers) as exe: + + # iterate through split executions, submitting each to worker + for i, gid_set in enumerate(gid_chunks): + # submit executions and append to futures list + futures[exe.submit(self.map_resource_gids, + gid_set, + self._excl_fpath, + self._sc_row_indices, + self._sc_col_indices, + self._excl_row_slices, + self._excl_col_slices, + self._tree, + self.distance_threshold)] = i + + n_finished = 0 + for future in as_completed(futures): + n_finished += 1 + logger.info('Parallel TechMapping futures collected: ' + '{} out of {}' + .format(n_finished, len(futures))) + + i = futures[future] + result = future.result() + + for j, gid in enumerate(gid_chunks[i]): + row_slice, col_slice = self._get_excl_slices( + gid, + self._sc_row_indices, + self._sc_col_indices, + self._excl_row_slices, + self._excl_col_slices) + ind_slice = iarr[row_slice, col_slice].flatten() + indices[ind_slice] = result[j] + + indices = indices.reshape(self._excl_shape) + + return indices
+ +
[docs] @classmethod + def run(cls, excl_fpath, res_fpath, dset=None, sc_resolution=2560, + dist_margin=1.05, max_workers=None, points_per_worker=10): + """Run parallel mapping and save to h5 file. + + Parameters + ---------- + excl_fpath : str + Filepath to exclusions h5 (tech layer). dset will be + created in excl_fpath. + res_fpath : str + Filepath to .h5 resource file that we're mapping to. + dset : str, optional + Dataset name in excl_fpath to save mapping results to, if None + do not save tech_map to excl_fpath, by default None + sc_resolution : int | None, optional + Supply curve resolution, does not affect the exclusion to resource + (tech) mapping, but defines how many exclusion pixels are mapped + at a time, by default 2560 + dist_margin : float, optional + Extra margin to multiply times the computed distance between + neighboring resource points, by default 1.05 + max_workers : int, optional + Number of cores to run mapping on. None uses all available cpus, + by default None + points_per_worker : int, optional + Number of supply curve points to map to resource gids on each + worker, by default 10 + + Returns + ------- + indices : np.ndarray + Index values of the NN resource point. -1 if no res point found. + 2D integer array with shape equal to the exclusions extent shape. + """ + kwargs = {"dist_margin": dist_margin, + "sc_resolution": sc_resolution} + mapper = cls(excl_fpath, res_fpath, **kwargs) + indices = mapper.map_resource(max_workers=max_workers, + points_per_worker=points_per_worker) + + if dset: + mapper.save_tech_map(excl_fpath, dset, indices, + distance_threshold=mapper.distance_threshold, + res_fpath=res_fpath) + + return indices
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/utilities.html b/_modules/reV/utilities.html new file mode 100644 index 000000000..7adaddfcc --- /dev/null +++ b/_modules/reV/utilities.html @@ -0,0 +1,689 @@ + + + + + + reV.utilities — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.utilities

+# -*- coding: utf-8 -*-
+"""
+reV utilities.
+"""
+from enum import Enum
+import PySAM
+from rex.utilities.loggers import log_versions as rex_log_versions
+from reV.version import __version__
+
+
+
[docs]class ModuleName(str, Enum): + """A collection of the module names available in reV. + + Each module name should match the name of the click command + that will be used to invoke its respective cli. As of 3/1/2022, + this means that all commands are lowercase with underscores + replaced by dashes. + + Reference + --------- + See this line in the click source code to get the most up-to-date + click name conversions: https://tinyurl.com/4rehbsvf + """ + + BESPOKE = 'bespoke' + COLLECT = 'collect' + ECON = 'econ' + GENERATION = 'generation' + HYBRIDS = 'hybrids' + MULTI_YEAR = 'multi-year' + NRWAL = 'nrwal' + QA_QC = 'qa-qc' + REP_PROFILES = 'rep-profiles' + SUPPLY_CURVE = 'supply-curve' + SUPPLY_CURVE_AGGREGATION = 'supply-curve-aggregation' + + def __str__(self): + return self.value + + def __format__(self, format_spec): + return str.__format__(self.value, format_spec) + +
[docs] @classmethod + def all_names(cls): + """All module names. + + Returns + ------- + set + The set of all module name strings. + """ + # pylint: disable=no-member + return {v.value for v in cls.__members__.values()}
+ + +
[docs]def log_versions(logger): + """Log package versions: + - rex and reV to info + - h5py, numpy, pandas, scipy, and PySAM to debug + + Parameters + ---------- + logger : logging.Logger + Logger object to log memory message to. + """ + logger.info('Running with reV version {}'.format(__version__)) + rex_log_versions(logger) + logger.debug('- PySAM version {}'.format(PySAM.__version__))
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/utilities/cli_functions.html b/_modules/reV/utilities/cli_functions.html new file mode 100644 index 000000000..a73cd7ae8 --- /dev/null +++ b/_modules/reV/utilities/cli_functions.html @@ -0,0 +1,739 @@ + + + + + + reV.utilities.cli_functions — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.utilities.cli_functions

+# -*- coding: utf-8 -*-
+"""
+General CLI utility functions.
+"""
+import logging
+from warnings import warn
+
+from gaps.pipeline import Status
+from rex.utilities.loggers import init_mult
+
+from reV.utilities import ModuleName
+from reV.utilities.exceptions import ConfigWarning, PipelineError
+
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]def init_cli_logging(name, log_directory, verbose): + """Initialize CLI logger + + Parameters + ---------- + name : str + The name to use for the log file written to disk. + log_directory : str + Path to log file output directory. + verbose : bool + Option to make logger verbose (DEBUG). + """ + init_mult(name, log_directory, modules=['reV', 'rex'], verbose=verbose) + logger.info("Initialized reV/rex {}loggers with name {!r} and log " + "directory {!r}" + .format("verbose " if verbose else "", name, + str(log_directory)))
+ + +
[docs]def format_analysis_years(analysis_years=None): + """Format user's analysis_years input + + Parameters + ---------- + analysis_years : int | str | list, optional + Years to run reV analysis on. Can be an integer or string, or a + list of integers or strings (or ``None``). This input will get + converted to a list of values automatically. If ``None``, a + ``ConfigWarning`` will be thrown. By default, ``None``. + + Returns + ------- + list + List of analysis years. This list will never be empty, but it + can contain ``None`` as the only value. + """ + + if not isinstance(analysis_years, list): + analysis_years = [analysis_years] + + if analysis_years[0] is None: + warn('Years may not have been specified, may default ' + 'to available years in inputs files.', ConfigWarning) + + return analysis_years
+ + +
[docs]def parse_from_pipeline(config, out_dir, config_key, target_modules): + """Parse the out file from target modules and set as the values for key. + + This function only updates the ``config_key`` input if it is set to + ``"PIPELINE"``. + + Parameters + ---------- + config : dict + Configuration dictionary. The ``config_key`` will be updated in + this dictionary if it is set to ``"PIPELINE"``. + out_dir : str + Path to pipeline project directory where config and status files + are located. The status file is expected to be in this + directory. + config_key : str + Key in config files to replace with ``"out_file"`` value(s) from + previous pipeline step. + target_modules : list of str | list of `ModuleName` + List of (previous) target modules to parse for the + ``config_key``. + + Returns + ------- + dict + Input config dictionary with updated ``config_key`` input. + + Raises + ------ + PipelineError + If ``"out_file"`` not found in previous target module status + files. + """ + if config.get(config_key, None) == 'PIPELINE': + for target_module in target_modules: + gen_config_key = "gen" in config_key + module_sca = target_module == ModuleName.SUPPLY_CURVE_AGGREGATION + if gen_config_key and module_sca: + target_key = "gen_fpath" + else: + target_key = "out_file" + val = Status.parse_step_status(out_dir, target_module, target_key) + if len(val) == 1: + break + else: + raise PipelineError('Could not parse {} from previous ' + 'pipeline jobs.'.format(config_key)) + + config[config_key] = val[0] + logger.info('Config using the following pipeline input for {}: {}' + .format(config_key, val[0])) + + return config
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/utilities/curtailment.html b/_modules/reV/utilities/curtailment.html new file mode 100644 index 000000000..e5ad0310c --- /dev/null +++ b/_modules/reV/utilities/curtailment.html @@ -0,0 +1,751 @@ + + + + + + reV.utilities.curtailment — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.utilities.curtailment

+# -*- coding: utf-8 -*-
+"""Curtailment utility methods.
+
+Created on Fri Mar  1 13:47:30 2019
+
+@author: gbuster
+"""
+import datetime
+import logging
+import numpy as np
+import pandas as pd
+from warnings import warn
+
+from reV.utilities.exceptions import HandlerWarning
+
+from rex.utilities.solar_position import SolarPosition
+from rex.utilities.utilities import check_tz, get_lat_lon_cols
+
+logger = logging.getLogger(__name__)
+
+
+
[docs]def curtail(resource, curtailment, random_seed=0): + """Curtail the SAM wind resource object based on project points. + + Parameters + ---------- + resource : rex.sam_resource.SAMResource + SAM resource object for WIND resource. + curtailment : reV.config.curtailment.Curtailment + Curtailment config object. + random_seed : int | NoneType + Number to seed the numpy random number generator. Used to generate + reproducable psuedo-random results if the probability of curtailment + is not set to 1. Numpy random will be seeded with the system time if + this is None. + + Returns + ------- + resource : reV.handlers.sam_resource.SAMResource + Same as the input argument but with the wind speed dataset set to zero + where curtailment is in effect. + """ + + shape = resource.shape + + # start with curtailment everywhere + curtail_mult = np.zeros(shape) + + if curtailment.date_range is not None: + year = resource.time_index.year[0] + d0 = pd.to_datetime(datetime.datetime( + month=int(curtailment.date_range[0][:2]), + day=int(curtailment.date_range[0][2:]), + year=year), utc=True) + d1 = pd.to_datetime(datetime.datetime( + month=int(curtailment.date_range[1][:2]), + day=int(curtailment.date_range[1][2:]), + year=year), utc=True) + time_index = check_tz(resource.time_index) + mask = (time_index >= d0) & (time_index < d1) + mask = np.tile(np.expand_dims(mask, axis=1), shape[1]) + curtail_mult = np.where(mask, curtail_mult, 1) + + elif curtailment.months is not None: + # Curtail resource when in curtailment months + mask = np.isin(resource.time_index.month, curtailment.months) + mask = np.tile(np.expand_dims(mask, axis=1), shape[1]) + curtail_mult = np.where(mask, curtail_mult, 1) + + else: + msg = ('You must specify either months or date_range over ' + 'which curtailment is possible!') + logger.error(msg) + raise KeyError(msg) + + # Curtail resource when curtailment is possible and is nighttime + lat_lon_cols = get_lat_lon_cols(resource.meta) + solar_zenith_angle = 
SolarPosition( + resource.time_index, + resource.meta[lat_lon_cols].values).zenith + mask = (solar_zenith_angle > curtailment.dawn_dusk) + curtail_mult = np.where(mask, curtail_mult, 1) + + # Curtail resource when curtailment is possible and not raining + if curtailment.precipitation is not None: + if 'precipitationrate' not in resource._res_arrays: + warn('Curtailment has a precipitation threshold of "{}", but ' + '"precipitationrate" was not found in the SAM resource ' + 'variables. The following resource variables were ' + 'available: {}.' + .format(curtailment.precipitation, + list(resource._res_arrays.keys())), + HandlerWarning) + else: + mask = (resource._res_arrays['precipitationrate'] + < curtailment.precipitation) + curtail_mult = np.where(mask, curtail_mult, 1) + + # Curtail resource when curtailment is possible and temperature is high + if curtailment.temperature is not None: + mask = (resource._res_arrays['temperature'] + > curtailment.temperature) + curtail_mult = np.where(mask, curtail_mult, 1) + + # Curtail resource when curtailment is possible and not that windy + if curtailment.wind_speed is not None: + mask = (resource._res_arrays['windspeed'] + < curtailment.wind_speed) + curtail_mult = np.where(mask, curtail_mult, 1) + + if curtailment.equation is not None: + # pylint: disable=W0123,W0612 + wind_speed = resource._res_arrays['windspeed'] + temperature = resource._res_arrays['temperature'] + if 'precipitationrate' in resource._res_arrays: + precipitation_rate = resource._res_arrays['precipitationrate'] + mask = eval(curtailment.equation) + curtail_mult = np.where(mask, curtail_mult, 1) + + # Apply probability mask when curtailment is possible. 
+ if curtailment.probability != 1: + np.random.seed(seed=random_seed) + mask = np.random.rand(shape[0], shape[1]) < curtailment.probability + curtail_mult = np.where(mask, curtail_mult, 1) + + # Apply curtailment multiplier directly to resource + resource.curtail_windspeed(resource.sites, curtail_mult) + + return resource
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/utilities/exceptions.html b/_modules/reV/utilities/exceptions.html new file mode 100644 index 000000000..dfcf45e89 --- /dev/null +++ b/_modules/reV/utilities/exceptions.html @@ -0,0 +1,874 @@ + + + + + + reV.utilities.exceptions — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.utilities.exceptions

+# -*- coding: utf-8 -*-
+"""
+Custom Exceptions and Errors for reV
+"""
+
+
+
[docs]class reVError(Exception): + """ + Generic Error for reV + """
+ + +
[docs]class ConfigError(Exception): + """ + Error for bad configuration inputs + """
+ + +
[docs]class InputError(Exception): + """ + Error during input checks. + """
+ + +
[docs]class FileInputError(Exception): + """ + Error during input file checks. + """
+ + +
[docs]class JSONError(Exception): + """ + Error reading json file. + """
+ + +
[docs]class ExecutionError(Exception): + """ + Error for execution failure + """
+ + +
[docs]class PipelineError(Exception): + """ + Error for pipeline execution failure + """
+ + +
[docs]class HandlerKeyError(Exception): + """ + KeyError for Handlers + """
+ + +
[docs]class HandlerRuntimeError(Exception): + """ + RuntimeError for Handlers + """
+ + +
[docs]class HandlerValueError(Exception): + """ + ValueError for Handlers + """
+ + +
[docs]class MultiFileExclusionError(Exception): + """ + Error for bad multi file exclusion inputs. + """
+ + +
[docs]class CollectionValueError(HandlerValueError): + """ + ValueError for collection handler. + """
+ + +
[docs]class CollectionRuntimeError(HandlerRuntimeError): + """ + RuntimeError for collection handler. + """
+ + +
[docs]class ResourceError(Exception): + """ + Error for poorly formatted resource. + """
+ + +
[docs]class PySAMVersionError(Exception): + """ + Version error for SAM installation + """
+ + +
[docs]class SAMExecutionError(Exception): + """ + Execution error for SAM simulations + """
+ + +
[docs]class SAMInputError(Exception): + """ + Input error for SAM simulations + """
+ + +
[docs]class reVLossesValueError(ValueError): + """Value Error for reV losses module. """
+ + +
[docs]class SupplyCurveError(Exception): + """ + Execution error for SAM simulations + """
+ + +
[docs]class EmptySupplyCurvePointError(SupplyCurveError): + """ + Execution error for SAM simulations + """
+ + +
[docs]class SupplyCurveInputError(SupplyCurveError): + """ + Execution error for SAM simulations + """
+ + +
[docs]class NearestNeighborError(Exception): + """ + Execution error for bad nearest neighbor mapping results. + """
+ + +
[docs]class DataShapeError(Exception): + """ + Error with mismatched data shapes. + """
+ + +
[docs]class ExclusionLayerError(Exception): + """ + Error with bad exclusion data + """
+ + +
[docs]class ProjectPointsValueError(Exception): + """ + Error for bad ProjectPoints CLI values + """
+ + +
[docs]class OffshoreWindInputError(Exception): + """ + Error for bad offshore wind inputs + """
+ + +
[docs]class WhileLoopPackingError(Exception): + """ + Error for stuck in while loop while packing + """
+ + +
[docs]class OutputWarning(Warning): + """ + Warning for suspect output files or data + """
+ + +
[docs]class ExtrapolationWarning(Warning): + """ + Warning for when value will be extrapolated + """
+ + +
[docs]class InputWarning(Warning): + """ + Warning for unclear or default configuration inputs + """
+ + +
[docs]class OffshoreWindInputWarning(Warning): + """ + Warning for potentially dangerous offshore wind inputs + """
+ + +
[docs]class ConfigWarning(Warning): + """ + Warning for unclear or default configuration inputs + """
+ + +
[docs]class SAMInputWarning(Warning): + """ + Warning for bad SAM inputs + """
+ + +
[docs]class SAMExecutionWarning(Warning): + """ + Warning for problematic SAM execution + """
+ + +
[docs]class PySAMVersionWarning(Warning): + """ + Version warning for SAM installation + """
+ + +
[docs]class reVLossesWarning(Warning): + """Warning for reV losses module. """
+ + +
[docs]class ParallelExecutionWarning(Warning): + """ + Warning for parallel job execution. + """
+ + +
[docs]class SlurmWarning(Warning): + """ + Warning for SLURM errors/warnings + """
+ + +
[docs]class HandlerWarning(Warning): + """ + Warning during .h5 handling + """
+ + +
[docs]class CollectionWarning(Warning): + """ + Warning during .h5 collection + """
+ + +
[docs]class FileInputWarning(Warning): + """ + Warning during input file checks. + """
+ + +
[docs]class reVDeprecationWarning(Warning): + """ + Warning of deprecated feature. + """
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/utilities/pytest_utils.html b/_modules/reV/utilities/pytest_utils.html new file mode 100644 index 000000000..7894880b3 --- /dev/null +++ b/_modules/reV/utilities/pytest_utils.html @@ -0,0 +1,721 @@ + + + + + + reV.utilities.pytest_utils — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.utilities.pytest_utils

+# -*- coding: utf-8 -*-
+"""Functions used for pytests"""
+
+import numpy as np
+import os
+import pandas as pd
+from packaging import version
+from rex.outputs import Outputs as RexOutputs
+
+
+
[docs]def pd_date_range(*args, **kwargs): + """A simple wrapper on the pd.date_range() method that handles the closed + vs. inclusive kwarg change in pd 1.4.0""" + incl = version.parse(pd.__version__) >= version.parse('1.4.0') + + if incl and 'closed' in kwargs: + kwargs['inclusive'] = kwargs.pop('closed') + elif not incl and 'inclusive' in kwargs: + kwargs['closed'] = kwargs.pop('inclusive') + if kwargs['closed'] == 'both': + kwargs['closed'] = None + + return pd.date_range(*args, **kwargs)
+ + +
[docs]def write_chunk(meta, times, data, features, out_file): + """Write data chunk to an h5 file + + Parameters + ---------- + meta : dict + Dictionary of meta data for this chunk. Includes flattened lat and lon + arrays + times : pd.DatetimeIndex + times in this chunk + features : list + List of feature names in this chunk + out_file : str + Name of output file + """ + with RexOutputs(out_file, 'w') as fh: + fh.meta = meta + fh.time_index = times + for feature in features: + flat_data = data.reshape((-1, len(times))) + flat_data = np.transpose(flat_data, (1, 0)) + fh.add_dataset(out_file, feature, flat_data, dtype=np.float32)
+ + +
[docs]def make_fake_h5_chunks(td, features, shuffle=False): + """Make fake h5 chunks to test collection + + Parameters + ---------- + td : tempfile.TemporaryDirectory + Test TemporaryDirectory + features : list + List of dsets to write to chunks + shuffle : bool + Whether to shuffle gids + + Returns + ------- + out_pattern : str + Pattern for output file names + data : ndarray + Full non-chunked data array + features : list + List of feature names in output + s_slices : list + List of spatial slices used to chunk full data array + times : pd.DatetimeIndex + Times in output + """ + shape = (50, 50, 48) + data = np.random.uniform(0, 20, shape) + lat = np.linspace(90, 0, 50) + lon = np.linspace(-180, 0, 50) + lon, lat = np.meshgrid(lon, lat) + gids = np.arange(np.product(lat.shape)) + if shuffle: + np.random.shuffle(gids) + gids = gids.reshape(shape[:-1]) + times = pd_date_range('20220101', '20220103', freq='3600s', + inclusive='left') + s_slices = [slice(0, 25), slice(25, 50)] + out_pattern = os.path.join(td, 'chunks_{i}_{j}.h5') + + for i, s1 in enumerate(s_slices): + for j, s2 in enumerate(s_slices): + out_file = out_pattern.format(i=i, j=j) + meta = pd.DataFrame({'latitude': lat[s1, s2].flatten(), + 'longitude': lon[s1, s2].flatten(), + 'gid': gids[s1, s2].flatten()}) + write_chunk(meta=meta, times=times, data=data[s1, s2], + features=features, out_file=out_file) + + out = (out_pattern.format(i='*', j='*'), data, features, s_slices, times) + return out
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/reV/utilities/slots.html b/_modules/reV/utilities/slots.html new file mode 100644 index 000000000..d352bd8ce --- /dev/null +++ b/_modules/reV/utilities/slots.html @@ -0,0 +1,705 @@ + + + + + + reV.utilities.slots — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for reV.utilities.slots

+# -*- coding: utf-8 -*-
+"""Slotted memory framework classes."""
+
+
+
[docs]class SlottedDict: + """Slotted memory dictionary emulator.""" + + # make attribute slots for all dictionary keys + __slots__ = ['var_list'] + + def __init__(self): + self.var_list = [] + + def __setitem__(self, key, value): + """Send data to a slot. Raise KeyError if key is not recognized""" + if key in self.__slots__: + if key not in self.var_list: + self.var_list.append(key) + setattr(self, key, value) + else: + raise KeyError('Could not save "{}" to slotted dictionary. ' + 'The following output variable slots are ' + 'available: {}'.format(key, self.__slots__)) + + def __getitem__(self, key): + """Retrieve data from slot. Raise KeyError if key is not recognized""" + if key in self.var_list: + return getattr(self, key) + else: + raise KeyError('Variable "{}" has not been saved to this slotted ' + 'dictionary instance. Saved variables are: {}' + .format(key, self.keys())) + +
[docs] def update(self, slotted_dict): + """Add output variables from another instance into this instance. + + Parameters + ---------- + slotted_dict : SlottedDict + An different instance of this class (slotted dictionary class) to + merge into this instance. Variable data in this instance could be + overwritten by the new data. + """ + + attrs = slotted_dict.var_list + for attr in attrs: + if attr in self.__slots__: + value = getattr(slotted_dict, attr, None) + if value is not None: + self[attr] = value
+ +
[docs] def items(self): + """Get an items iterator similar to a dictionary. + + Parameters + ---------- + items : iterator + [key, value] iterator similar to the output of dict.items() + """ + + keys = self.keys() + values = self.values() + return zip(keys, values)
+ +
[docs] def keys(self): + """Get a keys list similar to a dictionary. + + Parameters + ---------- + key : list + List of slotted variable names that have been set. + """ + return self.var_list
+ +
[docs] def values(self): + """Get a values list similar to a dictionary. + + Parameters + ---------- + values : list + List of slotted variable values that have been set. + """ + return [self[k] for k in self.var_list]
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_modules/rex/outputs.html b/_modules/rex/outputs.html new file mode 100644 index 000000000..0d319d73a --- /dev/null +++ b/_modules/rex/outputs.html @@ -0,0 +1,1642 @@ + + + + + + rex.outputs — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +

Source code for rex.outputs

+# -*- coding: utf-8 -*-
+"""
+Classes to handle h5 output files.
+"""
+import json
+import logging
+import numpy as np
+import pandas as pd
+import time
+import sys
+import click
+import h5py
+import h5pyd
+import scipy
+
+from rex.version import __version__
+from rex.utilities.exceptions import (HandlerRuntimeError, HandlerValueError,
+                                      ResourceKeyError)
+from rex.resource import BaseResource
+from rex.utilities.parse_keys import parse_keys, parse_slice
+from rex.utilities.utilities import to_records_array
+
+logger = logging.getLogger(__name__)
+
+
+class Outputs(BaseResource):
+    """
+    Base class to handle output data in .h5 format
+
+    Examples
+    --------
+    The Outputs handler can be used to initialize h5 files in the standard
+    reV/rex resource data format.
+
+    >>> from rex import Outputs
+    >>> import pandas as pd
+    >>> import numpy as np
+    >>>
+    >>> meta = pd.DataFrame({'latitude': np.ones(100),
+    >>>                      'longitude': np.ones(100)})
+    >>>
+    >>> time_index = pd.date_range('20210101', '20220101', freq='1h',
+    >>>                            closed='right')
+    >>>
+    >>> with Outputs('test.h5', 'w') as f:
+    >>>     f.meta = meta
+    >>>     f.time_index = time_index
+
+    You can also use the Outputs handler to read output h5 files from disk.
+    The Outputs handler will automatically parse the meta data and time index
+    into the expected pandas objects (DataFrame and DatetimeIndex,
+    respectively).
+
+    >>> with Outputs('test.h5') as f:
+    >>>     print(f.meta.head())
+    >>>
+         latitude  longitude
+    gid
+    0         1.0        1.0
+    1         1.0        1.0
+    2         1.0        1.0
+    3         1.0        1.0
+    4         1.0        1.0
+
+    >>> with Outputs('test.h5') as f:
+    >>>     print(f.time_index)
+    DatetimeIndex(['2021-01-01 01:00:00+00:00', '2021-01-01 02:00:00+00:00',
+                   '2021-01-01 03:00:00+00:00', '2021-01-01 04:00:00+00:00',
+                   '2021-01-01 05:00:00+00:00', '2021-01-01 06:00:00+00:00',
+                   '2021-01-01 07:00:00+00:00', '2021-01-01 08:00:00+00:00',
+                   '2021-01-01 09:00:00+00:00', '2021-01-01 10:00:00+00:00',
+                   ...
+                   '2021-12-31 15:00:00+00:00', '2021-12-31 16:00:00+00:00',
+                   '2021-12-31 17:00:00+00:00', '2021-12-31 18:00:00+00:00',
+                   '2021-12-31 19:00:00+00:00', '2021-12-31 20:00:00+00:00',
+                   '2021-12-31 21:00:00+00:00', '2021-12-31 22:00:00+00:00',
+                   '2021-12-31 23:00:00+00:00', '2022-01-01 00:00:00+00:00'],
+                  dtype='datetime64[ns, UTC]', length=8760, freq=None)
+
+    There are a few ways to use the Outputs handler to write data to a file.
+    Here is one example using the pre-initialized file we created earlier.
+    Note that the Outputs handler will automatically scale float data using
+    the "scale_factor" attribute. The Outputs handler will unscale the data
+    while being read unless the unscale kwarg is explicityly set to False.
+    This behavior is intended to reduce disk storage requirements for big
+    data and can be disabled by setting dtype=np.float32 or dtype=np.float64
+    when writing data.
+
+    >>> Outputs.add_dataset(h5_file='test.h5', dset_name='dset1',
+    >>>                     dset_data=np.ones((8760, 100)) * 42.42,
+    >>>                     attrs={'scale_factor': 100}, dtype=np.int32)
+
+
+    >>> with Outputs('test.h5') as f:
+    >>>     print(f['dset1'])
+    >>>     print(f['dset1'].dtype)
+    [[42.42 42.42 42.42 ... 42.42 42.42 42.42]
+     [42.42 42.42 42.42 ... 42.42 42.42 42.42]
+     [42.42 42.42 42.42 ... 42.42 42.42 42.42]
+     ...
+     [42.42 42.42 42.42 ... 42.42 42.42 42.42]
+     [42.42 42.42 42.42 ... 42.42 42.42 42.42]
+     [42.42 42.42 42.42 ... 42.42 42.42 42.42]]
+    float32
+
+    >>> with Outputs('test.h5', unscale=False) as f:
+    >>>     print(f['dset1'])
+    >>>     print(f['dset1'].dtype)
+    [[4242 4242 4242 ... 4242 4242 4242]
+     [4242 4242 4242 ... 4242 4242 4242]
+     [4242 4242 4242 ... 4242 4242 4242]
+     ...
+     [4242 4242 4242 ... 4242 4242 4242]
+     [4242 4242 4242 ... 4242 4242 4242]
+     [4242 4242 4242 ... 4242 4242 4242]]
+    int32
+
+    Note that the Outputs handler is specifically designed to read and
+    write spatiotemporal data. It is therefore important to intialize the meta
+    data and time index objects even if your data is only spatial or only
+    temporal. Furthermore, the Outputs handler will always assume that 1D
+    datasets represent scalar data (non-timeseries) that corresponds to the
+    meta data shape, and that 2D datasets represent spatiotemporal data whose
+    shape corresponds to (len(time_index), len(meta)). You can see these
+    constraints here:
+
+    >>> Outputs.add_dataset(h5_file='test.h5', dset_name='bad_shape',
+                            dset_data=np.ones((1, 100)) * 42.42,
+                            attrs={'scale_factor': 100}, dtype=np.int32)
+    HandlerValueError: 2D data with shape (1, 100) is not of the proper
+    spatiotemporal shape: (8760, 100)
+
+    >>> Outputs.add_dataset(h5_file='test.h5', dset_name='bad_shape',
+                            dset_data=np.ones((8760,)) * 42.42,
+                            attrs={'scale_factor': 100}, dtype=np.int32)
+    HandlerValueError: 1D data with shape (8760,) is not of the proper
+    spatial shape: (100,)
+    """
+
+    def __init__(self, h5_file, mode='r', unscale=True, str_decode=True,
+                 group=None):
+        """
+        Parameters
+        ----------
+        h5_file : str
+            Path to .h5 resource file
+        mode : str, optional
+            Mode to instantiate h5py.File instance, by default 'r'
+        unscale : bool, optional
+            Boolean flag to automatically unscale variables on extraction,
+            by default True
+        str_decode : bool, optional
+            Boolean flag to decode the bytestring meta data into normal
+            strings. Setting this to False will speed up the meta data read,
+            by default True
+        group : str, optional
+            Group within .h5 resource file to open, by default None
+        """
+        super().__init__(h5_file, unscale=unscale, hsds=False,
+                         str_decode=str_decode, group=group, mode=mode)
+        self._mode = mode
+        self._group = self._check_group(group)
+        self._shape = None
+
+        if self.writable:
+            self.set_version_attr()
+
+    def __len__(self):
+        _len = 0
+        if 'meta' in self.datasets:
+            _len = self.h5['meta'].shape[0]
+
+        return _len
+
+    def __setitem__(self, keys, arr):
+        if self.writable:
+            ds, ds_slice = parse_keys(keys)
+
+            slice_test = False
+            if isinstance(ds_slice, tuple):
+                slice_test = ds_slice[0] == slice(None, None, None)
+
+            if ds.endswith('meta') and slice_test:
+                self._set_meta(ds, arr)
+            elif ds.endswith('time_index') and slice_test:
+                self._set_time_index(ds, arr)
+            else:
+                self._set_ds_array(ds, arr, ds_slice)
+
+    @property
+    def full_version_record(self):
+        """Get record of versions for dependencies
+
+        Returns
+        -------
+        dict
+            Dictionary of package versions for dependencies
+        """
+        versions = {'rex': __version__,
+                    'pandas': pd.__version__,
+                    'numpy': np.__version__,
+                    'python': sys.version,
+                    'click': click.__version__,
+                    'h5py': h5py.__version__,
+                    'h5pyd': h5pyd.__version__,
+                    'scipy': scipy.__version__
+                    }
+        return versions
+
+    def set_version_attr(self):
+        """Set the version attribute to the h5 file."""
+        self.h5.attrs['version'] = __version__
+        self.h5.attrs['full_version_record'] = json.dumps(
+            self.full_version_record)
+        self.h5.attrs['package'] = 'rex'
+
+    @property
+    def version(self):
+        """
+        Version of package used to create file
+
+        Returns
+        -------
+        str
+        """
+        return self.h5.attrs['version']
+
+    @property
+    def package(self):
+        """
+        Package used to create file
+
+        Returns
+        -------
+        str
+        """
+        return self.h5.attrs['package']
+
+    @property
+    def source(self):
+        """
+        Package and version used to create file
+
+        Returns
+        -------
+        str
+        """
+        out = ("{}_{}"
+               .format(self.h5.attrs['package'], self.h5.attrs['version']))
+        return out
+
+    @property
+    def shape(self):
+        """
+        Variable array shape from time_index and meta
+
+        Returns
+        -------
+        tuple
+            shape of variables arrays == (time, locations)
+        """
+        if self._shape is None:
+            dsets = self.datasets
+            if 'meta' in dsets:
+                self._shape = self.h5['meta'].shape
+                if 'time_index' in dsets:
+                    self._shape = self.h5['time_index'].shape + self._shape
+
+        return self._shape
+
+    @property
+    def writable(self):
+        """
+        Check to see if h5py.File instance is writable
+
+        Returns
+        -------
+        is_writable : bool
+            Flag if mode is writable
+        """
+        is_writable = True
+        mode = ['a', 'w', 'w-', 'x']
+        if self._mode not in mode:
+            is_writable = False
+
+        return is_writable
+
+    @BaseResource.meta.setter  # pylint: disable-msg=E1101
+    def meta(self, meta):
+        """
+        Write meta data to disk, convert type if neccessary
+
+        Parameters
+        ----------
+        meta : pandas.DataFrame | numpy.recarray
+            Locational meta data
+        """
+        self._set_meta('meta', meta)
+
+    @BaseResource.time_index.setter  # pylint: disable-msg=E1101
+    def time_index(self, time_index):
+        """
+        Write time_index to dics, convert type if neccessary
+
+        Parameters
+        ----------
+        time_index : pandas.DatetimeIndex | ndarray
+            Temporal index of timesteps
+        """
+        self._set_time_index('time_index', time_index)
+
+    @property
+    def SAM_configs(self):
+        """
+        SAM configuration JSONs used to create CF profiles
+
+        Returns
+        -------
+        configs : dict
+            Dictionary of SAM configuration JSONs
+        """
+        if 'meta' in self.datasets:
+            configs = {k: json.loads(v)
+                       for k, v in self.h5['meta'].attrs.items()}
+        else:
+            configs = {}
+
+        return configs
+
+    @property
+    def run_attrs(self):
+        """
+        Runtime attributes stored at the global (file) level
+
+        Returns
+        -------
+        global_attrs : dict
+        """
+        return self.global_attrs
+
+    @run_attrs.setter
+    def run_attrs(self, run_attrs):
+        """
+        Set runtime attributes as global (file) attributes
+
+        Parameters
+        ----------
+        run_attrs : dict
+            Dictionary of runtime attributes (args, kwargs)
+        """
+        if self.writable:
+            for k, v in run_attrs.items():
+                self.h5.attrs[k] = v
+
+    @staticmethod
+    def _check_data_dtype(dset_name, data, dtype, attrs=None):
+        """
+        Check data dtype and scale if needed
+
+        Parameters
+        ----------
+        dset_name : str
+            Name of dataset being written to disk
+        data : ndarray
+            Data to be written to disc
+        dtype : str
+            dtype of data on disc
+        attrs : dict, optional
+            Attributes to be set. May include 'scale_factor',
+            by default None
+
+        Returns
+        -------
+        data : ndarray
+            Data ready for writing to disc:
+            - Scaled and converted to dtype
+        """
+        if attrs is None:
+            attrs = {}
+
+        scale_factor = attrs.get('scale_factor', None)
+
+        scale = (scale_factor is not None
+                 and not np.issubdtype(data.dtype, np.integer))
+        if scale:
+            if scale_factor != 1 and not np.issubdtype(dtype, np.integer):
+                msg = ('Output dtype for "{}" must be an integer in '
+                       'order to apply scale factor {}".'
+                       .format(dset_name, scale_factor))
+                logger.error(msg)
+                raise HandlerRuntimeError(msg)
+
+            data_type_differs = not np.issubdtype(data.dtype, np.dtype(dtype))
+            is_integer = np.issubdtype(dtype, np.integer)
+            if data_type_differs and is_integer:
+                # apply scale factor and dtype
+                data = np.round(data * scale_factor).astype(dtype)
+
+        elif (not np.issubdtype(data.dtype, np.dtype(dtype))
+                and not np.issubdtype(np.dtype(dtype), np.floating)):
+            msg = ('A scale_factor is needed to scale '
+                   '"{}" of type "{}" to "{}".'
+                   .format(dset_name, data.dtype, dtype))
+            raise HandlerRuntimeError(msg)
+
+        return data
+
+    def _check_group(self, group):
+        """
+        Ensure group is in .h5 file
+
+        Parameters
+        ----------
+        group : str
+            Group of interest
+        """
+        if group is not None:
+            if group not in self._h5:
+                try:
+                    if self.writable:
+                        self._h5.create_group(group)
+                except Exception as ex:
+                    msg = ('Cannot create group {}: {}'
+                           .format(group, ex))
+                    raise HandlerRuntimeError(msg) from ex
+
+        return group
+
+    def _set_meta(self, ds, meta, attrs=None):
+        """
+        Write meta data to disk
+
+        Parameters
+        ----------
+        ds : str
+            meta dataset name
+        meta : pandas.DataFrame | numpy.recarray
+            Locational meta data
+        attrs : dict
+            Attributes to add to the meta data dataset
+        """
+        # pylint: disable=attribute-defined-outside-init
+        self._meta = meta
+        if isinstance(meta, pd.DataFrame):
+            meta = to_records_array(meta)
+
+        if ds in self.datasets:
+            self.update_dset(ds, meta)
+        else:
+            self._create_dset(ds, meta.shape, meta.dtype, data=meta,
+                              attrs=attrs)
+
+    def _set_time_index(self, ds, time_index, attrs=None):
+        """
+        Write time index to disk
+
+        Parameters
+        ----------
+        ds : str
+            time index dataset name
+        time_index : pandas.DatetimeIndex | ndarray
+            Temporal index of timesteps
+        attrs : dict
+            Attributes to add to the meta data dataset
+        """
+        # pylint: disable=attribute-defined-outside-init
+        self._time_index = time_index
+        if isinstance(time_index, pd.DatetimeIndex):
+            time_index = time_index.astype(str)
+            dtype = "S{}".format(len(time_index[0]))
+            time_index = np.array(time_index, dtype=dtype)
+
+        if ds in self.datasets:
+            self.update_dset(ds, time_index)
+        else:
+            self._create_dset(ds, time_index.shape, time_index.dtype,
+                              data=time_index, attrs=attrs)
+
+
[docs] def get_config(self, config_name): + """ + Get SAM config + + Parameters + ---------- + config_name : str + Name of config + + Returns + ------- + config : dict + SAM config JSON as a dictionary + """ + if 'meta' in self.datasets: + config = json.loads(self.h5['meta'].attrs[config_name]) + else: + config = None + + return config
+ +
[docs] def set_configs(self, SAM_configs): + """ + Set SAM configuration JSONs as attributes of 'meta' + + Parameters + ---------- + SAM_configs : dict + Dictionary of SAM configuration JSONs + """ + if self.writable: + for key, config in SAM_configs.items(): + if isinstance(config, dict): + config = json.dumps(config) + + if not isinstance(key, str): + key = str(key) + + self.h5['meta'].attrs[key] = config
+ + def _set_ds_array(self, ds_name, arr, ds_slice): + """ + Write ds to disk + + Parameters + ---------- + ds_name : str + Dataset name + arr : ndarray + Dataset data array + ds_slice : tuple + Dataset slicing that corresponds to arr + """ + if ds_name not in self.datasets: + msg = '{} must be initialized!'.format(ds_name) + raise HandlerRuntimeError(msg) + + dtype = self.h5[ds_name].dtype + attrs = self.get_attrs(ds_name) + ds_slice = parse_slice(ds_slice) + self.h5[ds_name][ds_slice] = self._check_data_dtype( + ds_name, arr, dtype, attrs=attrs) + + def _check_chunks(self, chunks, data=None): + """ + Convert dataset chunk size into valid tuple based on variable array + shape + + Parameters + ---------- + chunks : tuple + Desired dataset chunk size + data : ndarray + Dataset array being chunked + + Returns + ------- + ds_chunks : tuple | None + dataset chunk size + """ + if chunks is None: + return None + + if data is not None: + shape = data.shape + else: + shape = self.shape + + if len(shape) != len(chunks): + msg = ('Shape dimensions ({}) are not the same length as chunks ' + '({}). Please provide a single chunk value for each ' + 'dimension!' + .format(shape, chunks)) + logger.error(msg) + raise HandlerRuntimeError(msg) + + return tuple(np.min((s, s if c is None else c)) + for s, c in zip(shape, chunks)) + + def _create_dset(self, ds_name, shape, dtype, chunks=None, attrs=None, + data=None, replace=True): + """ + Initialize dataset + + Parameters + ---------- + ds_name : str + Dataset name + shape : tuple + Dataset shape + dtype : str + Dataset numpy dtype + chunks : tuple + Dataset chunk size + attrs : dict + Dataset attributes + data : ndarray + Dataset data array + replace : bool + If previous dataset exists with the same name, it will be replaced. 
+ """ + ds = None + if self.writable: + if ds_name in self.datasets and replace: + del self.h5[ds_name] + + elif ds_name in self.datasets: + old_shape, old_dtype, _ = self.get_dset_properties(ds_name) + if old_shape != shape or old_dtype != dtype: + e = ('Trying to create dataset "{}", but already exists ' + 'with mismatched shape and dtype. New shape/dtype ' + 'is {}/{}, previous shape/dtype is {}/{}' + .format(ds_name, shape, dtype, old_shape, old_dtype)) + logger.error(e) + raise HandlerRuntimeError(e) + + if ds_name not in self.datasets: + chunks = self._check_chunks(chunks, data=data) + try: + ds = self.h5.create_dataset(ds_name, shape=shape, + dtype=dtype, chunks=chunks) + except Exception as e: + msg = ('Could not create dataset "{}" in file!' + .format(ds_name)) + logger.error(msg) + raise IOError(msg) from e + + if attrs is not None: + self._create_ds_attrs(ds, ds_name, attrs) + + if data is not None: + ds[...] = data + + @staticmethod + def _create_ds_attrs(ds, ds_name, attrs): + """Create dataset attributes. + + Parameters + ---------- + ds : h5py.Dataset + Dataset object to write attributes to. + ds_name : str + Dataset name for logging / debugging + attrs : dict | None + Dataset attributes to write (None if no attributes to write). + """ + if attrs is not None: + for key, value in attrs.items(): + try: + ds.attrs[key] = value + except Exception as e: + msg = ('Could not save datset "{}" attribute "{}" ' + 'to value: {}'.format(ds_name, key, value)) + logger.error(msg) + raise IOError(msg) from e + + def _check_dset_shape(self, dset_name, dset_data): + """ + Check to ensure that dataset array is of the proper shape + + Parameters + ---------- + dset_name : str + Dataset name being written to disk. 
+ dset_data : ndarray + Dataset data array + """ + dset_shape = dset_data.shape + if len(dset_shape) == 1: + possible_shapes = {} + try: + possible_shapes["spatial"] = (len(self.meta),) + except ResourceKeyError: + pass + try: + possible_shapes["temporal"] = (len(self.time_index),) + except ResourceKeyError: + pass + + if not possible_shapes: + msg = ("Please load either 'meta' or 'time_index' before " + "loading a 1D dataset.") + logger.error(msg) + raise HandlerRuntimeError(msg) + + if dset_shape not in possible_shapes.values(): + possible_shapes_str = " or ".join(["{} {}".format(k, v) + for k, v + in possible_shapes.items()]) + msg = ('1D dataset "{}" with shape {} is not of ' + 'the proper {} shape!' + .format(dset_name, dset_shape, possible_shapes_str)) + logger.error(msg) + raise HandlerValueError(msg) + else: + shape = self.shape + if shape: + if dset_shape != shape: + msg = ('2D dataset "{}" with shape {} is not of the ' + 'proper spatiotemporal shape: {}' + .format(dset_name, dset_shape, shape)) + logger.error(msg) + raise HandlerValueError(msg) + else: + msg = ("'meta' and 'time_index' have not been loaded") + logger.error(msg) + raise HandlerRuntimeError(msg) + + def _add_dset(self, dset_name, data, dtype, chunks=None, attrs=None): + """ + Write dataset to disk. Dataset it created in .h5 file and data is + scaled if needed. + + Parameters + ---------- + dset_name : str + Name of dataset to be added to h5 file. + data : ndarray + Data to be added to h5 file. + dtype : str + Intended dataset datatype after scaling. + chunks : tuple + Chunk size for capacity factor means dataset. + attrs : dict + Attributes to be set. May include 'scale_factor'. + """ + self._check_dset_shape(dset_name, data) + + data = self._check_data_dtype(dset_name, data, dtype, attrs=attrs) + + self._create_dset(dset_name, data.shape, dtype, + chunks=chunks, attrs=attrs, data=data) + +
[docs] def update_dset(self, dset, dset_array, dset_slice=None): + """ + Check to see if dset needs to be updated on disk + If so write dset_array to disk + + Parameters + ---------- + dset : str + dataset to update + dset_array : ndarray + dataset array + dset_slice : tuple + slice of dataset to update, it None update all + """ + if dset_slice is None: + dset_slice = (slice(None, None, None), ) + + keys = (dset, ) + dset_slice + + arr = self.__getitem__(keys) + if not np.array_equal(arr, dset_array): + self._set_ds_array(dset, dset_array, dset_slice)
+ +
[docs] def write_dataset(self, dset_name, data, dtype, chunks=None, attrs=None): + """ + Write dataset to disk. Dataset it created in .h5 file and data is + scaled if needed. + + Parameters + ---------- + dset_name : str + Name of dataset to be added to h5 file. + data : ndarray + Data to be added to h5 file. + dtype : str + Intended dataset datatype after scaling. + chunks : tuple + Chunk size for capacity factor means dataset. + attrs : dict + Attributes to be set. May include 'scale_factor'. + """ + self._add_dset(dset_name, data, dtype, chunks=chunks, attrs=attrs)
+ +
[docs] @classmethod + def write_profiles(cls, h5_file, meta, time_index, dset_name, profiles, + dtype, attrs=None, SAM_configs=None, chunks=(None, 100), + unscale=True, mode='w-', str_decode=True, group=None): + """ + Write profiles to disk + + Parameters + ---------- + h5_file : str + Path to .h5 resource file + meta : pandas.Dataframe + Locational meta data + time_index : pandas.DatetimeIndex + Temporal timesteps + dset_name : str + Name of the target dataset (should identify the profiles). + profiles : ndarray + output result timeseries profiles + dtype : str + Intended dataset datatype after scaling. + attrs : dict, optional + Attributes to be set. May include 'scale_factor', by default None + SAM_configs : dict, optional + Dictionary of SAM configuration JSONs used to compute cf means, + by default None + chunks : tuple, optional + Chunk size for capacity factor means dataset, + by default (None, 100) + unscale : bool, optional + Boolean flag to automatically unscale variables on extraction, + by default True + mode : str, optional + Mode to instantiate h5py.File instance, by default 'w-' + str_decode : bool, optional + Boolean flag to decode the bytestring meta data into normal + strings. 
Setting this to False will speed up the meta data read, + by default True + group : str, optional + Group within .h5 resource file to open, by default None + """ + logger.info("Saving profiles ({}) to {}".format(dset_name, h5_file)) + if profiles.shape != (len(time_index), len(meta)): + raise HandlerValueError("Profile dimensions does not match" + "'time_index' and 'meta'") + ts = time.time() + kwargs = {"unscale": unscale, "mode": mode, "str_decode": str_decode, + "group": group} + with cls(h5_file, **kwargs) as f: + # Save time index + f['time_index'] = time_index + logger.debug("\t- 'time_index' saved to disc") + # Save meta + f['meta'] = meta + logger.debug("\t- 'meta' saved to disc") + # Add SAM configurations as attributes to meta + if SAM_configs is not None: + f.set_configs(SAM_configs) + logger.debug("\t- SAM configurations saved as attributes " + "on 'meta'") + + # Write dset to disk + f._add_dset(dset_name, profiles, dtype, + chunks=chunks, attrs=attrs) + logger.debug("\t- '{}' saved to disc".format(dset_name)) + + tt = (time.time() - ts) / 60 + logger.info('{} is complete'.format(h5_file)) + logger.debug('\t- Saving to disc took {:.4f} minutes' + .format(tt))
+ +
[docs] @classmethod + def write_means(cls, h5_file, meta, dset_name, means, dtype, attrs=None, + SAM_configs=None, chunks=None, unscale=True, mode='w-', + str_decode=True, group=None): + """ + Write means array to disk + + Parameters + ---------- + h5_file : str + Path to .h5 resource file + meta : pandas.Dataframe + Locational meta data + dset_name : str + Name of the target dataset (should identify the means). + means : ndarray + output means array. + dtype : str + Intended dataset datatype after scaling. + attrs : dict, optional + Attributes to be set. May include 'scale_factor', by default None + SAM_configs : dict, optional + Dictionary of SAM configuration JSONs used to compute cf means, + by default None + chunks : tuple, optional + Chunk size for capacity factor means dataset, by default None + unscale : bool, optional + Boolean flag to automatically unscale variables on extraction, + by default True + mode : str, optional + Mode to instantiate h5py.File instance, by default 'w-' + str_decode : bool, optional + Boolean flag to decode the bytestring meta data into normal + strings. 
Setting this to False will speed up the meta data read, + by default True + group : str, optional + Group within .h5 resource file to open, by default None + """ + logger.info("Saving means ({}) to {}".format(dset_name, h5_file)) + if len(means) != len(meta): + msg = 'Number of means does not match meta' + raise HandlerValueError(msg) + + ts = time.time() + kwargs = {"unscale": unscale, "mode": mode, "str_decode": str_decode, + "group": group} + with cls(h5_file, **kwargs) as f: + # Save meta + f['meta'] = meta + logger.debug("\t- 'meta' saved to disc") + # Add SAM configurations as attributes to meta + if SAM_configs is not None: + f.set_configs(SAM_configs) + logger.debug("\t- SAM configurations saved as attributes " + "on 'meta'") + + # Write dset to disk + f._add_dset(dset_name, means, dtype, + chunks=chunks, attrs=attrs) + logger.debug("\t- '{}' saved to disc".format(dset_name)) + + tt = (time.time() - ts) / 60 + logger.info('{} is complete'.format(h5_file)) + logger.debug('\t- Saving to disc took {:.4f} minutes' + .format(tt))
+ +
[docs] @classmethod + def add_dataset(cls, h5_file, dset_name, dset_data, dtype, attrs=None, + chunks=None, unscale=True, mode='a', str_decode=True, + group=None): + """ + Add dataset to h5_file + + Parameters + ---------- + h5_file : str + Path to .h5 resource file + dset_name : str + Name of dataset to be added to h5 file + dset_data : ndarray + Data to be added to h5 file + dtype : str + Intended dataset datatype after scaling. + attrs : dict, optional + Attributes to be set. May include 'scale_factor', by default None + unscale : bool, optional + Boolean flag to automatically unscale variables on extraction, + by default True + mode : str, optional + Mode to instantiate h5py.File instance, by default 'a' + str_decode : bool, optional + Boolean flag to decode the bytestring meta data into normal + strings. Setting this to False will speed up the meta data read, + by default True + group : str, optional + Group within .h5 resource file to open, by default None + """ + logger.info("Adding {} to {}".format(dset_name, h5_file)) + ts = time.time() + kwargs = {"unscale": unscale, "mode": mode, "str_decode": str_decode, + "group": group} + with cls(h5_file, **kwargs) as f: + f._add_dset(dset_name, dset_data, dtype, + chunks=chunks, attrs=attrs) + + tt = (time.time() - ts) / 60 + logger.info('{} added'.format(dset_name)) + logger.debug('\t- Saving to disc took {:.4f} minutes' + .format(tt))
+ +
[docs] @classmethod + def init_h5(cls, h5_file, dsets, shapes, attrs, chunks, dtypes, + meta, time_index=None, configs=None, unscale=True, mode='w', + str_decode=True, group=None, run_attrs=None): + """Init a full output file with the final intended shape without data. + + Parameters + ---------- + h5_file : str + Full h5 output filepath. + dsets : list + List of strings of dataset names to initialize (does not include + meta or time_index). + shapes : dict + Dictionary of dataset shapes (keys correspond to dsets). + attrs : dict + Dictionary of dataset attributes (keys correspond to dsets). + chunks : dict + Dictionary of chunk tuples (keys correspond to dsets). + dtypes : dict + dictionary of numpy datatypes (keys correspond to dsets). + meta : pd.DataFrame + Full meta data. + time_index : pd.datetimeindex | None + Full pandas datetime index. None implies that only 1D results + (no site profiles) are being written. + configs : dict | None + Optional input configs to set as attr on meta. + unscale : bool + Boolean flag to automatically unscale variables on extraction + mode : str + Mode to instantiate h5py.File instance + str_decode : bool + Boolean flag to decode the bytestring meta data into normal + strings. Setting this to False will speed up the meta data read. 
+ group : str + Group within .h5 resource file to open + run_attrs : dict | NoneType + Runtime attributes (args, kwargs) to add as global (file) + attributes + """ + + logger.debug("Initializing output file: {}".format(h5_file)) + kwargs = {"unscale": unscale, "mode": mode, "str_decode": str_decode, + "group": group} + with cls(h5_file, **kwargs) as f: + if run_attrs is not None: + f.run_attrs = run_attrs + + f['meta'] = meta + + if time_index is not None: + f['time_index'] = time_index + + for dset in dsets: + if dset not in ('meta', 'time_index'): + # initialize each dset to disk + f._create_dset(dset, shapes[dset], dtypes[dset], + chunks=chunks[dset], attrs=attrs[dset]) + + if configs is not None: + f.set_configs(configs) + logger.debug("\t- Configurations saved as attributes " + "on 'meta'") + + logger.debug('Output file has been initialized.')
+
+ +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.SAM.RevPySam.rst.txt b/_sources/_autosummary/reV.SAM.SAM.RevPySam.rst.txt new file mode 100644 index 000000000..7cf954dbe --- /dev/null +++ b/_sources/_autosummary/reV.SAM.SAM.RevPySam.rst.txt @@ -0,0 +1,47 @@ +reV.SAM.SAM.RevPySam +==================== + +.. currentmodule:: reV.SAM.SAM + +.. autoclass:: RevPySam + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~RevPySam.assign_inputs + ~RevPySam.collect_outputs + ~RevPySam.default + ~RevPySam.drop_leap + ~RevPySam.ensure_res_len + ~RevPySam.execute + ~RevPySam.get_sam_res + ~RevPySam.get_time_interval + ~RevPySam.make_datetime + ~RevPySam.outputs_to_utc_arr + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~RevPySam.DIR + ~RevPySam.IGNORE_ATTRS + ~RevPySam.MODULE + ~RevPySam.attr_dict + ~RevPySam.input_list + ~RevPySam.meta + ~RevPySam.module + ~RevPySam.pysam + ~RevPySam.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.SAM.Sam.rst.txt b/_sources/_autosummary/reV.SAM.SAM.Sam.rst.txt new file mode 100644 index 000000000..a0532c6b7 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.SAM.Sam.rst.txt @@ -0,0 +1,35 @@ +reV.SAM.SAM.Sam +=============== + +.. currentmodule:: reV.SAM.SAM + +.. autoclass:: Sam + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Sam.assign_inputs + ~Sam.default + ~Sam.execute + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~Sam.IGNORE_ATTRS + ~Sam.attr_dict + ~Sam.input_list + ~Sam.pysam + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.SAM.SamResourceRetriever.rst.txt b/_sources/_autosummary/reV.SAM.SAM.SamResourceRetriever.rst.txt new file mode 100644 index 000000000..85944fac8 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.SAM.SamResourceRetriever.rst.txt @@ -0,0 +1,30 @@ +reV.SAM.SAM.SamResourceRetriever +================================ + +.. currentmodule:: reV.SAM.SAM + +.. autoclass:: SamResourceRetriever + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SamResourceRetriever.get + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SamResourceRetriever.RESOURCE_TYPES + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.SAM.rst.txt b/_sources/_autosummary/reV.SAM.SAM.rst.txt new file mode 100644 index 000000000..964e49062 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.SAM.rst.txt @@ -0,0 +1,33 @@ +reV.SAM.SAM +=========== + +.. automodule:: reV.SAM.SAM + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + RevPySam + Sam + SamResourceRetriever + + + + + + + + + diff --git a/_sources/_autosummary/reV.SAM.defaults.AbstractDefaultFromConfigFile.rst.txt b/_sources/_autosummary/reV.SAM.defaults.AbstractDefaultFromConfigFile.rst.txt new file mode 100644 index 000000000..a333b7ff9 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.AbstractDefaultFromConfigFile.rst.txt @@ -0,0 +1,31 @@ +reV.SAM.defaults.AbstractDefaultFromConfigFile +============================================== + +.. currentmodule:: reV.SAM.defaults + +.. autoclass:: AbstractDefaultFromConfigFile + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~AbstractDefaultFromConfigFile.init_default_pysam_obj + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~AbstractDefaultFromConfigFile.CONFIG_FILE_NAME + ~AbstractDefaultFromConfigFile.PYSAM_MODULE + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.defaults.DefaultGeothermal.rst.txt b/_sources/_autosummary/reV.SAM.defaults.DefaultGeothermal.rst.txt new file mode 100644 index 000000000..df065ec92 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.DefaultGeothermal.rst.txt @@ -0,0 +1,31 @@ +reV.SAM.defaults.DefaultGeothermal +================================== + +.. currentmodule:: reV.SAM.defaults + +.. autoclass:: DefaultGeothermal + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DefaultGeothermal.default + ~DefaultGeothermal.init_default_pysam_obj + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~DefaultGeothermal.CONFIG_FILE_NAME + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.defaults.DefaultLCOE.rst.txt b/_sources/_autosummary/reV.SAM.defaults.DefaultLCOE.rst.txt new file mode 100644 index 000000000..d54ec595f --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.DefaultLCOE.rst.txt @@ -0,0 +1,24 @@ +reV.SAM.defaults.DefaultLCOE +============================ + +.. currentmodule:: reV.SAM.defaults + +.. autoclass:: DefaultLCOE + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~DefaultLCOE.default + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.defaults.DefaultLinearFresnelDsgIph.rst.txt b/_sources/_autosummary/reV.SAM.defaults.DefaultLinearFresnelDsgIph.rst.txt new file mode 100644 index 000000000..a5b51758c --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.DefaultLinearFresnelDsgIph.rst.txt @@ -0,0 +1,24 @@ +reV.SAM.defaults.DefaultLinearFresnelDsgIph +=========================================== + +.. currentmodule:: reV.SAM.defaults + +.. autoclass:: DefaultLinearFresnelDsgIph + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DefaultLinearFresnelDsgIph.default + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.defaults.DefaultMhkWave.rst.txt b/_sources/_autosummary/reV.SAM.defaults.DefaultMhkWave.rst.txt new file mode 100644 index 000000000..ab6be4f11 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.DefaultMhkWave.rst.txt @@ -0,0 +1,24 @@ +reV.SAM.defaults.DefaultMhkWave +=============================== + +.. currentmodule:: reV.SAM.defaults + +.. autoclass:: DefaultMhkWave + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DefaultMhkWave.default + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.defaults.DefaultPvSamv1.rst.txt b/_sources/_autosummary/reV.SAM.defaults.DefaultPvSamv1.rst.txt new file mode 100644 index 000000000..0bc1f555a --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.DefaultPvSamv1.rst.txt @@ -0,0 +1,24 @@ +reV.SAM.defaults.DefaultPvSamv1 +=============================== + +.. currentmodule:: reV.SAM.defaults + +.. autoclass:: DefaultPvSamv1 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. 
rubric:: Methods + + .. autosummary:: + + ~DefaultPvSamv1.default + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.defaults.DefaultPvWattsv5.rst.txt b/_sources/_autosummary/reV.SAM.defaults.DefaultPvWattsv5.rst.txt new file mode 100644 index 000000000..28cc7cfd3 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.DefaultPvWattsv5.rst.txt @@ -0,0 +1,31 @@ +reV.SAM.defaults.DefaultPvWattsv5 +================================= + +.. currentmodule:: reV.SAM.defaults + +.. autoclass:: DefaultPvWattsv5 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DefaultPvWattsv5.default + ~DefaultPvWattsv5.init_default_pysam_obj + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~DefaultPvWattsv5.CONFIG_FILE_NAME + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.defaults.DefaultPvWattsv8.rst.txt b/_sources/_autosummary/reV.SAM.defaults.DefaultPvWattsv8.rst.txt new file mode 100644 index 000000000..ce8d521df --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.DefaultPvWattsv8.rst.txt @@ -0,0 +1,24 @@ +reV.SAM.defaults.DefaultPvWattsv8 +================================= + +.. currentmodule:: reV.SAM.defaults + +.. autoclass:: DefaultPvWattsv8 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DefaultPvWattsv8.default + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.defaults.DefaultSingleOwner.rst.txt b/_sources/_autosummary/reV.SAM.defaults.DefaultSingleOwner.rst.txt new file mode 100644 index 000000000..0e8b1f49c --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.DefaultSingleOwner.rst.txt @@ -0,0 +1,24 @@ +reV.SAM.defaults.DefaultSingleOwner +=================================== + +.. currentmodule:: reV.SAM.defaults + +.. 
autoclass:: DefaultSingleOwner + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DefaultSingleOwner.default + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.defaults.DefaultSwh.rst.txt b/_sources/_autosummary/reV.SAM.defaults.DefaultSwh.rst.txt new file mode 100644 index 000000000..70f41853d --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.DefaultSwh.rst.txt @@ -0,0 +1,24 @@ +reV.SAM.defaults.DefaultSwh +=========================== + +.. currentmodule:: reV.SAM.defaults + +.. autoclass:: DefaultSwh + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DefaultSwh.default + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.defaults.DefaultTcsMoltenSalt.rst.txt b/_sources/_autosummary/reV.SAM.defaults.DefaultTcsMoltenSalt.rst.txt new file mode 100644 index 000000000..627b23f47 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.DefaultTcsMoltenSalt.rst.txt @@ -0,0 +1,24 @@ +reV.SAM.defaults.DefaultTcsMoltenSalt +===================================== + +.. currentmodule:: reV.SAM.defaults + +.. autoclass:: DefaultTcsMoltenSalt + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~DefaultTcsMoltenSalt.default + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.defaults.DefaultTroughPhysicalProcessHeat.rst.txt b/_sources/_autosummary/reV.SAM.defaults.DefaultTroughPhysicalProcessHeat.rst.txt new file mode 100644 index 000000000..0b9b0590a --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.DefaultTroughPhysicalProcessHeat.rst.txt @@ -0,0 +1,24 @@ +reV.SAM.defaults.DefaultTroughPhysicalProcessHeat +================================================= + +.. currentmodule:: reV.SAM.defaults + +.. autoclass:: DefaultTroughPhysicalProcessHeat + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DefaultTroughPhysicalProcessHeat.default + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.defaults.DefaultWindPower.rst.txt b/_sources/_autosummary/reV.SAM.defaults.DefaultWindPower.rst.txt new file mode 100644 index 000000000..2a6b2905d --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.DefaultWindPower.rst.txt @@ -0,0 +1,24 @@ +reV.SAM.defaults.DefaultWindPower +================================= + +.. currentmodule:: reV.SAM.defaults + +.. autoclass:: DefaultWindPower + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~DefaultWindPower.default + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.defaults.rst.txt b/_sources/_autosummary/reV.SAM.defaults.rst.txt new file mode 100644 index 000000000..898c62eec --- /dev/null +++ b/_sources/_autosummary/reV.SAM.defaults.rst.txt @@ -0,0 +1,43 @@ +reV.SAM.defaults +================ + +.. automodule:: reV.SAM.defaults + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + AbstractDefaultFromConfigFile + DefaultGeothermal + DefaultLCOE + DefaultLinearFresnelDsgIph + DefaultMhkWave + DefaultPvSamv1 + DefaultPvWattsv5 + DefaultPvWattsv8 + DefaultSingleOwner + DefaultSwh + DefaultTcsMoltenSalt + DefaultTroughPhysicalProcessHeat + DefaultWindPower + + + + + + + + + diff --git a/_sources/_autosummary/reV.SAM.econ.Economic.rst.txt b/_sources/_autosummary/reV.SAM.econ.Economic.rst.txt new file mode 100644 index 000000000..afcc73c01 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.econ.Economic.rst.txt @@ -0,0 +1,55 @@ +reV.SAM.econ.Economic +===================== + +.. currentmodule:: reV.SAM.econ + +.. autoclass:: Economic + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Economic.assign_inputs + ~Economic.collect_outputs + ~Economic.default + ~Economic.drop_leap + ~Economic.ensure_res_len + ~Economic.execute + ~Economic.flip_actual_irr + ~Economic.get_sam_res + ~Economic.get_time_interval + ~Economic.gross_revenue + ~Economic.lcoe_fcr + ~Economic.lcoe_nom + ~Economic.lcoe_real + ~Economic.make_datetime + ~Economic.npv + ~Economic.outputs_to_utc_arr + ~Economic.ppa_price + ~Economic.reV_run + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Economic.DIR + ~Economic.IGNORE_ATTRS + ~Economic.MODULE + ~Economic.attr_dict + ~Economic.input_list + ~Economic.meta + ~Economic.module + ~Economic.pysam + ~Economic.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.econ.LCOE.rst.txt b/_sources/_autosummary/reV.SAM.econ.LCOE.rst.txt new file mode 100644 index 000000000..54a8e5b64 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.econ.LCOE.rst.txt @@ -0,0 +1,55 @@ +reV.SAM.econ.LCOE +================= + +.. currentmodule:: reV.SAM.econ + +.. 
autoclass:: LCOE + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~LCOE.assign_inputs + ~LCOE.collect_outputs + ~LCOE.default + ~LCOE.drop_leap + ~LCOE.ensure_res_len + ~LCOE.execute + ~LCOE.flip_actual_irr + ~LCOE.get_sam_res + ~LCOE.get_time_interval + ~LCOE.gross_revenue + ~LCOE.lcoe_fcr + ~LCOE.lcoe_nom + ~LCOE.lcoe_real + ~LCOE.make_datetime + ~LCOE.npv + ~LCOE.outputs_to_utc_arr + ~LCOE.ppa_price + ~LCOE.reV_run + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~LCOE.DIR + ~LCOE.IGNORE_ATTRS + ~LCOE.MODULE + ~LCOE.attr_dict + ~LCOE.input_list + ~LCOE.meta + ~LCOE.module + ~LCOE.pysam + ~LCOE.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.econ.SingleOwner.rst.txt b/_sources/_autosummary/reV.SAM.econ.SingleOwner.rst.txt new file mode 100644 index 000000000..c8d9d4202 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.econ.SingleOwner.rst.txt @@ -0,0 +1,55 @@ +reV.SAM.econ.SingleOwner +======================== + +.. currentmodule:: reV.SAM.econ + +.. autoclass:: SingleOwner + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SingleOwner.assign_inputs + ~SingleOwner.collect_outputs + ~SingleOwner.default + ~SingleOwner.drop_leap + ~SingleOwner.ensure_res_len + ~SingleOwner.execute + ~SingleOwner.flip_actual_irr + ~SingleOwner.get_sam_res + ~SingleOwner.get_time_interval + ~SingleOwner.gross_revenue + ~SingleOwner.lcoe_fcr + ~SingleOwner.lcoe_nom + ~SingleOwner.lcoe_real + ~SingleOwner.make_datetime + ~SingleOwner.npv + ~SingleOwner.outputs_to_utc_arr + ~SingleOwner.ppa_price + ~SingleOwner.reV_run + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~SingleOwner.DIR + ~SingleOwner.IGNORE_ATTRS + ~SingleOwner.MODULE + ~SingleOwner.attr_dict + ~SingleOwner.input_list + ~SingleOwner.meta + ~SingleOwner.module + ~SingleOwner.pysam + ~SingleOwner.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.econ.rst.txt b/_sources/_autosummary/reV.SAM.econ.rst.txt new file mode 100644 index 000000000..15ed2f10d --- /dev/null +++ b/_sources/_autosummary/reV.SAM.econ.rst.txt @@ -0,0 +1,33 @@ +reV.SAM.econ +============ + +.. automodule:: reV.SAM.econ + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Economic + LCOE + SingleOwner + + + + + + + + + diff --git a/_sources/_autosummary/reV.SAM.generation.AbstractSamGeneration.rst.txt b/_sources/_autosummary/reV.SAM.generation.AbstractSamGeneration.rst.txt new file mode 100644 index 000000000..6f36c8fca --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.AbstractSamGeneration.rst.txt @@ -0,0 +1,63 @@ +reV.SAM.generation.AbstractSamGeneration +======================================== + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: AbstractSamGeneration + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~AbstractSamGeneration.add_scheduled_losses + ~AbstractSamGeneration.annual_energy + ~AbstractSamGeneration.assign_inputs + ~AbstractSamGeneration.cf_mean + ~AbstractSamGeneration.cf_profile + ~AbstractSamGeneration.check_resource_data + ~AbstractSamGeneration.collect_outputs + ~AbstractSamGeneration.default + ~AbstractSamGeneration.drop_leap + ~AbstractSamGeneration.energy_yield + ~AbstractSamGeneration.ensure_res_len + ~AbstractSamGeneration.execute + ~AbstractSamGeneration.gen_profile + ~AbstractSamGeneration.get_sam_res + ~AbstractSamGeneration.get_time_interval + ~AbstractSamGeneration.make_datetime + ~AbstractSamGeneration.outputs_to_utc_arr + ~AbstractSamGeneration.reV_run + ~AbstractSamGeneration.run + ~AbstractSamGeneration.run_gen_and_econ + ~AbstractSamGeneration.set_resource_data + ~AbstractSamGeneration.tz_elev_check + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~AbstractSamGeneration.DIR + ~AbstractSamGeneration.IGNORE_ATTRS + ~AbstractSamGeneration.MODULE + ~AbstractSamGeneration.OUTAGE_CONFIG_KEY + ~AbstractSamGeneration.OUTAGE_SEED_CONFIG_KEY + ~AbstractSamGeneration.attr_dict + ~AbstractSamGeneration.has_timezone + ~AbstractSamGeneration.input_list + ~AbstractSamGeneration.meta + ~AbstractSamGeneration.module + ~AbstractSamGeneration.outage_seed + ~AbstractSamGeneration.pysam + ~AbstractSamGeneration.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.AbstractSamGenerationFromWeatherFile.rst.txt b/_sources/_autosummary/reV.SAM.generation.AbstractSamGenerationFromWeatherFile.rst.txt new file mode 100644 index 000000000..a0d087fb4 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.AbstractSamGenerationFromWeatherFile.rst.txt @@ -0,0 +1,65 @@ +reV.SAM.generation.AbstractSamGenerationFromWeatherFile +======================================================= + +.. currentmodule:: reV.SAM.generation + +.. 
autoclass:: AbstractSamGenerationFromWeatherFile + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~AbstractSamGenerationFromWeatherFile.add_scheduled_losses + ~AbstractSamGenerationFromWeatherFile.annual_energy + ~AbstractSamGenerationFromWeatherFile.assign_inputs + ~AbstractSamGenerationFromWeatherFile.cf_mean + ~AbstractSamGenerationFromWeatherFile.cf_profile + ~AbstractSamGenerationFromWeatherFile.check_resource_data + ~AbstractSamGenerationFromWeatherFile.collect_outputs + ~AbstractSamGenerationFromWeatherFile.default + ~AbstractSamGenerationFromWeatherFile.drop_leap + ~AbstractSamGenerationFromWeatherFile.energy_yield + ~AbstractSamGenerationFromWeatherFile.ensure_res_len + ~AbstractSamGenerationFromWeatherFile.execute + ~AbstractSamGenerationFromWeatherFile.gen_profile + ~AbstractSamGenerationFromWeatherFile.get_sam_res + ~AbstractSamGenerationFromWeatherFile.get_time_interval + ~AbstractSamGenerationFromWeatherFile.make_datetime + ~AbstractSamGenerationFromWeatherFile.outputs_to_utc_arr + ~AbstractSamGenerationFromWeatherFile.reV_run + ~AbstractSamGenerationFromWeatherFile.run + ~AbstractSamGenerationFromWeatherFile.run_gen_and_econ + ~AbstractSamGenerationFromWeatherFile.set_resource_data + ~AbstractSamGenerationFromWeatherFile.tz_elev_check + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~AbstractSamGenerationFromWeatherFile.DIR + ~AbstractSamGenerationFromWeatherFile.IGNORE_ATTRS + ~AbstractSamGenerationFromWeatherFile.MODULE + ~AbstractSamGenerationFromWeatherFile.OUTAGE_CONFIG_KEY + ~AbstractSamGenerationFromWeatherFile.OUTAGE_SEED_CONFIG_KEY + ~AbstractSamGenerationFromWeatherFile.PYSAM_WEATHER_TAG + ~AbstractSamGenerationFromWeatherFile.WF_META_DROP_COLS + ~AbstractSamGenerationFromWeatherFile.attr_dict + ~AbstractSamGenerationFromWeatherFile.has_timezone + ~AbstractSamGenerationFromWeatherFile.input_list + ~AbstractSamGenerationFromWeatherFile.meta + ~AbstractSamGenerationFromWeatherFile.module + ~AbstractSamGenerationFromWeatherFile.outage_seed + ~AbstractSamGenerationFromWeatherFile.pysam + ~AbstractSamGenerationFromWeatherFile.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.AbstractSamPv.rst.txt b/_sources/_autosummary/reV.SAM.generation.AbstractSamPv.rst.txt new file mode 100644 index 000000000..0107af02f --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.AbstractSamPv.rst.txt @@ -0,0 +1,72 @@ +reV.SAM.generation.AbstractSamPv +================================ + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: AbstractSamPv + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~AbstractSamPv.ac + ~AbstractSamPv.add_scheduled_losses + ~AbstractSamPv.agg_albedo + ~AbstractSamPv.annual_energy + ~AbstractSamPv.assign_inputs + ~AbstractSamPv.cf_mean + ~AbstractSamPv.cf_mean_ac + ~AbstractSamPv.cf_profile + ~AbstractSamPv.cf_profile_ac + ~AbstractSamPv.check_resource_data + ~AbstractSamPv.clipped_power + ~AbstractSamPv.collect_outputs + ~AbstractSamPv.dc + ~AbstractSamPv.default + ~AbstractSamPv.drop_leap + ~AbstractSamPv.energy_yield + ~AbstractSamPv.ensure_res_len + ~AbstractSamPv.execute + ~AbstractSamPv.gen_profile + ~AbstractSamPv.get_sam_res + ~AbstractSamPv.get_time_interval + ~AbstractSamPv.make_datetime + ~AbstractSamPv.outputs_to_utc_arr + ~AbstractSamPv.reV_run + ~AbstractSamPv.run + ~AbstractSamPv.run_gen_and_econ + ~AbstractSamPv.set_latitude_tilt_az + ~AbstractSamPv.set_resource_data + ~AbstractSamPv.system_capacity_ac + ~AbstractSamPv.tz_elev_check + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~AbstractSamPv.DIR + ~AbstractSamPv.IGNORE_ATTRS + ~AbstractSamPv.MODULE + ~AbstractSamPv.OUTAGE_CONFIG_KEY + ~AbstractSamPv.OUTAGE_SEED_CONFIG_KEY + ~AbstractSamPv.PYSAM + ~AbstractSamPv.attr_dict + ~AbstractSamPv.has_timezone + ~AbstractSamPv.input_list + ~AbstractSamPv.meta + ~AbstractSamPv.module + ~AbstractSamPv.outage_seed + ~AbstractSamPv.pysam + ~AbstractSamPv.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.AbstractSamSolar.rst.txt b/_sources/_autosummary/reV.SAM.generation.AbstractSamSolar.rst.txt new file mode 100644 index 000000000..fabd62ae7 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.AbstractSamSolar.rst.txt @@ -0,0 +1,64 @@ +reV.SAM.generation.AbstractSamSolar +=================================== + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: AbstractSamSolar + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~AbstractSamSolar.add_scheduled_losses + ~AbstractSamSolar.agg_albedo + ~AbstractSamSolar.annual_energy + ~AbstractSamSolar.assign_inputs + ~AbstractSamSolar.cf_mean + ~AbstractSamSolar.cf_profile + ~AbstractSamSolar.check_resource_data + ~AbstractSamSolar.collect_outputs + ~AbstractSamSolar.default + ~AbstractSamSolar.drop_leap + ~AbstractSamSolar.energy_yield + ~AbstractSamSolar.ensure_res_len + ~AbstractSamSolar.execute + ~AbstractSamSolar.gen_profile + ~AbstractSamSolar.get_sam_res + ~AbstractSamSolar.get_time_interval + ~AbstractSamSolar.make_datetime + ~AbstractSamSolar.outputs_to_utc_arr + ~AbstractSamSolar.reV_run + ~AbstractSamSolar.run + ~AbstractSamSolar.run_gen_and_econ + ~AbstractSamSolar.set_resource_data + ~AbstractSamSolar.tz_elev_check + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~AbstractSamSolar.DIR + ~AbstractSamSolar.IGNORE_ATTRS + ~AbstractSamSolar.MODULE + ~AbstractSamSolar.OUTAGE_CONFIG_KEY + ~AbstractSamSolar.OUTAGE_SEED_CONFIG_KEY + ~AbstractSamSolar.attr_dict + ~AbstractSamSolar.has_timezone + ~AbstractSamSolar.input_list + ~AbstractSamSolar.meta + ~AbstractSamSolar.module + ~AbstractSamSolar.outage_seed + ~AbstractSamSolar.pysam + ~AbstractSamSolar.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.AbstractSamWind.rst.txt b/_sources/_autosummary/reV.SAM.generation.AbstractSamWind.rst.txt new file mode 100644 index 000000000..c7f0a9d4a --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.AbstractSamWind.rst.txt @@ -0,0 +1,67 @@ +reV.SAM.generation.AbstractSamWind +================================== + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: AbstractSamWind + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~AbstractSamWind.add_power_curve_losses + ~AbstractSamWind.add_scheduled_losses + ~AbstractSamWind.annual_energy + ~AbstractSamWind.assign_inputs + ~AbstractSamWind.cf_mean + ~AbstractSamWind.cf_profile + ~AbstractSamWind.check_resource_data + ~AbstractSamWind.collect_outputs + ~AbstractSamWind.default + ~AbstractSamWind.drop_leap + ~AbstractSamWind.energy_yield + ~AbstractSamWind.ensure_res_len + ~AbstractSamWind.execute + ~AbstractSamWind.gen_profile + ~AbstractSamWind.get_sam_res + ~AbstractSamWind.get_time_interval + ~AbstractSamWind.make_datetime + ~AbstractSamWind.outputs_to_utc_arr + ~AbstractSamWind.reV_run + ~AbstractSamWind.run + ~AbstractSamWind.run_gen_and_econ + ~AbstractSamWind.set_resource_data + ~AbstractSamWind.tz_elev_check + ~AbstractSamWind.wind_resource_from_input + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~AbstractSamWind.DIR + ~AbstractSamWind.IGNORE_ATTRS + ~AbstractSamWind.MODULE + ~AbstractSamWind.OUTAGE_CONFIG_KEY + ~AbstractSamWind.OUTAGE_SEED_CONFIG_KEY + ~AbstractSamWind.POWER_CURVE_CONFIG_KEY + ~AbstractSamWind.attr_dict + ~AbstractSamWind.has_timezone + ~AbstractSamWind.input_list + ~AbstractSamWind.input_power_curve + ~AbstractSamWind.meta + ~AbstractSamWind.module + ~AbstractSamWind.outage_seed + ~AbstractSamWind.pysam + ~AbstractSamWind.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.Geothermal.rst.txt b/_sources/_autosummary/reV.SAM.generation.Geothermal.rst.txt new file mode 100644 index 000000000..4256b657c --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.Geothermal.rst.txt @@ -0,0 +1,65 @@ +reV.SAM.generation.Geothermal +============================= + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: Geothermal + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~Geothermal.add_scheduled_losses + ~Geothermal.annual_energy + ~Geothermal.assign_inputs + ~Geothermal.cf_mean + ~Geothermal.cf_profile + ~Geothermal.check_resource_data + ~Geothermal.collect_outputs + ~Geothermal.default + ~Geothermal.drop_leap + ~Geothermal.energy_yield + ~Geothermal.ensure_res_len + ~Geothermal.execute + ~Geothermal.gen_profile + ~Geothermal.get_sam_res + ~Geothermal.get_time_interval + ~Geothermal.make_datetime + ~Geothermal.outputs_to_utc_arr + ~Geothermal.reV_run + ~Geothermal.run + ~Geothermal.run_gen_and_econ + ~Geothermal.set_resource_data + ~Geothermal.tz_elev_check + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Geothermal.DIR + ~Geothermal.IGNORE_ATTRS + ~Geothermal.MODULE + ~Geothermal.OUTAGE_CONFIG_KEY + ~Geothermal.OUTAGE_SEED_CONFIG_KEY + ~Geothermal.PYSAM_WEATHER_TAG + ~Geothermal.WF_META_DROP_COLS + ~Geothermal.attr_dict + ~Geothermal.has_timezone + ~Geothermal.input_list + ~Geothermal.meta + ~Geothermal.module + ~Geothermal.outage_seed + ~Geothermal.pysam + ~Geothermal.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.LinearDirectSteam.rst.txt b/_sources/_autosummary/reV.SAM.generation.LinearDirectSteam.rst.txt new file mode 100644 index 000000000..475c69246 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.LinearDirectSteam.rst.txt @@ -0,0 +1,65 @@ +reV.SAM.generation.LinearDirectSteam +==================================== + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: LinearDirectSteam + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~LinearDirectSteam.add_scheduled_losses + ~LinearDirectSteam.annual_energy + ~LinearDirectSteam.assign_inputs + ~LinearDirectSteam.cf_mean + ~LinearDirectSteam.cf_profile + ~LinearDirectSteam.check_resource_data + ~LinearDirectSteam.collect_outputs + ~LinearDirectSteam.default + ~LinearDirectSteam.drop_leap + ~LinearDirectSteam.energy_yield + ~LinearDirectSteam.ensure_res_len + ~LinearDirectSteam.execute + ~LinearDirectSteam.gen_profile + ~LinearDirectSteam.get_sam_res + ~LinearDirectSteam.get_time_interval + ~LinearDirectSteam.make_datetime + ~LinearDirectSteam.outputs_to_utc_arr + ~LinearDirectSteam.reV_run + ~LinearDirectSteam.run + ~LinearDirectSteam.run_gen_and_econ + ~LinearDirectSteam.set_resource_data + ~LinearDirectSteam.tz_elev_check + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~LinearDirectSteam.DIR + ~LinearDirectSteam.IGNORE_ATTRS + ~LinearDirectSteam.MODULE + ~LinearDirectSteam.OUTAGE_CONFIG_KEY + ~LinearDirectSteam.OUTAGE_SEED_CONFIG_KEY + ~LinearDirectSteam.PYSAM_WEATHER_TAG + ~LinearDirectSteam.WF_META_DROP_COLS + ~LinearDirectSteam.attr_dict + ~LinearDirectSteam.has_timezone + ~LinearDirectSteam.input_list + ~LinearDirectSteam.meta + ~LinearDirectSteam.module + ~LinearDirectSteam.outage_seed + ~LinearDirectSteam.pysam + ~LinearDirectSteam.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.MhkWave.rst.txt b/_sources/_autosummary/reV.SAM.generation.MhkWave.rst.txt new file mode 100644 index 000000000..be6d586f4 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.MhkWave.rst.txt @@ -0,0 +1,63 @@ +reV.SAM.generation.MhkWave +========================== + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: MhkWave + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~MhkWave.add_scheduled_losses + ~MhkWave.annual_energy + ~MhkWave.assign_inputs + ~MhkWave.cf_mean + ~MhkWave.cf_profile + ~MhkWave.check_resource_data + ~MhkWave.collect_outputs + ~MhkWave.default + ~MhkWave.drop_leap + ~MhkWave.energy_yield + ~MhkWave.ensure_res_len + ~MhkWave.execute + ~MhkWave.gen_profile + ~MhkWave.get_sam_res + ~MhkWave.get_time_interval + ~MhkWave.make_datetime + ~MhkWave.outputs_to_utc_arr + ~MhkWave.reV_run + ~MhkWave.run + ~MhkWave.run_gen_and_econ + ~MhkWave.set_resource_data + ~MhkWave.tz_elev_check + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~MhkWave.DIR + ~MhkWave.IGNORE_ATTRS + ~MhkWave.MODULE + ~MhkWave.OUTAGE_CONFIG_KEY + ~MhkWave.OUTAGE_SEED_CONFIG_KEY + ~MhkWave.attr_dict + ~MhkWave.has_timezone + ~MhkWave.input_list + ~MhkWave.meta + ~MhkWave.module + ~MhkWave.outage_seed + ~MhkWave.pysam + ~MhkWave.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.PvSamv1.rst.txt b/_sources/_autosummary/reV.SAM.generation.PvSamv1.rst.txt new file mode 100644 index 000000000..fab18f6b6 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.PvSamv1.rst.txt @@ -0,0 +1,71 @@ +reV.SAM.generation.PvSamv1 +========================== + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: PvSamv1 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~PvSamv1.ac + ~PvSamv1.add_scheduled_losses + ~PvSamv1.agg_albedo + ~PvSamv1.annual_energy + ~PvSamv1.assign_inputs + ~PvSamv1.cf_mean + ~PvSamv1.cf_mean_ac + ~PvSamv1.cf_profile + ~PvSamv1.cf_profile_ac + ~PvSamv1.check_resource_data + ~PvSamv1.clipped_power + ~PvSamv1.collect_outputs + ~PvSamv1.dc + ~PvSamv1.default + ~PvSamv1.drop_leap + ~PvSamv1.energy_yield + ~PvSamv1.ensure_res_len + ~PvSamv1.execute + ~PvSamv1.gen_profile + ~PvSamv1.get_sam_res + ~PvSamv1.get_time_interval + ~PvSamv1.make_datetime + ~PvSamv1.outputs_to_utc_arr + ~PvSamv1.reV_run + ~PvSamv1.run + ~PvSamv1.run_gen_and_econ + ~PvSamv1.set_latitude_tilt_az + ~PvSamv1.set_resource_data + ~PvSamv1.system_capacity_ac + ~PvSamv1.tz_elev_check + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~PvSamv1.DIR + ~PvSamv1.IGNORE_ATTRS + ~PvSamv1.MODULE + ~PvSamv1.OUTAGE_CONFIG_KEY + ~PvSamv1.OUTAGE_SEED_CONFIG_KEY + ~PvSamv1.attr_dict + ~PvSamv1.has_timezone + ~PvSamv1.input_list + ~PvSamv1.meta + ~PvSamv1.module + ~PvSamv1.outage_seed + ~PvSamv1.pysam + ~PvSamv1.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.PvWattsv5.rst.txt b/_sources/_autosummary/reV.SAM.generation.PvWattsv5.rst.txt new file mode 100644 index 000000000..6d0ba1d1c --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.PvWattsv5.rst.txt @@ -0,0 +1,71 @@ +reV.SAM.generation.PvWattsv5 +============================ + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: PvWattsv5 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~PvWattsv5.ac + ~PvWattsv5.add_scheduled_losses + ~PvWattsv5.agg_albedo + ~PvWattsv5.annual_energy + ~PvWattsv5.assign_inputs + ~PvWattsv5.cf_mean + ~PvWattsv5.cf_mean_ac + ~PvWattsv5.cf_profile + ~PvWattsv5.cf_profile_ac + ~PvWattsv5.check_resource_data + ~PvWattsv5.clipped_power + ~PvWattsv5.collect_outputs + ~PvWattsv5.dc + ~PvWattsv5.default + ~PvWattsv5.drop_leap + ~PvWattsv5.energy_yield + ~PvWattsv5.ensure_res_len + ~PvWattsv5.execute + ~PvWattsv5.gen_profile + ~PvWattsv5.get_sam_res + ~PvWattsv5.get_time_interval + ~PvWattsv5.make_datetime + ~PvWattsv5.outputs_to_utc_arr + ~PvWattsv5.reV_run + ~PvWattsv5.run + ~PvWattsv5.run_gen_and_econ + ~PvWattsv5.set_latitude_tilt_az + ~PvWattsv5.set_resource_data + ~PvWattsv5.system_capacity_ac + ~PvWattsv5.tz_elev_check + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~PvWattsv5.DIR + ~PvWattsv5.IGNORE_ATTRS + ~PvWattsv5.MODULE + ~PvWattsv5.OUTAGE_CONFIG_KEY + ~PvWattsv5.OUTAGE_SEED_CONFIG_KEY + ~PvWattsv5.attr_dict + ~PvWattsv5.has_timezone + ~PvWattsv5.input_list + ~PvWattsv5.meta + ~PvWattsv5.module + ~PvWattsv5.outage_seed + ~PvWattsv5.pysam + ~PvWattsv5.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.PvWattsv7.rst.txt b/_sources/_autosummary/reV.SAM.generation.PvWattsv7.rst.txt new file mode 100644 index 000000000..9f4d1236a --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.PvWattsv7.rst.txt @@ -0,0 +1,71 @@ +reV.SAM.generation.PvWattsv7 +============================ + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: PvWattsv7 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~PvWattsv7.ac + ~PvWattsv7.add_scheduled_losses + ~PvWattsv7.agg_albedo + ~PvWattsv7.annual_energy + ~PvWattsv7.assign_inputs + ~PvWattsv7.cf_mean + ~PvWattsv7.cf_mean_ac + ~PvWattsv7.cf_profile + ~PvWattsv7.cf_profile_ac + ~PvWattsv7.check_resource_data + ~PvWattsv7.clipped_power + ~PvWattsv7.collect_outputs + ~PvWattsv7.dc + ~PvWattsv7.default + ~PvWattsv7.drop_leap + ~PvWattsv7.energy_yield + ~PvWattsv7.ensure_res_len + ~PvWattsv7.execute + ~PvWattsv7.gen_profile + ~PvWattsv7.get_sam_res + ~PvWattsv7.get_time_interval + ~PvWattsv7.make_datetime + ~PvWattsv7.outputs_to_utc_arr + ~PvWattsv7.reV_run + ~PvWattsv7.run + ~PvWattsv7.run_gen_and_econ + ~PvWattsv7.set_latitude_tilt_az + ~PvWattsv7.set_resource_data + ~PvWattsv7.system_capacity_ac + ~PvWattsv7.tz_elev_check + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~PvWattsv7.DIR + ~PvWattsv7.IGNORE_ATTRS + ~PvWattsv7.MODULE + ~PvWattsv7.OUTAGE_CONFIG_KEY + ~PvWattsv7.OUTAGE_SEED_CONFIG_KEY + ~PvWattsv7.attr_dict + ~PvWattsv7.has_timezone + ~PvWattsv7.input_list + ~PvWattsv7.meta + ~PvWattsv7.module + ~PvWattsv7.outage_seed + ~PvWattsv7.pysam + ~PvWattsv7.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.PvWattsv8.rst.txt b/_sources/_autosummary/reV.SAM.generation.PvWattsv8.rst.txt new file mode 100644 index 000000000..72b6ff1f7 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.PvWattsv8.rst.txt @@ -0,0 +1,71 @@ +reV.SAM.generation.PvWattsv8 +============================ + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: PvWattsv8 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~PvWattsv8.ac + ~PvWattsv8.add_scheduled_losses + ~PvWattsv8.agg_albedo + ~PvWattsv8.annual_energy + ~PvWattsv8.assign_inputs + ~PvWattsv8.cf_mean + ~PvWattsv8.cf_mean_ac + ~PvWattsv8.cf_profile + ~PvWattsv8.cf_profile_ac + ~PvWattsv8.check_resource_data + ~PvWattsv8.clipped_power + ~PvWattsv8.collect_outputs + ~PvWattsv8.dc + ~PvWattsv8.default + ~PvWattsv8.drop_leap + ~PvWattsv8.energy_yield + ~PvWattsv8.ensure_res_len + ~PvWattsv8.execute + ~PvWattsv8.gen_profile + ~PvWattsv8.get_sam_res + ~PvWattsv8.get_time_interval + ~PvWattsv8.make_datetime + ~PvWattsv8.outputs_to_utc_arr + ~PvWattsv8.reV_run + ~PvWattsv8.run + ~PvWattsv8.run_gen_and_econ + ~PvWattsv8.set_latitude_tilt_az + ~PvWattsv8.set_resource_data + ~PvWattsv8.system_capacity_ac + ~PvWattsv8.tz_elev_check + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~PvWattsv8.DIR + ~PvWattsv8.IGNORE_ATTRS + ~PvWattsv8.MODULE + ~PvWattsv8.OUTAGE_CONFIG_KEY + ~PvWattsv8.OUTAGE_SEED_CONFIG_KEY + ~PvWattsv8.attr_dict + ~PvWattsv8.has_timezone + ~PvWattsv8.input_list + ~PvWattsv8.meta + ~PvWattsv8.module + ~PvWattsv8.outage_seed + ~PvWattsv8.pysam + ~PvWattsv8.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.SolarWaterHeat.rst.txt b/_sources/_autosummary/reV.SAM.generation.SolarWaterHeat.rst.txt new file mode 100644 index 000000000..b9a5884bf --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.SolarWaterHeat.rst.txt @@ -0,0 +1,65 @@ +reV.SAM.generation.SolarWaterHeat +================================= + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: SolarWaterHeat + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~SolarWaterHeat.add_scheduled_losses + ~SolarWaterHeat.annual_energy + ~SolarWaterHeat.assign_inputs + ~SolarWaterHeat.cf_mean + ~SolarWaterHeat.cf_profile + ~SolarWaterHeat.check_resource_data + ~SolarWaterHeat.collect_outputs + ~SolarWaterHeat.default + ~SolarWaterHeat.drop_leap + ~SolarWaterHeat.energy_yield + ~SolarWaterHeat.ensure_res_len + ~SolarWaterHeat.execute + ~SolarWaterHeat.gen_profile + ~SolarWaterHeat.get_sam_res + ~SolarWaterHeat.get_time_interval + ~SolarWaterHeat.make_datetime + ~SolarWaterHeat.outputs_to_utc_arr + ~SolarWaterHeat.reV_run + ~SolarWaterHeat.run + ~SolarWaterHeat.run_gen_and_econ + ~SolarWaterHeat.set_resource_data + ~SolarWaterHeat.tz_elev_check + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SolarWaterHeat.DIR + ~SolarWaterHeat.IGNORE_ATTRS + ~SolarWaterHeat.MODULE + ~SolarWaterHeat.OUTAGE_CONFIG_KEY + ~SolarWaterHeat.OUTAGE_SEED_CONFIG_KEY + ~SolarWaterHeat.PYSAM_WEATHER_TAG + ~SolarWaterHeat.WF_META_DROP_COLS + ~SolarWaterHeat.attr_dict + ~SolarWaterHeat.has_timezone + ~SolarWaterHeat.input_list + ~SolarWaterHeat.meta + ~SolarWaterHeat.module + ~SolarWaterHeat.outage_seed + ~SolarWaterHeat.pysam + ~SolarWaterHeat.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.TcsMoltenSalt.rst.txt b/_sources/_autosummary/reV.SAM.generation.TcsMoltenSalt.rst.txt new file mode 100644 index 000000000..68f9eb4fe --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.TcsMoltenSalt.rst.txt @@ -0,0 +1,64 @@ +reV.SAM.generation.TcsMoltenSalt +================================ + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: TcsMoltenSalt + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~TcsMoltenSalt.add_scheduled_losses + ~TcsMoltenSalt.agg_albedo + ~TcsMoltenSalt.annual_energy + ~TcsMoltenSalt.assign_inputs + ~TcsMoltenSalt.cf_mean + ~TcsMoltenSalt.cf_profile + ~TcsMoltenSalt.check_resource_data + ~TcsMoltenSalt.collect_outputs + ~TcsMoltenSalt.default + ~TcsMoltenSalt.drop_leap + ~TcsMoltenSalt.energy_yield + ~TcsMoltenSalt.ensure_res_len + ~TcsMoltenSalt.execute + ~TcsMoltenSalt.gen_profile + ~TcsMoltenSalt.get_sam_res + ~TcsMoltenSalt.get_time_interval + ~TcsMoltenSalt.make_datetime + ~TcsMoltenSalt.outputs_to_utc_arr + ~TcsMoltenSalt.reV_run + ~TcsMoltenSalt.run + ~TcsMoltenSalt.run_gen_and_econ + ~TcsMoltenSalt.set_resource_data + ~TcsMoltenSalt.tz_elev_check + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~TcsMoltenSalt.DIR + ~TcsMoltenSalt.IGNORE_ATTRS + ~TcsMoltenSalt.MODULE + ~TcsMoltenSalt.OUTAGE_CONFIG_KEY + ~TcsMoltenSalt.OUTAGE_SEED_CONFIG_KEY + ~TcsMoltenSalt.attr_dict + ~TcsMoltenSalt.has_timezone + ~TcsMoltenSalt.input_list + ~TcsMoltenSalt.meta + ~TcsMoltenSalt.module + ~TcsMoltenSalt.outage_seed + ~TcsMoltenSalt.pysam + ~TcsMoltenSalt.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.TroughPhysicalHeat.rst.txt b/_sources/_autosummary/reV.SAM.generation.TroughPhysicalHeat.rst.txt new file mode 100644 index 000000000..ab05d7723 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.TroughPhysicalHeat.rst.txt @@ -0,0 +1,65 @@ +reV.SAM.generation.TroughPhysicalHeat +===================================== + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: TroughPhysicalHeat + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~TroughPhysicalHeat.add_scheduled_losses + ~TroughPhysicalHeat.annual_energy + ~TroughPhysicalHeat.assign_inputs + ~TroughPhysicalHeat.cf_mean + ~TroughPhysicalHeat.cf_profile + ~TroughPhysicalHeat.check_resource_data + ~TroughPhysicalHeat.collect_outputs + ~TroughPhysicalHeat.default + ~TroughPhysicalHeat.drop_leap + ~TroughPhysicalHeat.energy_yield + ~TroughPhysicalHeat.ensure_res_len + ~TroughPhysicalHeat.execute + ~TroughPhysicalHeat.gen_profile + ~TroughPhysicalHeat.get_sam_res + ~TroughPhysicalHeat.get_time_interval + ~TroughPhysicalHeat.make_datetime + ~TroughPhysicalHeat.outputs_to_utc_arr + ~TroughPhysicalHeat.reV_run + ~TroughPhysicalHeat.run + ~TroughPhysicalHeat.run_gen_and_econ + ~TroughPhysicalHeat.set_resource_data + ~TroughPhysicalHeat.tz_elev_check + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~TroughPhysicalHeat.DIR + ~TroughPhysicalHeat.IGNORE_ATTRS + ~TroughPhysicalHeat.MODULE + ~TroughPhysicalHeat.OUTAGE_CONFIG_KEY + ~TroughPhysicalHeat.OUTAGE_SEED_CONFIG_KEY + ~TroughPhysicalHeat.PYSAM_WEATHER_TAG + ~TroughPhysicalHeat.WF_META_DROP_COLS + ~TroughPhysicalHeat.attr_dict + ~TroughPhysicalHeat.has_timezone + ~TroughPhysicalHeat.input_list + ~TroughPhysicalHeat.meta + ~TroughPhysicalHeat.module + ~TroughPhysicalHeat.outage_seed + ~TroughPhysicalHeat.pysam + ~TroughPhysicalHeat.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.WindPower.rst.txt b/_sources/_autosummary/reV.SAM.generation.WindPower.rst.txt new file mode 100644 index 000000000..842eae751 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.WindPower.rst.txt @@ -0,0 +1,67 @@ +reV.SAM.generation.WindPower +============================ + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: WindPower + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~WindPower.add_power_curve_losses + ~WindPower.add_scheduled_losses + ~WindPower.annual_energy + ~WindPower.assign_inputs + ~WindPower.cf_mean + ~WindPower.cf_profile + ~WindPower.check_resource_data + ~WindPower.collect_outputs + ~WindPower.default + ~WindPower.drop_leap + ~WindPower.energy_yield + ~WindPower.ensure_res_len + ~WindPower.execute + ~WindPower.gen_profile + ~WindPower.get_sam_res + ~WindPower.get_time_interval + ~WindPower.make_datetime + ~WindPower.outputs_to_utc_arr + ~WindPower.reV_run + ~WindPower.run + ~WindPower.run_gen_and_econ + ~WindPower.set_resource_data + ~WindPower.tz_elev_check + ~WindPower.wind_resource_from_input + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~WindPower.DIR + ~WindPower.IGNORE_ATTRS + ~WindPower.MODULE + ~WindPower.OUTAGE_CONFIG_KEY + ~WindPower.OUTAGE_SEED_CONFIG_KEY + ~WindPower.POWER_CURVE_CONFIG_KEY + ~WindPower.attr_dict + ~WindPower.has_timezone + ~WindPower.input_list + ~WindPower.input_power_curve + ~WindPower.meta + ~WindPower.module + ~WindPower.outage_seed + ~WindPower.pysam + ~WindPower.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.WindPowerPD.rst.txt b/_sources/_autosummary/reV.SAM.generation.WindPowerPD.rst.txt new file mode 100644 index 000000000..6a1e18360 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.WindPowerPD.rst.txt @@ -0,0 +1,67 @@ +reV.SAM.generation.WindPowerPD +============================== + +.. currentmodule:: reV.SAM.generation + +.. autoclass:: WindPowerPD + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~WindPowerPD.add_power_curve_losses + ~WindPowerPD.add_scheduled_losses + ~WindPowerPD.annual_energy + ~WindPowerPD.assign_inputs + ~WindPowerPD.cf_mean + ~WindPowerPD.cf_profile + ~WindPowerPD.check_resource_data + ~WindPowerPD.collect_outputs + ~WindPowerPD.default + ~WindPowerPD.drop_leap + ~WindPowerPD.energy_yield + ~WindPowerPD.ensure_res_len + ~WindPowerPD.execute + ~WindPowerPD.gen_profile + ~WindPowerPD.get_sam_res + ~WindPowerPD.get_time_interval + ~WindPowerPD.make_datetime + ~WindPowerPD.outputs_to_utc_arr + ~WindPowerPD.reV_run + ~WindPowerPD.run + ~WindPowerPD.run_gen_and_econ + ~WindPowerPD.set_resource_data + ~WindPowerPD.tz_elev_check + ~WindPowerPD.wind_resource_from_input + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~WindPowerPD.DIR + ~WindPowerPD.IGNORE_ATTRS + ~WindPowerPD.MODULE + ~WindPowerPD.OUTAGE_CONFIG_KEY + ~WindPowerPD.OUTAGE_SEED_CONFIG_KEY + ~WindPowerPD.POWER_CURVE_CONFIG_KEY + ~WindPowerPD.attr_dict + ~WindPowerPD.has_timezone + ~WindPowerPD.input_list + ~WindPowerPD.input_power_curve + ~WindPowerPD.meta + ~WindPowerPD.module + ~WindPowerPD.outage_seed + ~WindPowerPD.pysam + ~WindPowerPD.site + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.generation.rst.txt b/_sources/_autosummary/reV.SAM.generation.rst.txt new file mode 100644 index 000000000..405467c71 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.generation.rst.txt @@ -0,0 +1,47 @@ +reV.SAM.generation +================== + +.. automodule:: reV.SAM.generation + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + AbstractSamGeneration + AbstractSamGenerationFromWeatherFile + AbstractSamPv + AbstractSamSolar + AbstractSamWind + Geothermal + LinearDirectSteam + MhkWave + PvSamv1 + PvWattsv5 + PvWattsv7 + PvWattsv8 + SolarWaterHeat + TcsMoltenSalt + TroughPhysicalHeat + WindPower + WindPowerPD + + + + + + + + + diff --git a/_sources/_autosummary/reV.SAM.rst.txt b/_sources/_autosummary/reV.SAM.rst.txt new file mode 100644 index 000000000..b422c2f3f --- /dev/null +++ b/_sources/_autosummary/reV.SAM.rst.txt @@ -0,0 +1,35 @@ +reV.SAM +======= + +.. automodule:: reV.SAM + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.SAM.SAM + reV.SAM.defaults + reV.SAM.econ + reV.SAM.generation + reV.SAM.version_checker + reV.SAM.windbos + diff --git a/_sources/_autosummary/reV.SAM.version_checker.PySamVersionChecker.rst.txt b/_sources/_autosummary/reV.SAM.version_checker.PySamVersionChecker.rst.txt new file mode 100644 index 000000000..c12b10ba6 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.version_checker.PySamVersionChecker.rst.txt @@ -0,0 +1,32 @@ +reV.SAM.version\_checker.PySamVersionChecker +============================================ + +.. currentmodule:: reV.SAM.version_checker + +.. autoclass:: PySamVersionChecker + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~PySamVersionChecker.run + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~PySamVersionChecker.V2_CORRECTION_KEYS + ~PySamVersionChecker.WIND + ~PySamVersionChecker.pysam_version + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.version_checker.rst.txt b/_sources/_autosummary/reV.SAM.version_checker.rst.txt new file mode 100644 index 000000000..eaa30af2b --- /dev/null +++ b/_sources/_autosummary/reV.SAM.version_checker.rst.txt @@ -0,0 +1,31 @@ +reV.SAM.version\_checker +======================== + +.. automodule:: reV.SAM.version_checker + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + PySamVersionChecker + + + + + + + + + diff --git a/_sources/_autosummary/reV.SAM.windbos.WindBos.rst.txt b/_sources/_autosummary/reV.SAM.windbos.WindBos.rst.txt new file mode 100644 index 000000000..a05738dea --- /dev/null +++ b/_sources/_autosummary/reV.SAM.windbos.WindBos.rst.txt @@ -0,0 +1,42 @@ +reV.SAM.windbos.WindBos +======================= + +.. currentmodule:: reV.SAM.windbos + +.. autoclass:: WindBos + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~WindBos.reV_run + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~WindBos.KEYS + ~WindBos.MODULE + ~WindBos.bos_cost + ~WindBos.hub_height + ~WindBos.machine_rating + ~WindBos.number_of_turbines + ~WindBos.output + ~WindBos.rotor_diameter + ~WindBos.sales_tax_cost + ~WindBos.sales_tax_mult + ~WindBos.total_installed_cost + ~WindBos.turbine_capital_cost + ~WindBos.turbine_cost + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.SAM.windbos.rst.txt b/_sources/_autosummary/reV.SAM.windbos.rst.txt new file mode 100644 index 000000000..359386633 --- /dev/null +++ b/_sources/_autosummary/reV.SAM.windbos.rst.txt @@ -0,0 +1,31 @@ +reV.SAM.windbos +=============== + +.. automodule:: reV.SAM.windbos + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + WindBos + + + + + + + + + diff --git a/_sources/_autosummary/reV.bespoke.bespoke.BespokeMultiPlantData.rst.txt b/_sources/_autosummary/reV.bespoke.bespoke.BespokeMultiPlantData.rst.txt new file mode 100644 index 000000000..f10ed0b82 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.bespoke.BespokeMultiPlantData.rst.txt @@ -0,0 +1,24 @@ +reV.bespoke.bespoke.BespokeMultiPlantData +========================================= + +.. currentmodule:: reV.bespoke.bespoke + +.. autoclass:: BespokeMultiPlantData + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BespokeMultiPlantData.get_preloaded_data_for_gid + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.bespoke.bespoke.BespokeSinglePlant.rst.txt b/_sources/_autosummary/reV.bespoke.bespoke.BespokeSinglePlant.rst.txt new file mode 100644 index 000000000..b4ad653d3 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.bespoke.BespokeSinglePlant.rst.txt @@ -0,0 +1,58 @@ +reV.bespoke.bespoke.BespokeSinglePlant +====================================== + +.. currentmodule:: reV.bespoke.bespoke + +.. autoclass:: BespokeSinglePlant + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BespokeSinglePlant.agg_data_layers + ~BespokeSinglePlant.check_dependencies + ~BespokeSinglePlant.close + ~BespokeSinglePlant.get_lcoe_kwargs + ~BespokeSinglePlant.get_weighted_res_dir + ~BespokeSinglePlant.get_weighted_res_ts + ~BespokeSinglePlant.get_wind_handler + ~BespokeSinglePlant.initialize_wind_plant_ts + ~BespokeSinglePlant.recalc_lcoe + ~BespokeSinglePlant.run + ~BespokeSinglePlant.run_plant_optimization + ~BespokeSinglePlant.run_wind_plant_ts + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~BespokeSinglePlant.DEPENDENCIES + ~BespokeSinglePlant.OUT_ATTRS + ~BespokeSinglePlant.annual_time_indexes + ~BespokeSinglePlant.gid + ~BespokeSinglePlant.hub_height + ~BespokeSinglePlant.include_mask + ~BespokeSinglePlant.meta + ~BespokeSinglePlant.original_sam_sys_inputs + ~BespokeSinglePlant.outputs + ~BespokeSinglePlant.pixel_side_length + ~BespokeSinglePlant.plant_optimizer + ~BespokeSinglePlant.res_df + ~BespokeSinglePlant.sam_sys_inputs + ~BespokeSinglePlant.sc_point + ~BespokeSinglePlant.wind_dist + ~BespokeSinglePlant.wind_plant_pd + ~BespokeSinglePlant.wind_plant_ts + ~BespokeSinglePlant.years + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.bespoke.bespoke.BespokeSinglePlantData.rst.txt b/_sources/_autosummary/reV.bespoke.bespoke.BespokeSinglePlantData.rst.txt new file mode 100644 index 000000000..0ff3fc97c --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.bespoke.BespokeSinglePlantData.rst.txt @@ -0,0 +1,23 @@ +reV.bespoke.bespoke.BespokeSinglePlantData +========================================== + +.. currentmodule:: reV.bespoke.bespoke + +.. autoclass:: BespokeSinglePlantData + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.bespoke.bespoke.BespokeWindPlants.rst.txt b/_sources/_autosummary/reV.bespoke.bespoke.BespokeWindPlants.rst.txt new file mode 100644 index 000000000..5aad04d6c --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.bespoke.BespokeWindPlants.rst.txt @@ -0,0 +1,39 @@ +reV.bespoke.bespoke.BespokeWindPlants +===================================== + +.. currentmodule:: reV.bespoke.bespoke + +.. autoclass:: BespokeWindPlants + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~BespokeWindPlants.run + ~BespokeWindPlants.run_parallel + ~BespokeWindPlants.run_serial + ~BespokeWindPlants.sam_sys_inputs_with_site_data + ~BespokeWindPlants.save_outputs + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BespokeWindPlants.completed_gids + ~BespokeWindPlants.gids + ~BespokeWindPlants.meta + ~BespokeWindPlants.outputs + ~BespokeWindPlants.shape + ~BespokeWindPlants.slice_lookup + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.bespoke.bespoke.rst.txt b/_sources/_autosummary/reV.bespoke.bespoke.rst.txt new file mode 100644 index 000000000..06b22e6e1 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.bespoke.rst.txt @@ -0,0 +1,34 @@ +reV.bespoke.bespoke +=================== + +.. automodule:: reV.bespoke.bespoke + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + BespokeMultiPlantData + BespokeSinglePlant + BespokeSinglePlantData + BespokeWindPlants + + + + + + + + + diff --git a/_sources/_autosummary/reV.bespoke.cli_bespoke.rst.txt b/_sources/_autosummary/reV.bespoke.cli_bespoke.rst.txt new file mode 100644 index 000000000..1513a33ea --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.cli_bespoke.rst.txt @@ -0,0 +1,23 @@ +reV.bespoke.cli\_bespoke +======================== + +.. automodule:: reV.bespoke.cli_bespoke + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.bespoke.gradient_free.GeneticAlgorithm.rst.txt b/_sources/_autosummary/reV.bespoke.gradient_free.GeneticAlgorithm.rst.txt new file mode 100644 index 000000000..5f4fde1e5 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.gradient_free.GeneticAlgorithm.rst.txt @@ -0,0 +1,31 @@ +reV.bespoke.gradient\_free.GeneticAlgorithm +=========================================== + +.. currentmodule:: reV.bespoke.gradient_free + +.. 
autoclass:: GeneticAlgorithm + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~GeneticAlgorithm.chromosome_2_variables + ~GeneticAlgorithm.crossover + ~GeneticAlgorithm.initialize_bits + ~GeneticAlgorithm.initialize_design_variables + ~GeneticAlgorithm.initialize_fitness + ~GeneticAlgorithm.initialize_population + ~GeneticAlgorithm.mutate + ~GeneticAlgorithm.optimize_ga + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.bespoke.gradient_free.rst.txt b/_sources/_autosummary/reV.bespoke.gradient_free.rst.txt new file mode 100644 index 000000000..7264fb1a2 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.gradient_free.rst.txt @@ -0,0 +1,31 @@ +reV.bespoke.gradient\_free +========================== + +.. automodule:: reV.bespoke.gradient_free + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + GeneticAlgorithm + + + + + + + + + diff --git a/_sources/_autosummary/reV.bespoke.pack_turbs.PackTurbines.rst.txt b/_sources/_autosummary/reV.bespoke.pack_turbs.PackTurbines.rst.txt new file mode 100644 index 000000000..a4cdd7753 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.pack_turbs.PackTurbines.rst.txt @@ -0,0 +1,25 @@ +reV.bespoke.pack\_turbs.PackTurbines +==================================== + +.. currentmodule:: reV.bespoke.pack_turbs + +.. autoclass:: PackTurbines + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~PackTurbines.clear + ~PackTurbines.pack_turbines_poly + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.bespoke.pack_turbs.rst.txt b/_sources/_autosummary/reV.bespoke.pack_turbs.rst.txt new file mode 100644 index 000000000..b131179ef --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.pack_turbs.rst.txt @@ -0,0 +1,38 @@ +reV.bespoke.pack\_turbs +======================= + +.. automodule:: reV.bespoke.pack_turbs + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + smallest_area_with_tiebreakers + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + PackTurbines + + + + + + + + + diff --git a/_sources/_autosummary/reV.bespoke.pack_turbs.smallest_area_with_tiebreakers.rst.txt b/_sources/_autosummary/reV.bespoke.pack_turbs.smallest_area_with_tiebreakers.rst.txt new file mode 100644 index 000000000..ffea71590 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.pack_turbs.smallest_area_with_tiebreakers.rst.txt @@ -0,0 +1,6 @@ +reV.bespoke.pack\_turbs.smallest\_area\_with\_tiebreakers +========================================================= + +.. currentmodule:: reV.bespoke.pack_turbs + +.. autofunction:: smallest_area_with_tiebreakers \ No newline at end of file diff --git a/_sources/_autosummary/reV.bespoke.place_turbines.PlaceTurbines.rst.txt b/_sources/_autosummary/reV.bespoke.place_turbines.PlaceTurbines.rst.txt new file mode 100644 index 000000000..38060a457 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.place_turbines.PlaceTurbines.rst.txt @@ -0,0 +1,51 @@ +reV.bespoke.place\_turbines.PlaceTurbines +========================================= + +.. currentmodule:: reV.bespoke.place_turbines + +.. autoclass:: PlaceTurbines + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~PlaceTurbines.capital_cost_per_kw + ~PlaceTurbines.define_exclusions + ~PlaceTurbines.initialize_packing + ~PlaceTurbines.optimization_objective + ~PlaceTurbines.optimize + ~PlaceTurbines.place_turbines + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~PlaceTurbines.aep + ~PlaceTurbines.area + ~PlaceTurbines.capacity + ~PlaceTurbines.capacity_density + ~PlaceTurbines.capital_cost + ~PlaceTurbines.convex_hull + ~PlaceTurbines.convex_hull_area + ~PlaceTurbines.convex_hull_capacity_density + ~PlaceTurbines.fixed_charge_rate + ~PlaceTurbines.fixed_operating_cost + ~PlaceTurbines.full_cell_area + ~PlaceTurbines.full_cell_capacity_density + ~PlaceTurbines.nturbs + ~PlaceTurbines.objective + ~PlaceTurbines.turbine_x + ~PlaceTurbines.turbine_y + ~PlaceTurbines.variable_operating_cost + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.bespoke.place_turbines.none_until_optimized.rst.txt b/_sources/_autosummary/reV.bespoke.place_turbines.none_until_optimized.rst.txt new file mode 100644 index 000000000..a1aa5d135 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.place_turbines.none_until_optimized.rst.txt @@ -0,0 +1,6 @@ +reV.bespoke.place\_turbines.none\_until\_optimized +================================================== + +.. currentmodule:: reV.bespoke.place_turbines + +.. autofunction:: none_until_optimized \ No newline at end of file diff --git a/_sources/_autosummary/reV.bespoke.place_turbines.rst.txt b/_sources/_autosummary/reV.bespoke.place_turbines.rst.txt new file mode 100644 index 000000000..0f9f54a13 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.place_turbines.rst.txt @@ -0,0 +1,38 @@ +reV.bespoke.place\_turbines +=========================== + +.. automodule:: reV.bespoke.place_turbines + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + none_until_optimized + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + PlaceTurbines + + + + + + + + + diff --git a/_sources/_autosummary/reV.bespoke.plotting_functions.get_xy.rst.txt b/_sources/_autosummary/reV.bespoke.plotting_functions.get_xy.rst.txt new file mode 100644 index 000000000..ffb3a9730 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.plotting_functions.get_xy.rst.txt @@ -0,0 +1,6 @@ +reV.bespoke.plotting\_functions.get\_xy +======================================= + +.. currentmodule:: reV.bespoke.plotting_functions + +.. autofunction:: get_xy \ No newline at end of file diff --git a/_sources/_autosummary/reV.bespoke.plotting_functions.plot_poly.rst.txt b/_sources/_autosummary/reV.bespoke.plotting_functions.plot_poly.rst.txt new file mode 100644 index 000000000..dbc48e8c2 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.plotting_functions.plot_poly.rst.txt @@ -0,0 +1,6 @@ +reV.bespoke.plotting\_functions.plot\_poly +========================================== + +.. currentmodule:: reV.bespoke.plotting_functions + +.. autofunction:: plot_poly \ No newline at end of file diff --git a/_sources/_autosummary/reV.bespoke.plotting_functions.plot_turbines.rst.txt b/_sources/_autosummary/reV.bespoke.plotting_functions.plot_turbines.rst.txt new file mode 100644 index 000000000..1ec1e7585 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.plotting_functions.plot_turbines.rst.txt @@ -0,0 +1,6 @@ +reV.bespoke.plotting\_functions.plot\_turbines +============================================== + +.. currentmodule:: reV.bespoke.plotting_functions + +.. 
autofunction:: plot_turbines \ No newline at end of file diff --git a/_sources/_autosummary/reV.bespoke.plotting_functions.plot_windrose.rst.txt b/_sources/_autosummary/reV.bespoke.plotting_functions.plot_windrose.rst.txt new file mode 100644 index 000000000..a3469a6d5 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.plotting_functions.plot_windrose.rst.txt @@ -0,0 +1,6 @@ +reV.bespoke.plotting\_functions.plot\_windrose +============================================== + +.. currentmodule:: reV.bespoke.plotting_functions + +.. autofunction:: plot_windrose \ No newline at end of file diff --git a/_sources/_autosummary/reV.bespoke.plotting_functions.rst.txt b/_sources/_autosummary/reV.bespoke.plotting_functions.rst.txt new file mode 100644 index 000000000..6e571f644 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.plotting_functions.rst.txt @@ -0,0 +1,33 @@ +reV.bespoke.plotting\_functions +=============================== + +.. automodule:: reV.bespoke.plotting_functions + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + get_xy + plot_poly + plot_turbines + plot_windrose + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.bespoke.rst.txt b/_sources/_autosummary/reV.bespoke.rst.txt new file mode 100644 index 000000000..960062124 --- /dev/null +++ b/_sources/_autosummary/reV.bespoke.rst.txt @@ -0,0 +1,35 @@ +reV.bespoke +=========== + +.. automodule:: reV.bespoke + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.bespoke.bespoke + reV.bespoke.cli_bespoke + reV.bespoke.gradient_free + reV.bespoke.pack_turbs + reV.bespoke.place_turbines + reV.bespoke.plotting_functions + diff --git a/_sources/_autosummary/reV.cli.rst.txt b/_sources/_autosummary/reV.cli.rst.txt new file mode 100644 index 000000000..aadb169e2 --- /dev/null +++ b/_sources/_autosummary/reV.cli.rst.txt @@ -0,0 +1,23 @@ +reV.cli +======= + +.. 
automodule:: reV.cli + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.config.base_analysis_config.AnalysisConfig.rst.txt b/_sources/_autosummary/reV.config.base_analysis_config.AnalysisConfig.rst.txt new file mode 100644 index 000000000..8894ea139 --- /dev/null +++ b/_sources/_autosummary/reV.config.base_analysis_config.AnalysisConfig.rst.txt @@ -0,0 +1,54 @@ +reV.config.base\_analysis\_config.AnalysisConfig +================================================ + +.. currentmodule:: reV.config.base_analysis_config + +.. autoclass:: AnalysisConfig + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~AnalysisConfig.check_files + ~AnalysisConfig.check_overwrite_keys + ~AnalysisConfig.clear + ~AnalysisConfig.copy + ~AnalysisConfig.fromkeys + ~AnalysisConfig.get + ~AnalysisConfig.items + ~AnalysisConfig.keys + ~AnalysisConfig.pop + ~AnalysisConfig.popitem + ~AnalysisConfig.resolve_path + ~AnalysisConfig.set_self_dict + ~AnalysisConfig.setdefault + ~AnalysisConfig.str_replace_and_resolve + ~AnalysisConfig.update + ~AnalysisConfig.values + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~AnalysisConfig.NAME + ~AnalysisConfig.REQUIREMENTS + ~AnalysisConfig.STR_REP + ~AnalysisConfig.analysis_years + ~AnalysisConfig.config_dir + ~AnalysisConfig.config_keys + ~AnalysisConfig.execution_control + ~AnalysisConfig.log_directory + ~AnalysisConfig.log_level + ~AnalysisConfig.name + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.config.base_analysis_config.rst.txt b/_sources/_autosummary/reV.config.base_analysis_config.rst.txt new file mode 100644 index 000000000..7a9158f2b --- /dev/null +++ b/_sources/_autosummary/reV.config.base_analysis_config.rst.txt @@ -0,0 +1,31 @@ +reV.config.base\_analysis\_config +================================= + +.. automodule:: reV.config.base_analysis_config + + + + + + + + + + + + .. 
rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + AnalysisConfig + + + + + + + + + diff --git a/_sources/_autosummary/reV.config.base_config.BaseConfig.rst.txt b/_sources/_autosummary/reV.config.base_config.BaseConfig.rst.txt new file mode 100644 index 000000000..cc1b9973e --- /dev/null +++ b/_sources/_autosummary/reV.config.base_config.BaseConfig.rst.txt @@ -0,0 +1,50 @@ +reV.config.base\_config.BaseConfig +================================== + +.. currentmodule:: reV.config.base_config + +.. autoclass:: BaseConfig + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BaseConfig.check_files + ~BaseConfig.check_overwrite_keys + ~BaseConfig.clear + ~BaseConfig.copy + ~BaseConfig.fromkeys + ~BaseConfig.get + ~BaseConfig.items + ~BaseConfig.keys + ~BaseConfig.pop + ~BaseConfig.popitem + ~BaseConfig.resolve_path + ~BaseConfig.set_self_dict + ~BaseConfig.setdefault + ~BaseConfig.str_replace_and_resolve + ~BaseConfig.update + ~BaseConfig.values + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BaseConfig.REQUIREMENTS + ~BaseConfig.STR_REP + ~BaseConfig.config_dir + ~BaseConfig.config_keys + ~BaseConfig.log_level + ~BaseConfig.name + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.config.base_config.rst.txt b/_sources/_autosummary/reV.config.base_config.rst.txt new file mode 100644 index 000000000..0860b68ca --- /dev/null +++ b/_sources/_autosummary/reV.config.base_config.rst.txt @@ -0,0 +1,31 @@ +reV.config.base\_config +======================= + +.. automodule:: reV.config.base_config + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + BaseConfig + + + + + + + + + diff --git a/_sources/_autosummary/reV.config.cli_project_points.rst.txt b/_sources/_autosummary/reV.config.cli_project_points.rst.txt new file mode 100644 index 000000000..651728126 --- /dev/null +++ b/_sources/_autosummary/reV.config.cli_project_points.rst.txt @@ -0,0 +1,23 @@ +reV.config.cli\_project\_points +=============================== + +.. automodule:: reV.config.cli_project_points + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.config.curtailment.Curtailment.rst.txt b/_sources/_autosummary/reV.config.curtailment.Curtailment.rst.txt new file mode 100644 index 000000000..9d64d08fb --- /dev/null +++ b/_sources/_autosummary/reV.config.curtailment.Curtailment.rst.txt @@ -0,0 +1,59 @@ +reV.config.curtailment.Curtailment +================================== + +.. currentmodule:: reV.config.curtailment + +.. autoclass:: Curtailment + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Curtailment.check_files + ~Curtailment.check_overwrite_keys + ~Curtailment.clear + ~Curtailment.copy + ~Curtailment.fromkeys + ~Curtailment.get + ~Curtailment.items + ~Curtailment.keys + ~Curtailment.pop + ~Curtailment.popitem + ~Curtailment.resolve_path + ~Curtailment.set_self_dict + ~Curtailment.setdefault + ~Curtailment.str_replace_and_resolve + ~Curtailment.update + ~Curtailment.values + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~Curtailment.REQUIREMENTS + ~Curtailment.STR_REP + ~Curtailment.config_dir + ~Curtailment.config_keys + ~Curtailment.date_range + ~Curtailment.dawn_dusk + ~Curtailment.equation + ~Curtailment.log_level + ~Curtailment.months + ~Curtailment.name + ~Curtailment.precipitation + ~Curtailment.probability + ~Curtailment.random_seed + ~Curtailment.temperature + ~Curtailment.wind_speed + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.config.curtailment.rst.txt b/_sources/_autosummary/reV.config.curtailment.rst.txt new file mode 100644 index 000000000..3f3135ba8 --- /dev/null +++ b/_sources/_autosummary/reV.config.curtailment.rst.txt @@ -0,0 +1,31 @@ +reV.config.curtailment +====================== + +.. automodule:: reV.config.curtailment + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Curtailment + + + + + + + + + diff --git a/_sources/_autosummary/reV.config.execution.BaseExecutionConfig.rst.txt b/_sources/_autosummary/reV.config.execution.BaseExecutionConfig.rst.txt new file mode 100644 index 000000000..5a91a026e --- /dev/null +++ b/_sources/_autosummary/reV.config.execution.BaseExecutionConfig.rst.txt @@ -0,0 +1,56 @@ +reV.config.execution.BaseExecutionConfig +======================================== + +.. currentmodule:: reV.config.execution + +.. autoclass:: BaseExecutionConfig + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~BaseExecutionConfig.check_files + ~BaseExecutionConfig.check_overwrite_keys + ~BaseExecutionConfig.clear + ~BaseExecutionConfig.copy + ~BaseExecutionConfig.fromkeys + ~BaseExecutionConfig.get + ~BaseExecutionConfig.items + ~BaseExecutionConfig.keys + ~BaseExecutionConfig.pop + ~BaseExecutionConfig.popitem + ~BaseExecutionConfig.resolve_path + ~BaseExecutionConfig.set_self_dict + ~BaseExecutionConfig.setdefault + ~BaseExecutionConfig.str_replace_and_resolve + ~BaseExecutionConfig.update + ~BaseExecutionConfig.values + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BaseExecutionConfig.REQUIREMENTS + ~BaseExecutionConfig.STR_REP + ~BaseExecutionConfig.config_dir + ~BaseExecutionConfig.config_keys + ~BaseExecutionConfig.log_level + ~BaseExecutionConfig.max_workers + ~BaseExecutionConfig.memory_utilization_limit + ~BaseExecutionConfig.name + ~BaseExecutionConfig.nodes + ~BaseExecutionConfig.option + ~BaseExecutionConfig.sh_script + ~BaseExecutionConfig.sites_per_worker + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.config.execution.HPCConfig.rst.txt b/_sources/_autosummary/reV.config.execution.HPCConfig.rst.txt new file mode 100644 index 000000000..25c743ffc --- /dev/null +++ b/_sources/_autosummary/reV.config.execution.HPCConfig.rst.txt @@ -0,0 +1,60 @@ +reV.config.execution.HPCConfig +============================== + +.. currentmodule:: reV.config.execution + +.. autoclass:: HPCConfig + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~HPCConfig.check_files + ~HPCConfig.check_overwrite_keys + ~HPCConfig.clear + ~HPCConfig.copy + ~HPCConfig.fromkeys + ~HPCConfig.get + ~HPCConfig.items + ~HPCConfig.keys + ~HPCConfig.pop + ~HPCConfig.popitem + ~HPCConfig.resolve_path + ~HPCConfig.set_self_dict + ~HPCConfig.setdefault + ~HPCConfig.str_replace_and_resolve + ~HPCConfig.update + ~HPCConfig.values + + + + + + .. 
rubric:: Attributes + + .. autosummary:: + + ~HPCConfig.REQUIREMENTS + ~HPCConfig.STR_REP + ~HPCConfig.allocation + ~HPCConfig.conda_env + ~HPCConfig.config_dir + ~HPCConfig.config_keys + ~HPCConfig.feature + ~HPCConfig.log_level + ~HPCConfig.max_workers + ~HPCConfig.memory_utilization_limit + ~HPCConfig.module + ~HPCConfig.name + ~HPCConfig.nodes + ~HPCConfig.option + ~HPCConfig.sh_script + ~HPCConfig.sites_per_worker + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.config.execution.SlurmConfig.rst.txt b/_sources/_autosummary/reV.config.execution.SlurmConfig.rst.txt new file mode 100644 index 000000000..c28a8a898 --- /dev/null +++ b/_sources/_autosummary/reV.config.execution.SlurmConfig.rst.txt @@ -0,0 +1,62 @@ +reV.config.execution.SlurmConfig +================================ + +.. currentmodule:: reV.config.execution + +.. autoclass:: SlurmConfig + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SlurmConfig.check_files + ~SlurmConfig.check_overwrite_keys + ~SlurmConfig.clear + ~SlurmConfig.copy + ~SlurmConfig.fromkeys + ~SlurmConfig.get + ~SlurmConfig.items + ~SlurmConfig.keys + ~SlurmConfig.pop + ~SlurmConfig.popitem + ~SlurmConfig.resolve_path + ~SlurmConfig.set_self_dict + ~SlurmConfig.setdefault + ~SlurmConfig.str_replace_and_resolve + ~SlurmConfig.update + ~SlurmConfig.values + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~SlurmConfig.REQUIREMENTS + ~SlurmConfig.STR_REP + ~SlurmConfig.allocation + ~SlurmConfig.conda_env + ~SlurmConfig.config_dir + ~SlurmConfig.config_keys + ~SlurmConfig.feature + ~SlurmConfig.log_level + ~SlurmConfig.max_workers + ~SlurmConfig.memory + ~SlurmConfig.memory_utilization_limit + ~SlurmConfig.module + ~SlurmConfig.name + ~SlurmConfig.nodes + ~SlurmConfig.option + ~SlurmConfig.sh_script + ~SlurmConfig.sites_per_worker + ~SlurmConfig.walltime + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.config.execution.rst.txt b/_sources/_autosummary/reV.config.execution.rst.txt new file mode 100644 index 000000000..24a88b876 --- /dev/null +++ b/_sources/_autosummary/reV.config.execution.rst.txt @@ -0,0 +1,33 @@ +reV.config.execution +==================== + +.. automodule:: reV.config.execution + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + BaseExecutionConfig + HPCConfig + SlurmConfig + + + + + + + + + diff --git a/_sources/_autosummary/reV.config.output_request.OutputRequest.rst.txt b/_sources/_autosummary/reV.config.output_request.OutputRequest.rst.txt new file mode 100644 index 000000000..5120c9329 --- /dev/null +++ b/_sources/_autosummary/reV.config.output_request.OutputRequest.rst.txt @@ -0,0 +1,40 @@ +reV.config.output\_request.OutputRequest +======================================== + +.. currentmodule:: reV.config.output_request + +.. autoclass:: OutputRequest + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~OutputRequest.append + ~OutputRequest.clear + ~OutputRequest.copy + ~OutputRequest.count + ~OutputRequest.extend + ~OutputRequest.index + ~OutputRequest.insert + ~OutputRequest.pop + ~OutputRequest.remove + ~OutputRequest.reverse + ~OutputRequest.sort + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~OutputRequest.CORRECTIONS + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.config.output_request.SAMOutputRequest.rst.txt b/_sources/_autosummary/reV.config.output_request.SAMOutputRequest.rst.txt new file mode 100644 index 000000000..91a33ee27 --- /dev/null +++ b/_sources/_autosummary/reV.config.output_request.SAMOutputRequest.rst.txt @@ -0,0 +1,40 @@ +reV.config.output\_request.SAMOutputRequest +=========================================== + +.. currentmodule:: reV.config.output_request + +.. autoclass:: SAMOutputRequest + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SAMOutputRequest.append + ~SAMOutputRequest.clear + ~SAMOutputRequest.copy + ~SAMOutputRequest.count + ~SAMOutputRequest.extend + ~SAMOutputRequest.index + ~SAMOutputRequest.insert + ~SAMOutputRequest.pop + ~SAMOutputRequest.remove + ~SAMOutputRequest.reverse + ~SAMOutputRequest.sort + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SAMOutputRequest.CORRECTIONS + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.config.output_request.rst.txt b/_sources/_autosummary/reV.config.output_request.rst.txt new file mode 100644 index 000000000..63a4f4a43 --- /dev/null +++ b/_sources/_autosummary/reV.config.output_request.rst.txt @@ -0,0 +1,32 @@ +reV.config.output\_request +========================== + +.. automodule:: reV.config.output_request + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + OutputRequest + SAMOutputRequest + + + + + + + + + diff --git a/_sources/_autosummary/reV.config.project_points.PointsControl.rst.txt b/_sources/_autosummary/reV.config.project_points.PointsControl.rst.txt new file mode 100644 index 000000000..dc986587d --- /dev/null +++ b/_sources/_autosummary/reV.config.project_points.PointsControl.rst.txt @@ -0,0 +1,34 @@ +reV.config.project\_points.PointsControl +======================================== + +.. currentmodule:: reV.config.project_points + +.. autoclass:: PointsControl + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~PointsControl.split + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~PointsControl.N + ~PointsControl.project_points + ~PointsControl.sites + ~PointsControl.sites_per_split + ~PointsControl.split_range + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.config.project_points.ProjectPoints.rst.txt b/_sources/_autosummary/reV.config.project_points.ProjectPoints.rst.txt new file mode 100644 index 000000000..4f2ee4ef8 --- /dev/null +++ b/_sources/_autosummary/reV.config.project_points.ProjectPoints.rst.txt @@ -0,0 +1,46 @@ +reV.config.project\_points.ProjectPoints +======================================== + +.. currentmodule:: reV.config.project_points + +.. autoclass:: ProjectPoints + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ProjectPoints.get_sites_from_config + ~ProjectPoints.index + ~ProjectPoints.join_df + ~ProjectPoints.lat_lon_coords + ~ProjectPoints.regions + ~ProjectPoints.split + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~ProjectPoints.all_sam_input_keys + ~ProjectPoints.curtailment + ~ProjectPoints.d + ~ProjectPoints.df + ~ProjectPoints.gids + ~ProjectPoints.h + ~ProjectPoints.sam_config_ids + ~ProjectPoints.sam_config_obj + ~ProjectPoints.sam_inputs + ~ProjectPoints.sites + ~ProjectPoints.sites_as_slice + ~ProjectPoints.tech + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.config.project_points.rst.txt b/_sources/_autosummary/reV.config.project_points.rst.txt new file mode 100644 index 000000000..ddebda646 --- /dev/null +++ b/_sources/_autosummary/reV.config.project_points.rst.txt @@ -0,0 +1,32 @@ +reV.config.project\_points +========================== + +.. automodule:: reV.config.project_points + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + PointsControl + ProjectPoints + + + + + + + + + diff --git a/_sources/_autosummary/reV.config.rst.txt b/_sources/_autosummary/reV.config.rst.txt new file mode 100644 index 000000000..e2f225538 --- /dev/null +++ b/_sources/_autosummary/reV.config.rst.txt @@ -0,0 +1,37 @@ +reV.config +========== + +.. automodule:: reV.config + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.config.base_analysis_config + reV.config.base_config + reV.config.cli_project_points + reV.config.curtailment + reV.config.execution + reV.config.output_request + reV.config.project_points + reV.config.sam_config + diff --git a/_sources/_autosummary/reV.config.sam_config.SAMConfig.rst.txt b/_sources/_autosummary/reV.config.sam_config.SAMConfig.rst.txt new file mode 100644 index 000000000..ad1c01cf0 --- /dev/null +++ b/_sources/_autosummary/reV.config.sam_config.SAMConfig.rst.txt @@ -0,0 +1,56 @@ +reV.config.sam\_config.SAMConfig +================================ + +.. currentmodule:: reV.config.sam_config + +.. 
autoclass:: SAMConfig + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SAMConfig.check_files + ~SAMConfig.check_overwrite_keys + ~SAMConfig.clear + ~SAMConfig.copy + ~SAMConfig.fromkeys + ~SAMConfig.get + ~SAMConfig.items + ~SAMConfig.keys + ~SAMConfig.pop + ~SAMConfig.popitem + ~SAMConfig.resolve_path + ~SAMConfig.set_self_dict + ~SAMConfig.setdefault + ~SAMConfig.str_replace_and_resolve + ~SAMConfig.update + ~SAMConfig.values + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SAMConfig.REQUIREMENTS + ~SAMConfig.STR_REP + ~SAMConfig.bifacial + ~SAMConfig.clearsky + ~SAMConfig.config_dir + ~SAMConfig.config_keys + ~SAMConfig.downscale + ~SAMConfig.icing + ~SAMConfig.inputs + ~SAMConfig.log_level + ~SAMConfig.name + ~SAMConfig.time_index_step + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.config.sam_config.SAMInputsChecker.rst.txt b/_sources/_autosummary/reV.config.sam_config.SAMInputsChecker.rst.txt new file mode 100644 index 000000000..177a3c4d4 --- /dev/null +++ b/_sources/_autosummary/reV.config.sam_config.SAMInputsChecker.rst.txt @@ -0,0 +1,31 @@ +reV.config.sam\_config.SAMInputsChecker +======================================= + +.. currentmodule:: reV.config.sam_config + +.. autoclass:: SAMInputsChecker + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SAMInputsChecker.check + ~SAMInputsChecker.check_pv + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~SAMInputsChecker.KEYS_PV + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.config.sam_config.rst.txt b/_sources/_autosummary/reV.config.sam_config.rst.txt new file mode 100644 index 000000000..3ca2a2f3b --- /dev/null +++ b/_sources/_autosummary/reV.config.sam_config.rst.txt @@ -0,0 +1,32 @@ +reV.config.sam\_config +====================== + +.. automodule:: reV.config.sam_config + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + SAMConfig + SAMInputsChecker + + + + + + + + + diff --git a/_sources/_autosummary/reV.econ.cli_econ.rst.txt b/_sources/_autosummary/reV.econ.cli_econ.rst.txt new file mode 100644 index 000000000..5d0e3d0f1 --- /dev/null +++ b/_sources/_autosummary/reV.econ.cli_econ.rst.txt @@ -0,0 +1,23 @@ +reV.econ.cli\_econ +================== + +.. automodule:: reV.econ.cli_econ + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.econ.econ.Econ.rst.txt b/_sources/_autosummary/reV.econ.econ.Econ.rst.txt new file mode 100644 index 000000000..f40303455 --- /dev/null +++ b/_sources/_autosummary/reV.econ.econ.Econ.rst.txt @@ -0,0 +1,58 @@ +reV.econ.econ.Econ +================== + +.. currentmodule:: reV.econ.econ + +.. autoclass:: Econ + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Econ.add_site_data_to_pp + ~Econ.flush + ~Econ.get_pc + ~Econ.get_sites_per_worker + ~Econ.handle_leap_ti + ~Econ.run + ~Econ.site_index + ~Econ.unpack_futures + ~Econ.unpack_output + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~Econ.ECON_ATTRS + ~Econ.LCOE_ARGS + ~Econ.OPTIONS + ~Econ.OUT_ATTRS + ~Econ.cf_file + ~Econ.meta + ~Econ.out + ~Econ.out_chunk + ~Econ.output_request + ~Econ.points_control + ~Econ.project_points + ~Econ.run_attrs + ~Econ.sam_configs + ~Econ.sam_metas + ~Econ.sam_module + ~Econ.site_data + ~Econ.site_limit + ~Econ.site_mem + ~Econ.tech + ~Econ.time_index + ~Econ.year + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.econ.econ.rst.txt b/_sources/_autosummary/reV.econ.econ.rst.txt new file mode 100644 index 000000000..05eca0bb6 --- /dev/null +++ b/_sources/_autosummary/reV.econ.econ.rst.txt @@ -0,0 +1,31 @@ +reV.econ.econ +============= + +.. automodule:: reV.econ.econ + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Econ + + + + + + + + + diff --git a/_sources/_autosummary/reV.econ.economies_of_scale.EconomiesOfScale.rst.txt b/_sources/_autosummary/reV.econ.economies_of_scale.EconomiesOfScale.rst.txt new file mode 100644 index 000000000..b63adbe47 --- /dev/null +++ b/_sources/_autosummary/reV.econ.economies_of_scale.EconomiesOfScale.rst.txt @@ -0,0 +1,41 @@ +reV.econ.economies\_of\_scale.EconomiesOfScale +============================================== + +.. currentmodule:: reV.econ.economies_of_scale + +.. autoclass:: EconomiesOfScale + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~EconomiesOfScale.is_method + ~EconomiesOfScale.is_num + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~EconomiesOfScale.aep + ~EconomiesOfScale.capital_cost_scalar + ~EconomiesOfScale.fcr + ~EconomiesOfScale.foc + ~EconomiesOfScale.raw_capital_cost + ~EconomiesOfScale.raw_lcoe + ~EconomiesOfScale.scaled_capital_cost + ~EconomiesOfScale.scaled_lcoe + ~EconomiesOfScale.system_capacity + ~EconomiesOfScale.vars + ~EconomiesOfScale.voc + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.econ.economies_of_scale.rst.txt b/_sources/_autosummary/reV.econ.economies_of_scale.rst.txt new file mode 100644 index 000000000..4d0a455f1 --- /dev/null +++ b/_sources/_autosummary/reV.econ.economies_of_scale.rst.txt @@ -0,0 +1,31 @@ +reV.econ.economies\_of\_scale +============================= + +.. automodule:: reV.econ.economies_of_scale + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + EconomiesOfScale + + + + + + + + + diff --git a/_sources/_autosummary/reV.econ.rst.txt b/_sources/_autosummary/reV.econ.rst.txt new file mode 100644 index 000000000..081433b17 --- /dev/null +++ b/_sources/_autosummary/reV.econ.rst.txt @@ -0,0 +1,33 @@ +reV.econ +======== + +.. automodule:: reV.econ + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.econ.cli_econ + reV.econ.econ + reV.econ.economies_of_scale + reV.econ.utilities + diff --git a/_sources/_autosummary/reV.econ.utilities.lcoe_fcr.rst.txt b/_sources/_autosummary/reV.econ.utilities.lcoe_fcr.rst.txt new file mode 100644 index 000000000..cfa9529a4 --- /dev/null +++ b/_sources/_autosummary/reV.econ.utilities.lcoe_fcr.rst.txt @@ -0,0 +1,6 @@ +reV.econ.utilities.lcoe\_fcr +============================ + +.. currentmodule:: reV.econ.utilities + +.. 
autofunction:: lcoe_fcr \ No newline at end of file diff --git a/_sources/_autosummary/reV.econ.utilities.rst.txt b/_sources/_autosummary/reV.econ.utilities.rst.txt new file mode 100644 index 000000000..c8e519174 --- /dev/null +++ b/_sources/_autosummary/reV.econ.utilities.rst.txt @@ -0,0 +1,30 @@ +reV.econ.utilities +================== + +.. automodule:: reV.econ.utilities + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + lcoe_fcr + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.generation.base.BaseGen.rst.txt b/_sources/_autosummary/reV.generation.base.BaseGen.rst.txt new file mode 100644 index 000000000..9d1606e96 --- /dev/null +++ b/_sources/_autosummary/reV.generation.base.BaseGen.rst.txt @@ -0,0 +1,56 @@ +reV.generation.base.BaseGen +=========================== + +.. currentmodule:: reV.generation.base + +.. autoclass:: BaseGen + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~BaseGen.add_site_data_to_pp + ~BaseGen.flush + ~BaseGen.get_pc + ~BaseGen.get_sites_per_worker + ~BaseGen.handle_leap_ti + ~BaseGen.site_index + ~BaseGen.unpack_futures + ~BaseGen.unpack_output + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~BaseGen.ECON_ATTRS + ~BaseGen.LCOE_ARGS + ~BaseGen.OPTIONS + ~BaseGen.OUT_ATTRS + ~BaseGen.meta + ~BaseGen.out + ~BaseGen.out_chunk + ~BaseGen.output_request + ~BaseGen.points_control + ~BaseGen.project_points + ~BaseGen.run_attrs + ~BaseGen.sam_configs + ~BaseGen.sam_metas + ~BaseGen.sam_module + ~BaseGen.site_data + ~BaseGen.site_limit + ~BaseGen.site_mem + ~BaseGen.tech + ~BaseGen.time_index + ~BaseGen.year + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.generation.base.rst.txt b/_sources/_autosummary/reV.generation.base.rst.txt new file mode 100644 index 000000000..eeb878402 --- /dev/null +++ b/_sources/_autosummary/reV.generation.base.rst.txt @@ -0,0 +1,31 @@ +reV.generation.base +=================== + +.. automodule:: reV.generation.base + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + BaseGen + + + + + + + + + diff --git a/_sources/_autosummary/reV.generation.cli_gen.rst.txt b/_sources/_autosummary/reV.generation.cli_gen.rst.txt new file mode 100644 index 000000000..85168f5e6 --- /dev/null +++ b/_sources/_autosummary/reV.generation.cli_gen.rst.txt @@ -0,0 +1,23 @@ +reV.generation.cli\_gen +======================= + +.. automodule:: reV.generation.cli_gen + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.generation.generation.Gen.rst.txt b/_sources/_autosummary/reV.generation.generation.Gen.rst.txt new file mode 100644 index 000000000..7c990a04a --- /dev/null +++ b/_sources/_autosummary/reV.generation.generation.Gen.rst.txt @@ -0,0 +1,59 @@ +reV.generation.generation.Gen +============================= + +.. currentmodule:: reV.generation.generation + +.. autoclass:: Gen + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~Gen.add_site_data_to_pp + ~Gen.flush + ~Gen.get_pc + ~Gen.get_sites_per_worker + ~Gen.handle_leap_ti + ~Gen.run + ~Gen.site_index + ~Gen.unpack_futures + ~Gen.unpack_output + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Gen.ECON_ATTRS + ~Gen.LCOE_ARGS + ~Gen.OPTIONS + ~Gen.OUT_ATTRS + ~Gen.lr_res_file + ~Gen.meta + ~Gen.out + ~Gen.out_chunk + ~Gen.output_request + ~Gen.points_control + ~Gen.project_points + ~Gen.res_file + ~Gen.run_attrs + ~Gen.sam_configs + ~Gen.sam_metas + ~Gen.sam_module + ~Gen.site_data + ~Gen.site_limit + ~Gen.site_mem + ~Gen.tech + ~Gen.time_index + ~Gen.year + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.generation.generation.rst.txt b/_sources/_autosummary/reV.generation.generation.rst.txt new file mode 100644 index 000000000..118b7df42 --- /dev/null +++ b/_sources/_autosummary/reV.generation.generation.rst.txt @@ -0,0 +1,31 @@ +reV.generation.generation +========================= + +.. automodule:: reV.generation.generation + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Gen + + + + + + + + + diff --git a/_sources/_autosummary/reV.generation.rst.txt b/_sources/_autosummary/reV.generation.rst.txt new file mode 100644 index 000000000..e49064617 --- /dev/null +++ b/_sources/_autosummary/reV.generation.rst.txt @@ -0,0 +1,32 @@ +reV.generation +============== + +.. automodule:: reV.generation + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.generation.base + reV.generation.cli_gen + reV.generation.generation + diff --git a/_sources/_autosummary/reV.handlers.cli_collect.rst.txt b/_sources/_autosummary/reV.handlers.cli_collect.rst.txt new file mode 100644 index 000000000..85e92c92f --- /dev/null +++ b/_sources/_autosummary/reV.handlers.cli_collect.rst.txt @@ -0,0 +1,23 @@ +reV.handlers.cli\_collect +========================= + +.. 
automodule:: reV.handlers.cli_collect + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.handlers.cli_multi_year.rst.txt b/_sources/_autosummary/reV.handlers.cli_multi_year.rst.txt new file mode 100644 index 000000000..8e2b33b7f --- /dev/null +++ b/_sources/_autosummary/reV.handlers.cli_multi_year.rst.txt @@ -0,0 +1,23 @@ +reV.handlers.cli\_multi\_year +============================= + +.. automodule:: reV.handlers.cli_multi_year + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.handlers.exclusions.ExclusionLayers.rst.txt b/_sources/_autosummary/reV.handlers.exclusions.ExclusionLayers.rst.txt new file mode 100644 index 000000000..185cf2112 --- /dev/null +++ b/_sources/_autosummary/reV.handlers.exclusions.ExclusionLayers.rst.txt @@ -0,0 +1,44 @@ +reV.handlers.exclusions.ExclusionLayers +======================================= + +.. currentmodule:: reV.handlers.exclusions + +.. autoclass:: ExclusionLayers + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ExclusionLayers.close + ~ExclusionLayers.get_layer_crs + ~ExclusionLayers.get_layer_description + ~ExclusionLayers.get_layer_profile + ~ExclusionLayers.get_layer_values + ~ExclusionLayers.get_nodata_value + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ExclusionLayers.chunks + ~ExclusionLayers.crs + ~ExclusionLayers.h5 + ~ExclusionLayers.iarr + ~ExclusionLayers.latitude + ~ExclusionLayers.layers + ~ExclusionLayers.longitude + ~ExclusionLayers.pixel_area + ~ExclusionLayers.profile + ~ExclusionLayers.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.handlers.exclusions.rst.txt b/_sources/_autosummary/reV.handlers.exclusions.rst.txt new file mode 100644 index 000000000..055b1532c --- /dev/null +++ b/_sources/_autosummary/reV.handlers.exclusions.rst.txt @@ -0,0 +1,31 @@ +reV.handlers.exclusions +======================= + +.. 
automodule:: reV.handlers.exclusions + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + ExclusionLayers + + + + + + + + + diff --git a/_sources/_autosummary/reV.handlers.multi_year.MultiYear.rst.txt b/_sources/_autosummary/reV.handlers.multi_year.MultiYear.rst.txt new file mode 100644 index 000000000..d630c29d4 --- /dev/null +++ b/_sources/_autosummary/reV.handlers.multi_year.MultiYear.rst.txt @@ -0,0 +1,86 @@ +reV.handlers.multi\_year.MultiYear +================================== + +.. currentmodule:: reV.handlers.multi_year + +.. autoclass:: MultiYear + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~MultiYear.CV + ~MultiYear.add_dataset + ~MultiYear.close + ~MultiYear.collect + ~MultiYear.collect_means + ~MultiYear.collect_profiles + ~MultiYear.df_str_decode + ~MultiYear.get_SAM_df + ~MultiYear.get_attrs + ~MultiYear.get_config + ~MultiYear.get_dset_properties + ~MultiYear.get_meta_arr + ~MultiYear.get_scale_factor + ~MultiYear.get_units + ~MultiYear.init_h5 + ~MultiYear.is_profile + ~MultiYear.means + ~MultiYear.open_dataset + ~MultiYear.parse_source_files_pattern + ~MultiYear.pass_through + ~MultiYear.preload_SAM + ~MultiYear.set_configs + ~MultiYear.set_version_attr + ~MultiYear.stdev + ~MultiYear.update_dset + ~MultiYear.write_dataset + ~MultiYear.write_means + ~MultiYear.write_profiles + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~MultiYear.ADD_ATTR + ~MultiYear.SAM_configs + ~MultiYear.SCALE_ATTR + ~MultiYear.UNIT_ATTR + ~MultiYear.adders + ~MultiYear.attrs + ~MultiYear.chunks + ~MultiYear.coordinates + ~MultiYear.data_version + ~MultiYear.datasets + ~MultiYear.dsets + ~MultiYear.dtypes + ~MultiYear.full_version_record + ~MultiYear.global_attrs + ~MultiYear.groups + ~MultiYear.h5 + ~MultiYear.lat_lon + ~MultiYear.meta + ~MultiYear.package + ~MultiYear.res_dsets + ~MultiYear.resource_datasets + ~MultiYear.run_attrs + ~MultiYear.scale_factors + ~MultiYear.shape + ~MultiYear.shapes + ~MultiYear.source + ~MultiYear.time_index + ~MultiYear.units + ~MultiYear.version + ~MultiYear.writable + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.handlers.multi_year.MultiYearGroup.rst.txt b/_sources/_autosummary/reV.handlers.multi_year.MultiYearGroup.rst.txt new file mode 100644 index 000000000..5309dc2b1 --- /dev/null +++ b/_sources/_autosummary/reV.handlers.multi_year.MultiYearGroup.rst.txt @@ -0,0 +1,32 @@ +reV.handlers.multi\_year.MultiYearGroup +======================================= + +.. currentmodule:: reV.handlers.multi_year + +.. autoclass:: MultiYearGroup + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~MultiYearGroup.dsets + ~MultiYearGroup.name + ~MultiYearGroup.pass_through_dsets + ~MultiYearGroup.source_files + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.handlers.multi_year.my_collect_groups.rst.txt b/_sources/_autosummary/reV.handlers.multi_year.my_collect_groups.rst.txt new file mode 100644 index 000000000..d9af35525 --- /dev/null +++ b/_sources/_autosummary/reV.handlers.multi_year.my_collect_groups.rst.txt @@ -0,0 +1,6 @@ +reV.handlers.multi\_year.my\_collect\_groups +============================================ + +.. 
currentmodule:: reV.handlers.multi_year + +.. autofunction:: my_collect_groups \ No newline at end of file diff --git a/_sources/_autosummary/reV.handlers.multi_year.rst.txt b/_sources/_autosummary/reV.handlers.multi_year.rst.txt new file mode 100644 index 000000000..9ffac0e8e --- /dev/null +++ b/_sources/_autosummary/reV.handlers.multi_year.rst.txt @@ -0,0 +1,39 @@ +reV.handlers.multi\_year +======================== + +.. automodule:: reV.handlers.multi_year + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + my_collect_groups + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + MultiYear + MultiYearGroup + + + + + + + + + diff --git a/_sources/_autosummary/reV.handlers.outputs.Outputs.rst.txt b/_sources/_autosummary/reV.handlers.outputs.Outputs.rst.txt new file mode 100644 index 000000000..de347f94e --- /dev/null +++ b/_sources/_autosummary/reV.handlers.outputs.Outputs.rst.txt @@ -0,0 +1,77 @@ +reV.handlers.outputs.Outputs +============================ + +.. currentmodule:: reV.handlers.outputs + +.. autoclass:: Outputs + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Outputs.add_dataset + ~Outputs.close + ~Outputs.df_str_decode + ~Outputs.get_SAM_df + ~Outputs.get_attrs + ~Outputs.get_config + ~Outputs.get_dset_properties + ~Outputs.get_meta_arr + ~Outputs.get_scale_factor + ~Outputs.get_units + ~Outputs.init_h5 + ~Outputs.open_dataset + ~Outputs.preload_SAM + ~Outputs.set_configs + ~Outputs.set_version_attr + ~Outputs.update_dset + ~Outputs.write_dataset + ~Outputs.write_means + ~Outputs.write_profiles + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~Outputs.ADD_ATTR + ~Outputs.SAM_configs + ~Outputs.SCALE_ATTR + ~Outputs.UNIT_ATTR + ~Outputs.adders + ~Outputs.attrs + ~Outputs.chunks + ~Outputs.coordinates + ~Outputs.data_version + ~Outputs.datasets + ~Outputs.dsets + ~Outputs.dtypes + ~Outputs.full_version_record + ~Outputs.global_attrs + ~Outputs.groups + ~Outputs.h5 + ~Outputs.lat_lon + ~Outputs.meta + ~Outputs.package + ~Outputs.res_dsets + ~Outputs.resource_datasets + ~Outputs.run_attrs + ~Outputs.scale_factors + ~Outputs.shape + ~Outputs.shapes + ~Outputs.source + ~Outputs.time_index + ~Outputs.units + ~Outputs.version + ~Outputs.writable + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.handlers.outputs.rst.txt b/_sources/_autosummary/reV.handlers.outputs.rst.txt new file mode 100644 index 000000000..4738dbfeb --- /dev/null +++ b/_sources/_autosummary/reV.handlers.outputs.rst.txt @@ -0,0 +1,31 @@ +reV.handlers.outputs +==================== + +.. automodule:: reV.handlers.outputs + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + Outputs + + + + + + + + + diff --git a/_sources/_autosummary/reV.handlers.rst.txt b/_sources/_autosummary/reV.handlers.rst.txt new file mode 100644 index 000000000..02ea393d3 --- /dev/null +++ b/_sources/_autosummary/reV.handlers.rst.txt @@ -0,0 +1,35 @@ +reV.handlers +============ + +.. automodule:: reV.handlers + + + + + + + + + + + + + + + + + + + +.. 
autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.handlers.cli_collect + reV.handlers.cli_multi_year + reV.handlers.exclusions + reV.handlers.multi_year + reV.handlers.outputs + reV.handlers.transmission + diff --git a/_sources/_autosummary/reV.handlers.transmission.TransmissionCosts.rst.txt b/_sources/_autosummary/reV.handlers.transmission.TransmissionCosts.rst.txt new file mode 100644 index 000000000..30c54154b --- /dev/null +++ b/_sources/_autosummary/reV.handlers.transmission.TransmissionCosts.rst.txt @@ -0,0 +1,30 @@ +reV.handlers.transmission.TransmissionCosts +=========================================== + +.. currentmodule:: reV.handlers.transmission + +.. autoclass:: TransmissionCosts + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~TransmissionCosts.available_capacity + ~TransmissionCosts.check_availability + ~TransmissionCosts.check_feature_dependencies + ~TransmissionCosts.connect + ~TransmissionCosts.cost + ~TransmissionCosts.feature_capacity + ~TransmissionCosts.feature_costs + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.handlers.transmission.TransmissionFeatures.rst.txt b/_sources/_autosummary/reV.handlers.transmission.TransmissionFeatures.rst.txt new file mode 100644 index 000000000..5f48c9aa9 --- /dev/null +++ b/_sources/_autosummary/reV.handlers.transmission.TransmissionFeatures.rst.txt @@ -0,0 +1,29 @@ +reV.handlers.transmission.TransmissionFeatures +============================================== + +.. currentmodule:: reV.handlers.transmission + +.. autoclass:: TransmissionFeatures + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~TransmissionFeatures.available_capacity + ~TransmissionFeatures.check_availability + ~TransmissionFeatures.check_feature_dependencies + ~TransmissionFeatures.connect + ~TransmissionFeatures.cost + ~TransmissionFeatures.feature_capacity + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.handlers.transmission.rst.txt b/_sources/_autosummary/reV.handlers.transmission.rst.txt new file mode 100644 index 000000000..f8fd328ac --- /dev/null +++ b/_sources/_autosummary/reV.handlers.transmission.rst.txt @@ -0,0 +1,32 @@ +reV.handlers.transmission +========================= + +.. automodule:: reV.handlers.transmission + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + TransmissionCosts + TransmissionFeatures + + + + + + + + + diff --git a/_sources/_autosummary/reV.hybrids.cli_hybrids.rst.txt b/_sources/_autosummary/reV.hybrids.cli_hybrids.rst.txt new file mode 100644 index 000000000..0692731dc --- /dev/null +++ b/_sources/_autosummary/reV.hybrids.cli_hybrids.rst.txt @@ -0,0 +1,23 @@ +reV.hybrids.cli\_hybrids +======================== + +.. automodule:: reV.hybrids.cli_hybrids + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity.rst.txt b/_sources/_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity.rst.txt new file mode 100644 index 000000000..611155dd7 --- /dev/null +++ b/_sources/_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity.rst.txt @@ -0,0 +1,6 @@ +reV.hybrids.hybrid\_methods.aggregate\_capacity +=============================================== + +.. currentmodule:: reV.hybrids.hybrid_methods + +.. 
autofunction:: aggregate_capacity \ No newline at end of file diff --git a/_sources/_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity_factor.rst.txt b/_sources/_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity_factor.rst.txt new file mode 100644 index 000000000..d2ad91be9 --- /dev/null +++ b/_sources/_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity_factor.rst.txt @@ -0,0 +1,6 @@ +reV.hybrids.hybrid\_methods.aggregate\_capacity\_factor +======================================================= + +.. currentmodule:: reV.hybrids.hybrid_methods + +.. autofunction:: aggregate_capacity_factor \ No newline at end of file diff --git a/_sources/_autosummary/reV.hybrids.hybrid_methods.aggregate_solar_capacity.rst.txt b/_sources/_autosummary/reV.hybrids.hybrid_methods.aggregate_solar_capacity.rst.txt new file mode 100644 index 000000000..8c454dcc9 --- /dev/null +++ b/_sources/_autosummary/reV.hybrids.hybrid_methods.aggregate_solar_capacity.rst.txt @@ -0,0 +1,6 @@ +reV.hybrids.hybrid\_methods.aggregate\_solar\_capacity +====================================================== + +.. currentmodule:: reV.hybrids.hybrid_methods + +.. autofunction:: aggregate_solar_capacity \ No newline at end of file diff --git a/_sources/_autosummary/reV.hybrids.hybrid_methods.aggregate_wind_capacity.rst.txt b/_sources/_autosummary/reV.hybrids.hybrid_methods.aggregate_wind_capacity.rst.txt new file mode 100644 index 000000000..df8616b94 --- /dev/null +++ b/_sources/_autosummary/reV.hybrids.hybrid_methods.aggregate_wind_capacity.rst.txt @@ -0,0 +1,6 @@ +reV.hybrids.hybrid\_methods.aggregate\_wind\_capacity +===================================================== + +.. currentmodule:: reV.hybrids.hybrid_methods + +.. 
autofunction:: aggregate_wind_capacity \ No newline at end of file diff --git a/_sources/_autosummary/reV.hybrids.hybrid_methods.rst.txt b/_sources/_autosummary/reV.hybrids.hybrid_methods.rst.txt new file mode 100644 index 000000000..94f17c65a --- /dev/null +++ b/_sources/_autosummary/reV.hybrids.hybrid_methods.rst.txt @@ -0,0 +1,33 @@ +reV.hybrids.hybrid\_methods +=========================== + +.. automodule:: reV.hybrids.hybrid_methods + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + aggregate_capacity + aggregate_capacity_factor + aggregate_solar_capacity + aggregate_wind_capacity + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.hybrids.hybrids.ColNameFormatter.rst.txt b/_sources/_autosummary/reV.hybrids.hybrids.ColNameFormatter.rst.txt new file mode 100644 index 000000000..f45cf9de9 --- /dev/null +++ b/_sources/_autosummary/reV.hybrids.hybrids.ColNameFormatter.rst.txt @@ -0,0 +1,30 @@ +reV.hybrids.hybrids.ColNameFormatter +==================================== + +.. currentmodule:: reV.hybrids.hybrids + +.. autoclass:: ColNameFormatter + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ColNameFormatter.fmt + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ColNameFormatter.ALLOWED + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.hybrids.hybrids.Hybridization.rst.txt b/_sources/_autosummary/reV.hybrids.hybrids.Hybridization.rst.txt new file mode 100644 index 000000000..8fd354b5e --- /dev/null +++ b/_sources/_autosummary/reV.hybrids.hybrids.Hybridization.rst.txt @@ -0,0 +1,39 @@ +reV.hybrids.hybrids.Hybridization +================================= + +.. currentmodule:: reV.hybrids.hybrids + +.. autoclass:: Hybridization + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~Hybridization.run + ~Hybridization.run_meta + ~Hybridization.run_profiles + ~Hybridization.save_profiles + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~Hybridization.hybrid_meta + ~Hybridization.hybrid_time_index + ~Hybridization.profiles + ~Hybridization.solar_meta + ~Hybridization.solar_time_index + ~Hybridization.wind_meta + ~Hybridization.wind_time_index + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.hybrids.hybrids.HybridsData.rst.txt b/_sources/_autosummary/reV.hybrids.hybrids.HybridsData.rst.txt new file mode 100644 index 000000000..7757b2eb4 --- /dev/null +++ b/_sources/_autosummary/reV.hybrids.hybrids.HybridsData.rst.txt @@ -0,0 +1,35 @@ +reV.hybrids.hybrids.HybridsData +=============================== + +.. currentmodule:: reV.hybrids.hybrids + +.. autoclass:: HybridsData + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~HybridsData.contains_col + ~HybridsData.validate + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~HybridsData.hybrid_time_index + ~HybridsData.solar_meta + ~HybridsData.solar_time_index + ~HybridsData.wind_meta + ~HybridsData.wind_time_index + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.hybrids.hybrids.MetaHybridizer.rst.txt b/_sources/_autosummary/reV.hybrids.hybrids.MetaHybridizer.rst.txt new file mode 100644 index 000000000..73316ef2b --- /dev/null +++ b/_sources/_autosummary/reV.hybrids.hybrids.MetaHybridizer.rst.txt @@ -0,0 +1,33 @@ +reV.hybrids.hybrids.MetaHybridizer +================================== + +.. currentmodule:: reV.hybrids.hybrids + +.. autoclass:: MetaHybridizer + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~MetaHybridizer.hybridize + ~MetaHybridizer.validate_input + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~MetaHybridizer.hybrid_meta + ~MetaHybridizer.solar_profile_indices_map + ~MetaHybridizer.wind_profile_indices_map + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.hybrids.hybrids.RatioColumns.rst.txt b/_sources/_autosummary/reV.hybrids.hybrids.RatioColumns.rst.txt new file mode 100644 index 000000000..6f9e62359 --- /dev/null +++ b/_sources/_autosummary/reV.hybrids.hybrids.RatioColumns.rst.txt @@ -0,0 +1,33 @@ +reV.hybrids.hybrids.RatioColumns +================================ + +.. currentmodule:: reV.hybrids.hybrids + +.. autoclass:: RatioColumns + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~RatioColumns.count + ~RatioColumns.index + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~RatioColumns.denom + ~RatioColumns.fixed + ~RatioColumns.num + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.hybrids.hybrids.rst.txt b/_sources/_autosummary/reV.hybrids.hybrids.rst.txt new file mode 100644 index 000000000..7488e10b7 --- /dev/null +++ b/_sources/_autosummary/reV.hybrids.hybrids.rst.txt @@ -0,0 +1,35 @@ +reV.hybrids.hybrids +=================== + +.. automodule:: reV.hybrids.hybrids + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + ColNameFormatter + Hybridization + HybridsData + MetaHybridizer + RatioColumns + + + + + + + + + diff --git a/_sources/_autosummary/reV.hybrids.rst.txt b/_sources/_autosummary/reV.hybrids.rst.txt new file mode 100644 index 000000000..370056053 --- /dev/null +++ b/_sources/_autosummary/reV.hybrids.rst.txt @@ -0,0 +1,32 @@ +reV.hybrids +=========== + +.. automodule:: reV.hybrids + + + + + + + + + + + + + + + + + + + +.. 
autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.hybrids.cli_hybrids + reV.hybrids.hybrid_methods + reV.hybrids.hybrids + diff --git a/_sources/_autosummary/reV.losses.power_curve.AbstractPowerCurveTransformation.rst.txt b/_sources/_autosummary/reV.losses.power_curve.AbstractPowerCurveTransformation.rst.txt new file mode 100644 index 000000000..cd997ce21 --- /dev/null +++ b/_sources/_autosummary/reV.losses.power_curve.AbstractPowerCurveTransformation.rst.txt @@ -0,0 +1,31 @@ +reV.losses.power\_curve.AbstractPowerCurveTransformation +======================================================== + +.. currentmodule:: reV.losses.power_curve + +.. autoclass:: AbstractPowerCurveTransformation + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~AbstractPowerCurveTransformation.apply + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~AbstractPowerCurveTransformation.bounds + ~AbstractPowerCurveTransformation.optm_bounds + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.power_curve.ExponentialStretching.rst.txt b/_sources/_autosummary/reV.losses.power_curve.ExponentialStretching.rst.txt new file mode 100644 index 000000000..208348751 --- /dev/null +++ b/_sources/_autosummary/reV.losses.power_curve.ExponentialStretching.rst.txt @@ -0,0 +1,31 @@ +reV.losses.power\_curve.ExponentialStretching +============================================= + +.. currentmodule:: reV.losses.power_curve + +.. autoclass:: ExponentialStretching + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ExponentialStretching.apply + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~ExponentialStretching.bounds + ~ExponentialStretching.optm_bounds + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.power_curve.HorizontalTranslation.rst.txt b/_sources/_autosummary/reV.losses.power_curve.HorizontalTranslation.rst.txt new file mode 100644 index 000000000..4ae3b2808 --- /dev/null +++ b/_sources/_autosummary/reV.losses.power_curve.HorizontalTranslation.rst.txt @@ -0,0 +1,31 @@ +reV.losses.power\_curve.HorizontalTranslation +============================================= + +.. currentmodule:: reV.losses.power_curve + +.. autoclass:: HorizontalTranslation + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~HorizontalTranslation.apply + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~HorizontalTranslation.bounds + ~HorizontalTranslation.optm_bounds + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.power_curve.LinearStretching.rst.txt b/_sources/_autosummary/reV.losses.power_curve.LinearStretching.rst.txt new file mode 100644 index 000000000..b99b2d7ae --- /dev/null +++ b/_sources/_autosummary/reV.losses.power_curve.LinearStretching.rst.txt @@ -0,0 +1,31 @@ +reV.losses.power\_curve.LinearStretching +======================================== + +.. currentmodule:: reV.losses.power_curve + +.. autoclass:: LinearStretching + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~LinearStretching.apply + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~LinearStretching.bounds + ~LinearStretching.optm_bounds + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.power_curve.PowerCurve.rst.txt b/_sources/_autosummary/reV.losses.power_curve.PowerCurve.rst.txt new file mode 100644 index 000000000..b018aeb7b --- /dev/null +++ b/_sources/_autosummary/reV.losses.power_curve.PowerCurve.rst.txt @@ -0,0 +1,31 @@ +reV.losses.power\_curve.PowerCurve +================================== + +.. currentmodule:: reV.losses.power_curve + +.. autoclass:: PowerCurve + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~PowerCurve.cutin_wind_speed + ~PowerCurve.cutoff_wind_speed + ~PowerCurve.rated_power + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.power_curve.PowerCurveLosses.rst.txt b/_sources/_autosummary/reV.losses.power_curve.PowerCurveLosses.rst.txt new file mode 100644 index 000000000..145ffb9de --- /dev/null +++ b/_sources/_autosummary/reV.losses.power_curve.PowerCurveLosses.rst.txt @@ -0,0 +1,31 @@ +reV.losses.power\_curve.PowerCurveLosses +======================================== + +.. currentmodule:: reV.losses.power_curve + +.. autoclass:: PowerCurveLosses + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~PowerCurveLosses.annual_losses_with_transformed_power_curve + ~PowerCurveLosses.fit + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~PowerCurveLosses.power_gen_no_losses + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.power_curve.PowerCurveLossesInput.rst.txt b/_sources/_autosummary/reV.losses.power_curve.PowerCurveLossesInput.rst.txt new file mode 100644 index 000000000..caeec8963 --- /dev/null +++ b/_sources/_autosummary/reV.losses.power_curve.PowerCurveLossesInput.rst.txt @@ -0,0 +1,31 @@ +reV.losses.power\_curve.PowerCurveLossesInput +============================================= + +.. currentmodule:: reV.losses.power_curve + +.. autoclass:: PowerCurveLossesInput + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~PowerCurveLossesInput.REQUIRED_KEYS + ~PowerCurveLossesInput.target + ~PowerCurveLossesInput.transformation + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.power_curve.PowerCurveLossesMixin.rst.txt b/_sources/_autosummary/reV.losses.power_curve.PowerCurveLossesMixin.rst.txt new file mode 100644 index 000000000..6ab94c8b0 --- /dev/null +++ b/_sources/_autosummary/reV.losses.power_curve.PowerCurveLossesMixin.rst.txt @@ -0,0 +1,32 @@ +reV.losses.power\_curve.PowerCurveLossesMixin +============================================= + +.. currentmodule:: reV.losses.power_curve + +.. autoclass:: PowerCurveLossesMixin + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~PowerCurveLossesMixin.add_power_curve_losses + ~PowerCurveLossesMixin.wind_resource_from_input + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~PowerCurveLossesMixin.POWER_CURVE_CONFIG_KEY + ~PowerCurveLossesMixin.input_power_curve + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.power_curve.PowerCurveWindResource.rst.txt b/_sources/_autosummary/reV.losses.power_curve.PowerCurveWindResource.rst.txt new file mode 100644 index 000000000..cd344049e --- /dev/null +++ b/_sources/_autosummary/reV.losses.power_curve.PowerCurveWindResource.rst.txt @@ -0,0 +1,30 @@ +reV.losses.power\_curve.PowerCurveWindResource +============================================== + +.. currentmodule:: reV.losses.power_curve + +.. autoclass:: PowerCurveWindResource + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~PowerCurveWindResource.wind_resource_for_site + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~PowerCurveWindResource.wind_speeds + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.power_curve.TRANSFORMATIONS.rst.txt b/_sources/_autosummary/reV.losses.power_curve.TRANSFORMATIONS.rst.txt new file mode 100644 index 000000000..17170a5e6 --- /dev/null +++ b/_sources/_autosummary/reV.losses.power_curve.TRANSFORMATIONS.rst.txt @@ -0,0 +1,6 @@ +reV.losses.power\_curve.TRANSFORMATIONS +======================================= + +.. currentmodule:: reV.losses.power_curve + +.. autodata:: TRANSFORMATIONS \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.power_curve.adjust_power_curve.rst.txt b/_sources/_autosummary/reV.losses.power_curve.adjust_power_curve.rst.txt new file mode 100644 index 000000000..049db3015 --- /dev/null +++ b/_sources/_autosummary/reV.losses.power_curve.adjust_power_curve.rst.txt @@ -0,0 +1,6 @@ +reV.losses.power\_curve.adjust\_power\_curve +============================================ + +.. currentmodule:: reV.losses.power_curve + +.. 
autofunction:: adjust_power_curve \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.power_curve.rst.txt b/_sources/_autosummary/reV.losses.power_curve.rst.txt new file mode 100644 index 000000000..909e72c53 --- /dev/null +++ b/_sources/_autosummary/reV.losses.power_curve.rst.txt @@ -0,0 +1,53 @@ +reV.losses.power\_curve +======================= + +.. automodule:: reV.losses.power_curve + + + + .. rubric:: Module attributes + + .. autosummary:: + :toctree: + + TRANSFORMATIONS + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + adjust_power_curve + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + AbstractPowerCurveTransformation + ExponentialStretching + HorizontalTranslation + LinearStretching + PowerCurve + PowerCurveLosses + PowerCurveLossesInput + PowerCurveLossesMixin + PowerCurveWindResource + + + + + + + + + diff --git a/_sources/_autosummary/reV.losses.rst.txt b/_sources/_autosummary/reV.losses.rst.txt new file mode 100644 index 000000000..006bfbb9f --- /dev/null +++ b/_sources/_autosummary/reV.losses.rst.txt @@ -0,0 +1,32 @@ +reV.losses +========== + +.. automodule:: reV.losses + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.losses.power_curve + reV.losses.scheduled + reV.losses.utils + diff --git a/_sources/_autosummary/reV.losses.scheduled.Outage.rst.txt b/_sources/_autosummary/reV.losses.scheduled.Outage.rst.txt new file mode 100644 index 000000000..6ccc4771f --- /dev/null +++ b/_sources/_autosummary/reV.losses.scheduled.Outage.rst.txt @@ -0,0 +1,36 @@ +reV.losses.scheduled.Outage +=========================== + +.. currentmodule:: reV.losses.scheduled + +.. autoclass:: Outage + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~Outage.REQUIRED_KEYS + ~Outage.allow_outage_overlap + ~Outage.allowed_months + ~Outage.count + ~Outage.duration + ~Outage.name + ~Outage.percentage_of_capacity_lost + ~Outage.total_available_hours + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.scheduled.OutageScheduler.rst.txt b/_sources/_autosummary/reV.losses.scheduled.OutageScheduler.rst.txt new file mode 100644 index 000000000..05b33a549 --- /dev/null +++ b/_sources/_autosummary/reV.losses.scheduled.OutageScheduler.rst.txt @@ -0,0 +1,24 @@ +reV.losses.scheduled.OutageScheduler +==================================== + +.. currentmodule:: reV.losses.scheduled + +.. autoclass:: OutageScheduler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~OutageScheduler.calculate + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.scheduled.ScheduledLossesMixin.rst.txt b/_sources/_autosummary/reV.losses.scheduled.ScheduledLossesMixin.rst.txt new file mode 100644 index 000000000..3039aaea8 --- /dev/null +++ b/_sources/_autosummary/reV.losses.scheduled.ScheduledLossesMixin.rst.txt @@ -0,0 +1,32 @@ +reV.losses.scheduled.ScheduledLossesMixin +========================================= + +.. currentmodule:: reV.losses.scheduled + +.. autoclass:: ScheduledLossesMixin + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ScheduledLossesMixin.add_scheduled_losses + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~ScheduledLossesMixin.OUTAGE_CONFIG_KEY + ~ScheduledLossesMixin.OUTAGE_SEED_CONFIG_KEY + ~ScheduledLossesMixin.outage_seed + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.scheduled.SingleOutageScheduler.rst.txt b/_sources/_autosummary/reV.losses.scheduled.SingleOutageScheduler.rst.txt new file mode 100644 index 000000000..14317acc0 --- /dev/null +++ b/_sources/_autosummary/reV.losses.scheduled.SingleOutageScheduler.rst.txt @@ -0,0 +1,34 @@ +reV.losses.scheduled.SingleOutageScheduler +========================================== + +.. currentmodule:: reV.losses.scheduled + +.. autoclass:: SingleOutageScheduler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SingleOutageScheduler.calculate + ~SingleOutageScheduler.find_random_outage_slice + ~SingleOutageScheduler.schedule_losses + ~SingleOutageScheduler.update_when_can_schedule + ~SingleOutageScheduler.update_when_can_schedule_from_months + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SingleOutageScheduler.MAX_ITER + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.scheduled.rst.txt b/_sources/_autosummary/reV.losses.scheduled.rst.txt new file mode 100644 index 000000000..acaf508aa --- /dev/null +++ b/_sources/_autosummary/reV.losses.scheduled.rst.txt @@ -0,0 +1,34 @@ +reV.losses.scheduled +==================== + +.. automodule:: reV.losses.scheduled + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + Outage + OutageScheduler + ScheduledLossesMixin + SingleOutageScheduler + + + + + + + + + diff --git a/_sources/_autosummary/reV.losses.utils.convert_to_full_month_names.rst.txt b/_sources/_autosummary/reV.losses.utils.convert_to_full_month_names.rst.txt new file mode 100644 index 000000000..51d3c9802 --- /dev/null +++ b/_sources/_autosummary/reV.losses.utils.convert_to_full_month_names.rst.txt @@ -0,0 +1,6 @@ +reV.losses.utils.convert\_to\_full\_month\_names +================================================ + +.. currentmodule:: reV.losses.utils + +.. autofunction:: convert_to_full_month_names \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.utils.filter_unknown_month_names.rst.txt b/_sources/_autosummary/reV.losses.utils.filter_unknown_month_names.rst.txt new file mode 100644 index 000000000..e4fa86b07 --- /dev/null +++ b/_sources/_autosummary/reV.losses.utils.filter_unknown_month_names.rst.txt @@ -0,0 +1,6 @@ +reV.losses.utils.filter\_unknown\_month\_names +============================================== + +.. currentmodule:: reV.losses.utils + +.. autofunction:: filter_unknown_month_names \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.utils.format_month_name.rst.txt b/_sources/_autosummary/reV.losses.utils.format_month_name.rst.txt new file mode 100644 index 000000000..42b12f944 --- /dev/null +++ b/_sources/_autosummary/reV.losses.utils.format_month_name.rst.txt @@ -0,0 +1,6 @@ +reV.losses.utils.format\_month\_name +==================================== + +.. currentmodule:: reV.losses.utils + +.. 
autofunction:: format_month_name \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.utils.full_month_name_from_abbr.rst.txt b/_sources/_autosummary/reV.losses.utils.full_month_name_from_abbr.rst.txt new file mode 100644 index 000000000..a1213653d --- /dev/null +++ b/_sources/_autosummary/reV.losses.utils.full_month_name_from_abbr.rst.txt @@ -0,0 +1,6 @@ +reV.losses.utils.full\_month\_name\_from\_abbr +============================================== + +.. currentmodule:: reV.losses.utils + +.. autofunction:: full_month_name_from_abbr \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.utils.hourly_indices_for_months.rst.txt b/_sources/_autosummary/reV.losses.utils.hourly_indices_for_months.rst.txt new file mode 100644 index 000000000..16ed9945c --- /dev/null +++ b/_sources/_autosummary/reV.losses.utils.hourly_indices_for_months.rst.txt @@ -0,0 +1,6 @@ +reV.losses.utils.hourly\_indices\_for\_months +============================================= + +.. currentmodule:: reV.losses.utils + +.. autofunction:: hourly_indices_for_months \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.utils.month_index.rst.txt b/_sources/_autosummary/reV.losses.utils.month_index.rst.txt new file mode 100644 index 000000000..302c1f8cf --- /dev/null +++ b/_sources/_autosummary/reV.losses.utils.month_index.rst.txt @@ -0,0 +1,6 @@ +reV.losses.utils.month\_index +============================= + +.. currentmodule:: reV.losses.utils + +.. autofunction:: month_index \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.utils.month_indices.rst.txt b/_sources/_autosummary/reV.losses.utils.month_indices.rst.txt new file mode 100644 index 000000000..728877f86 --- /dev/null +++ b/_sources/_autosummary/reV.losses.utils.month_indices.rst.txt @@ -0,0 +1,6 @@ +reV.losses.utils.month\_indices +=============================== + +.. currentmodule:: reV.losses.utils + +.. 
autofunction:: month_indices \ No newline at end of file diff --git a/_sources/_autosummary/reV.losses.utils.rst.txt b/_sources/_autosummary/reV.losses.utils.rst.txt new file mode 100644 index 000000000..ee2d8ff45 --- /dev/null +++ b/_sources/_autosummary/reV.losses.utils.rst.txt @@ -0,0 +1,36 @@ +reV.losses.utils +================ + +.. automodule:: reV.losses.utils + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + convert_to_full_month_names + filter_unknown_month_names + format_month_name + full_month_name_from_abbr + hourly_indices_for_months + month_index + month_indices + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.nrwal.cli_nrwal.rst.txt b/_sources/_autosummary/reV.nrwal.cli_nrwal.rst.txt new file mode 100644 index 000000000..bc869077c --- /dev/null +++ b/_sources/_autosummary/reV.nrwal.cli_nrwal.rst.txt @@ -0,0 +1,23 @@ +reV.nrwal.cli\_nrwal +==================== + +.. automodule:: reV.nrwal.cli_nrwal + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.nrwal.nrwal.RevNrwal.rst.txt b/_sources/_autosummary/reV.nrwal.nrwal.RevNrwal.rst.txt new file mode 100644 index 000000000..f8092b452 --- /dev/null +++ b/_sources/_autosummary/reV.nrwal.nrwal.RevNrwal.rst.txt @@ -0,0 +1,42 @@ +reV.nrwal.nrwal.RevNrwal +======================== + +.. currentmodule:: reV.nrwal.nrwal + +.. autoclass:: RevNrwal + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~RevNrwal.check_outputs + ~RevNrwal.run + ~RevNrwal.run_nrwal + ~RevNrwal.save_raw_dsets + ~RevNrwal.write_meta_to_csv + ~RevNrwal.write_to_gen_fpath + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~RevNrwal.DEFAULT_META_COLS + ~RevNrwal.analysis_gids + ~RevNrwal.analysis_mask + ~RevNrwal.gen_dsets + ~RevNrwal.meta_out + ~RevNrwal.meta_source + ~RevNrwal.outputs + ~RevNrwal.time_index + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.nrwal.nrwal.rst.txt b/_sources/_autosummary/reV.nrwal.nrwal.rst.txt new file mode 100644 index 000000000..fbcaaa765 --- /dev/null +++ b/_sources/_autosummary/reV.nrwal.nrwal.rst.txt @@ -0,0 +1,31 @@ +reV.nrwal.nrwal +=============== + +.. automodule:: reV.nrwal.nrwal + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + RevNrwal + + + + + + + + + diff --git a/_sources/_autosummary/reV.nrwal.rst.txt b/_sources/_autosummary/reV.nrwal.rst.txt new file mode 100644 index 000000000..4a28a0150 --- /dev/null +++ b/_sources/_autosummary/reV.nrwal.rst.txt @@ -0,0 +1,31 @@ +reV.nrwal +========= + +.. automodule:: reV.nrwal + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.nrwal.cli_nrwal + reV.nrwal.nrwal + diff --git a/_sources/_autosummary/reV.qa_qc.cli_qa_qc.cli_qa_qc.rst.txt b/_sources/_autosummary/reV.qa_qc.cli_qa_qc.cli_qa_qc.rst.txt new file mode 100644 index 000000000..3aa23c631 --- /dev/null +++ b/_sources/_autosummary/reV.qa_qc.cli_qa_qc.cli_qa_qc.rst.txt @@ -0,0 +1,6 @@ +reV.qa\_qc.cli\_qa\_qc.cli\_qa\_qc +================================== + +.. currentmodule:: reV.qa_qc.cli_qa_qc + +.. autofunction:: cli_qa_qc \ No newline at end of file diff --git a/_sources/_autosummary/reV.qa_qc.cli_qa_qc.rst.txt b/_sources/_autosummary/reV.qa_qc.cli_qa_qc.rst.txt new file mode 100644 index 000000000..527fdb8ba --- /dev/null +++ b/_sources/_autosummary/reV.qa_qc.cli_qa_qc.rst.txt @@ -0,0 +1,30 @@ +reV.qa\_qc.cli\_qa\_qc +====================== + +.. automodule:: reV.qa_qc.cli_qa_qc + + + + + + + + .. rubric:: Functions + + .. 
autosummary:: + :toctree: + + cli_qa_qc + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.qa_qc.qa_qc.QaQc.rst.txt b/_sources/_autosummary/reV.qa_qc.qa_qc.QaQc.rst.txt new file mode 100644 index 000000000..1d33b8352 --- /dev/null +++ b/_sources/_autosummary/reV.qa_qc.qa_qc.QaQc.rst.txt @@ -0,0 +1,33 @@ +reV.qa\_qc.qa\_qc.QaQc +====================== + +.. currentmodule:: reV.qa_qc.qa_qc + +.. autoclass:: QaQc + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~QaQc.create_scatter_plots + ~QaQc.exclusions_mask + ~QaQc.h5 + ~QaQc.supply_curve + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~QaQc.out_dir + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.qa_qc.qa_qc.QaQcModule.rst.txt b/_sources/_autosummary/reV.qa_qc.qa_qc.QaQcModule.rst.txt new file mode 100644 index 000000000..c18e95994 --- /dev/null +++ b/_sources/_autosummary/reV.qa_qc.qa_qc.QaQcModule.rst.txt @@ -0,0 +1,42 @@ +reV.qa\_qc.qa\_qc.QaQcModule +============================ + +.. currentmodule:: reV.qa_qc.qa_qc + +.. autoclass:: QaQcModule + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~QaQcModule.area_filter_kernel + ~QaQcModule.cmap + ~QaQcModule.columns + ~QaQcModule.dsets + ~QaQcModule.excl_dict + ~QaQcModule.excl_fpath + ~QaQcModule.fpath + ~QaQcModule.group + ~QaQcModule.lcoe + ~QaQcModule.min_area + ~QaQcModule.plot_step + ~QaQcModule.plot_type + ~QaQcModule.process_size + ~QaQcModule.sub_dir + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.qa_qc.qa_qc.rst.txt b/_sources/_autosummary/reV.qa_qc.qa_qc.rst.txt new file mode 100644 index 000000000..6e74e8850 --- /dev/null +++ b/_sources/_autosummary/reV.qa_qc.qa_qc.rst.txt @@ -0,0 +1,32 @@ +reV.qa\_qc.qa\_qc +================= + +.. automodule:: reV.qa_qc.qa_qc + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + QaQc + QaQcModule + + + + + + + + + diff --git a/_sources/_autosummary/reV.qa_qc.rst.txt b/_sources/_autosummary/reV.qa_qc.rst.txt new file mode 100644 index 000000000..a6cf7613e --- /dev/null +++ b/_sources/_autosummary/reV.qa_qc.rst.txt @@ -0,0 +1,32 @@ +reV.qa\_qc +========== + +.. automodule:: reV.qa_qc + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.qa_qc.cli_qa_qc + reV.qa_qc.qa_qc + reV.qa_qc.summary + diff --git a/_sources/_autosummary/reV.qa_qc.summary.ExclusionsMask.rst.txt b/_sources/_autosummary/reV.qa_qc.summary.ExclusionsMask.rst.txt new file mode 100644 index 000000000..17de0d98a --- /dev/null +++ b/_sources/_autosummary/reV.qa_qc.summary.ExclusionsMask.rst.txt @@ -0,0 +1,33 @@ +reV.qa\_qc.summary.ExclusionsMask +================================= + +.. currentmodule:: reV.qa_qc.summary + +.. autoclass:: ExclusionsMask + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ExclusionsMask.exclusions_plot + ~ExclusionsMask.exclusions_plotly + ~ExclusionsMask.plot + + + + + + .. 
rubric:: Attributes + + .. autosummary:: + + ~ExclusionsMask.data + ~ExclusionsMask.mask + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.qa_qc.summary.PlotBase.rst.txt b/_sources/_autosummary/reV.qa_qc.summary.PlotBase.rst.txt new file mode 100644 index 000000000..8f12ea7d1 --- /dev/null +++ b/_sources/_autosummary/reV.qa_qc.summary.PlotBase.rst.txt @@ -0,0 +1,29 @@ +reV.qa\_qc.summary.PlotBase +=========================== + +.. currentmodule:: reV.qa_qc.summary + +.. autoclass:: PlotBase + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~PlotBase.data + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.qa_qc.summary.SummarizeH5.rst.txt b/_sources/_autosummary/reV.qa_qc.summary.SummarizeH5.rst.txt new file mode 100644 index 000000000..f38d1bca5 --- /dev/null +++ b/_sources/_autosummary/reV.qa_qc.summary.SummarizeH5.rst.txt @@ -0,0 +1,32 @@ +reV.qa\_qc.summary.SummarizeH5 +============================== + +.. currentmodule:: reV.qa_qc.summary + +.. autoclass:: SummarizeH5 + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SummarizeH5.run + ~SummarizeH5.summarize_dset + ~SummarizeH5.summarize_means + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SummarizeH5.h5_file + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.qa_qc.summary.SummarizeSupplyCurve.rst.txt b/_sources/_autosummary/reV.qa_qc.summary.SummarizeSupplyCurve.rst.txt new file mode 100644 index 000000000..1e904d679 --- /dev/null +++ b/_sources/_autosummary/reV.qa_qc.summary.SummarizeSupplyCurve.rst.txt @@ -0,0 +1,31 @@ +reV.qa\_qc.summary.SummarizeSupplyCurve +======================================= + +.. currentmodule:: reV.qa_qc.summary + +.. 
autoclass:: SummarizeSupplyCurve + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SummarizeSupplyCurve.run + ~SummarizeSupplyCurve.supply_curve_summary + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SummarizeSupplyCurve.sc_table + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.qa_qc.summary.SummaryPlots.rst.txt b/_sources/_autosummary/reV.qa_qc.summary.SummaryPlots.rst.txt new file mode 100644 index 000000000..dab52eb82 --- /dev/null +++ b/_sources/_autosummary/reV.qa_qc.summary.SummaryPlots.rst.txt @@ -0,0 +1,37 @@ +reV.qa\_qc.summary.SummaryPlots +=============================== + +.. currentmodule:: reV.qa_qc.summary + +.. autoclass:: SummaryPlots + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SummaryPlots.dist_plot + ~SummaryPlots.dist_plotly + ~SummaryPlots.scatter + ~SummaryPlots.scatter_all + ~SummaryPlots.scatter_plot + ~SummaryPlots.scatter_plotly + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SummaryPlots.columns + ~SummaryPlots.data + ~SummaryPlots.summary + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.qa_qc.summary.SupplyCurvePlot.rst.txt b/_sources/_autosummary/reV.qa_qc.summary.SupplyCurvePlot.rst.txt new file mode 100644 index 000000000..a24dccc59 --- /dev/null +++ b/_sources/_autosummary/reV.qa_qc.summary.SupplyCurvePlot.rst.txt @@ -0,0 +1,34 @@ +reV.qa\_qc.summary.SupplyCurvePlot +================================== + +.. currentmodule:: reV.qa_qc.summary + +.. autoclass:: SupplyCurvePlot + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SupplyCurvePlot.plot + ~SupplyCurvePlot.supply_curve_plot + ~SupplyCurvePlot.supply_curve_plotly + + + + + + .. 
rubric:: Attributes + + .. autosummary:: + + ~SupplyCurvePlot.columns + ~SupplyCurvePlot.data + ~SupplyCurvePlot.sc_table + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.qa_qc.summary.rst.txt b/_sources/_autosummary/reV.qa_qc.summary.rst.txt new file mode 100644 index 000000000..f72796644 --- /dev/null +++ b/_sources/_autosummary/reV.qa_qc.summary.rst.txt @@ -0,0 +1,36 @@ +reV.qa\_qc.summary +================== + +.. automodule:: reV.qa_qc.summary + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + ExclusionsMask + PlotBase + SummarizeH5 + SummarizeSupplyCurve + SummaryPlots + SupplyCurvePlot + + + + + + + + + diff --git a/_sources/_autosummary/reV.rep_profiles.cli_rep_profiles.rst.txt b/_sources/_autosummary/reV.rep_profiles.cli_rep_profiles.rst.txt new file mode 100644 index 000000000..67cd0a2c5 --- /dev/null +++ b/_sources/_autosummary/reV.rep_profiles.cli_rep_profiles.rst.txt @@ -0,0 +1,23 @@ +reV.rep\_profiles.cli\_rep\_profiles +==================================== + +.. automodule:: reV.rep_profiles.cli_rep_profiles + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.rep_profiles.rep_profiles.RegionRepProfile.rst.txt b/_sources/_autosummary/reV.rep_profiles.rep_profiles.RegionRepProfile.rst.txt new file mode 100644 index 000000000..f80efeb3a --- /dev/null +++ b/_sources/_autosummary/reV.rep_profiles.rep_profiles.RegionRepProfile.rst.txt @@ -0,0 +1,37 @@ +reV.rep\_profiles.rep\_profiles.RegionRepProfile +================================================ + +.. currentmodule:: reV.rep_profiles.rep_profiles + +.. autoclass:: RegionRepProfile + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~RegionRepProfile.get_region_rep_profile + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~RegionRepProfile.GEN_GID_COL + ~RegionRepProfile.RES_GID_COL + ~RegionRepProfile.i_reps + ~RegionRepProfile.rep_gen_gids + ~RegionRepProfile.rep_profiles + ~RegionRepProfile.rep_res_gids + ~RegionRepProfile.source_profiles + ~RegionRepProfile.weights + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.rep_profiles.rep_profiles.RepProfiles.rst.txt b/_sources/_autosummary/reV.rep_profiles.rep_profiles.RepProfiles.rst.txt new file mode 100644 index 000000000..8ed4eba40 --- /dev/null +++ b/_sources/_autosummary/reV.rep_profiles.rep_profiles.RepProfiles.rst.txt @@ -0,0 +1,33 @@ +reV.rep\_profiles.rep\_profiles.RepProfiles +=========================================== + +.. currentmodule:: reV.rep_profiles.rep_profiles + +.. autoclass:: RepProfiles + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~RepProfiles.run + ~RepProfiles.save_profiles + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~RepProfiles.meta + ~RepProfiles.profiles + ~RepProfiles.time_index + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.rep_profiles.rep_profiles.RepProfilesBase.rst.txt b/_sources/_autosummary/reV.rep_profiles.rep_profiles.RepProfilesBase.rst.txt new file mode 100644 index 000000000..f6d4dd759 --- /dev/null +++ b/_sources/_autosummary/reV.rep_profiles.rep_profiles.RepProfilesBase.rst.txt @@ -0,0 +1,33 @@ +reV.rep\_profiles.rep\_profiles.RepProfilesBase +=============================================== + +.. currentmodule:: reV.rep_profiles.rep_profiles + +.. autoclass:: RepProfilesBase + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~RepProfilesBase.run + ~RepProfilesBase.save_profiles + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~RepProfilesBase.meta + ~RepProfilesBase.profiles + ~RepProfilesBase.time_index + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.rep_profiles.rep_profiles.RepresentativeMethods.rst.txt b/_sources/_autosummary/reV.rep_profiles.rep_profiles.RepresentativeMethods.rst.txt new file mode 100644 index 000000000..42c5716a9 --- /dev/null +++ b/_sources/_autosummary/reV.rep_profiles.rep_profiles.RepresentativeMethods.rst.txt @@ -0,0 +1,37 @@ +reV.rep\_profiles.rep\_profiles.RepresentativeMethods +===================================================== + +.. currentmodule:: reV.rep_profiles.rep_profiles + +.. autoclass:: RepresentativeMethods + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~RepresentativeMethods.mae + ~RepresentativeMethods.mbe + ~RepresentativeMethods.meanoid + ~RepresentativeMethods.medianoid + ~RepresentativeMethods.nargmin + ~RepresentativeMethods.rmse + ~RepresentativeMethods.run + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~RepresentativeMethods.err_methods + ~RepresentativeMethods.rep_methods + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.rep_profiles.rep_profiles.rst.txt b/_sources/_autosummary/reV.rep_profiles.rep_profiles.rst.txt new file mode 100644 index 000000000..761b42122 --- /dev/null +++ b/_sources/_autosummary/reV.rep_profiles.rep_profiles.rst.txt @@ -0,0 +1,34 @@ +reV.rep\_profiles.rep\_profiles +=============================== + +.. automodule:: reV.rep_profiles.rep_profiles + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + RegionRepProfile + RepProfiles + RepProfilesBase + RepresentativeMethods + + + + + + + + + diff --git a/_sources/_autosummary/reV.rep_profiles.rst.txt b/_sources/_autosummary/reV.rep_profiles.rst.txt new file mode 100644 index 000000000..84c03b77b --- /dev/null +++ b/_sources/_autosummary/reV.rep_profiles.rst.txt @@ -0,0 +1,31 @@ +reV.rep\_profiles +================= + +.. automodule:: reV.rep_profiles + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.rep_profiles.cli_rep_profiles + reV.rep_profiles.rep_profiles + diff --git a/_sources/_autosummary/reV.rst.txt b/_sources/_autosummary/reV.rst.txt new file mode 100644 index 000000000..0e72f901b --- /dev/null +++ b/_sources/_autosummary/reV.rst.txt @@ -0,0 +1,44 @@ +reV +=== + +.. automodule:: reV + + + + + + + + + + + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.SAM + reV.bespoke + reV.cli + reV.config + reV.econ + reV.generation + reV.handlers + reV.hybrids + reV.losses + reV.nrwal + reV.qa_qc + reV.rep_profiles + reV.supply_curve + reV.utilities + reV.version + diff --git a/_sources/_autosummary/reV.supply_curve.aggregation.AbstractAggFileHandler.rst.txt b/_sources/_autosummary/reV.supply_curve.aggregation.AbstractAggFileHandler.rst.txt new file mode 100644 index 000000000..0df5f6de6 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.aggregation.AbstractAggFileHandler.rst.txt @@ -0,0 +1,31 @@ +reV.supply\_curve.aggregation.AbstractAggFileHandler +==================================================== + +.. currentmodule:: reV.supply_curve.aggregation + +.. autoclass:: AbstractAggFileHandler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~AbstractAggFileHandler.close + + + + + + .. 
rubric:: Attributes + + .. autosummary:: + + ~AbstractAggFileHandler.exclusions + ~AbstractAggFileHandler.h5 + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.aggregation.AggFileHandler.rst.txt b/_sources/_autosummary/reV.supply_curve.aggregation.AggFileHandler.rst.txt new file mode 100644 index 000000000..8b1d37c0f --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.aggregation.AggFileHandler.rst.txt @@ -0,0 +1,31 @@ +reV.supply\_curve.aggregation.AggFileHandler +============================================ + +.. currentmodule:: reV.supply_curve.aggregation + +.. autoclass:: AggFileHandler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~AggFileHandler.close + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~AggFileHandler.exclusions + ~AggFileHandler.h5 + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.aggregation.Aggregation.rst.txt b/_sources/_autosummary/reV.supply_curve.aggregation.Aggregation.rst.txt new file mode 100644 index 000000000..d05bc9e86 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.aggregation.Aggregation.rst.txt @@ -0,0 +1,35 @@ +reV.supply\_curve.aggregation.Aggregation +========================================= + +.. currentmodule:: reV.supply_curve.aggregation + +.. autoclass:: Aggregation + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~Aggregation.aggregate + ~Aggregation.run + ~Aggregation.run_parallel + ~Aggregation.run_serial + ~Aggregation.save_agg_to_h5 + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~Aggregation.gids + ~Aggregation.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.aggregation.BaseAggregation.rst.txt b/_sources/_autosummary/reV.supply_curve.aggregation.BaseAggregation.rst.txt new file mode 100644 index 000000000..3df333f08 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.aggregation.BaseAggregation.rst.txt @@ -0,0 +1,30 @@ +reV.supply\_curve.aggregation.BaseAggregation +============================================= + +.. currentmodule:: reV.supply_curve.aggregation + +.. autoclass:: BaseAggregation + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~BaseAggregation.gids + ~BaseAggregation.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.aggregation.rst.txt b/_sources/_autosummary/reV.supply_curve.aggregation.rst.txt new file mode 100644 index 000000000..76677d6d2 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.aggregation.rst.txt @@ -0,0 +1,34 @@ +reV.supply\_curve.aggregation +============================= + +.. automodule:: reV.supply_curve.aggregation + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + AbstractAggFileHandler + AggFileHandler + Aggregation + BaseAggregation + + + + + + + + + diff --git a/_sources/_autosummary/reV.supply_curve.cli_sc_aggregation.rst.txt b/_sources/_autosummary/reV.supply_curve.cli_sc_aggregation.rst.txt new file mode 100644 index 000000000..f0cf19c4a --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.cli_sc_aggregation.rst.txt @@ -0,0 +1,23 @@ +reV.supply\_curve.cli\_sc\_aggregation +====================================== + +.. 
automodule:: reV.supply_curve.cli_sc_aggregation + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.supply_curve.cli_supply_curve.rst.txt b/_sources/_autosummary/reV.supply_curve.cli_supply_curve.rst.txt new file mode 100644 index 000000000..c63c5663d --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.cli_supply_curve.rst.txt @@ -0,0 +1,23 @@ +reV.supply\_curve.cli\_supply\_curve +==================================== + +.. automodule:: reV.supply_curve.cli_supply_curve + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.rst.txt b/_sources/_autosummary/reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.rst.txt new file mode 100644 index 000000000..6afc29b12 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.rst.txt @@ -0,0 +1,39 @@ +reV.supply\_curve.competitive\_wind\_farms.CompetitiveWindFarms +=============================================================== + +.. currentmodule:: reV.supply_curve.competitive_wind_farms + +.. autoclass:: CompetitiveWindFarms + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~CompetitiveWindFarms.check_sc_gid + ~CompetitiveWindFarms.exclude_sc_point_gid + ~CompetitiveWindFarms.map_downwind + ~CompetitiveWindFarms.map_sc_gid_to_sc_point_gid + ~CompetitiveWindFarms.map_sc_point_gid_to_sc_gid + ~CompetitiveWindFarms.map_upwind + ~CompetitiveWindFarms.remove_noncompetitive_farm + ~CompetitiveWindFarms.run + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~CompetitiveWindFarms.mask + ~CompetitiveWindFarms.sc_gids + ~CompetitiveWindFarms.sc_point_gids + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.competitive_wind_farms.rst.txt b/_sources/_autosummary/reV.supply_curve.competitive_wind_farms.rst.txt new file mode 100644 index 000000000..824b264e2 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.competitive_wind_farms.rst.txt @@ -0,0 +1,31 @@ +reV.supply\_curve.competitive\_wind\_farms +========================================== + +.. automodule:: reV.supply_curve.competitive_wind_farms + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + CompetitiveWindFarms + + + + + + + + + diff --git a/_sources/_autosummary/reV.supply_curve.exclusions.ExclusionMask.rst.txt b/_sources/_autosummary/reV.supply_curve.exclusions.ExclusionMask.rst.txt new file mode 100644 index 000000000..ac450642b --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.exclusions.ExclusionMask.rst.txt @@ -0,0 +1,41 @@ +reV.supply\_curve.exclusions.ExclusionMask +========================================== + +.. currentmodule:: reV.supply_curve.exclusions + +.. autoclass:: ExclusionMask + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ExclusionMask.add_layer + ~ExclusionMask.close + ~ExclusionMask.run + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~ExclusionMask.FILTER_KERNELS + ~ExclusionMask.excl_h5 + ~ExclusionMask.excl_layers + ~ExclusionMask.latitude + ~ExclusionMask.layer_names + ~ExclusionMask.layers + ~ExclusionMask.longitude + ~ExclusionMask.mask + ~ExclusionMask.nodata_lookup + ~ExclusionMask.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.exclusions.ExclusionMaskFromDict.rst.txt b/_sources/_autosummary/reV.supply_curve.exclusions.ExclusionMaskFromDict.rst.txt new file mode 100644 index 000000000..378c84db4 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.exclusions.ExclusionMaskFromDict.rst.txt @@ -0,0 +1,42 @@ +reV.supply\_curve.exclusions.ExclusionMaskFromDict +================================================== + +.. currentmodule:: reV.supply_curve.exclusions + +.. autoclass:: ExclusionMaskFromDict + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ExclusionMaskFromDict.add_layer + ~ExclusionMaskFromDict.close + ~ExclusionMaskFromDict.extract_inclusion_mask + ~ExclusionMaskFromDict.run + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ExclusionMaskFromDict.FILTER_KERNELS + ~ExclusionMaskFromDict.excl_h5 + ~ExclusionMaskFromDict.excl_layers + ~ExclusionMaskFromDict.latitude + ~ExclusionMaskFromDict.layer_names + ~ExclusionMaskFromDict.layers + ~ExclusionMaskFromDict.longitude + ~ExclusionMaskFromDict.mask + ~ExclusionMaskFromDict.nodata_lookup + ~ExclusionMaskFromDict.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.exclusions.FrictionMask.rst.txt b/_sources/_autosummary/reV.supply_curve.exclusions.FrictionMask.rst.txt new file mode 100644 index 000000000..8e944d31a --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.exclusions.FrictionMask.rst.txt @@ -0,0 +1,41 @@ +reV.supply\_curve.exclusions.FrictionMask +========================================= + +.. 
currentmodule:: reV.supply_curve.exclusions + +.. autoclass:: FrictionMask + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~FrictionMask.add_layer + ~FrictionMask.close + ~FrictionMask.run + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~FrictionMask.FILTER_KERNELS + ~FrictionMask.excl_h5 + ~FrictionMask.excl_layers + ~FrictionMask.latitude + ~FrictionMask.layer_names + ~FrictionMask.layers + ~FrictionMask.longitude + ~FrictionMask.mask + ~FrictionMask.nodata_lookup + ~FrictionMask.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.exclusions.LayerMask.rst.txt b/_sources/_autosummary/reV.supply_curve.exclusions.LayerMask.rst.txt new file mode 100644 index 000000000..58bff12aa --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.exclusions.LayerMask.rst.txt @@ -0,0 +1,36 @@ +reV.supply\_curve.exclusions.LayerMask +====================================== + +.. currentmodule:: reV.supply_curve.exclusions + +.. autoclass:: LayerMask + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~LayerMask.exclude_values + ~LayerMask.force_include + ~LayerMask.include_values + ~LayerMask.include_weights + ~LayerMask.mask_type + ~LayerMask.max_value + ~LayerMask.min_value + ~LayerMask.name + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.exclusions.rst.txt b/_sources/_autosummary/reV.supply_curve.exclusions.rst.txt new file mode 100644 index 000000000..f3974aecf --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.exclusions.rst.txt @@ -0,0 +1,34 @@ +reV.supply\_curve.exclusions +============================ + +.. automodule:: reV.supply_curve.exclusions + + + + + + + + + + + + .. rubric:: Classes + + .. 
autosummary:: + :toctree: + :template: custom-class-template.rst + + ExclusionMask + ExclusionMaskFromDict + FrictionMask + LayerMask + + + + + + + + + diff --git a/_sources/_autosummary/reV.supply_curve.extent.SupplyCurveExtent.rst.txt b/_sources/_autosummary/reV.supply_curve.extent.SupplyCurveExtent.rst.txt new file mode 100644 index 000000000..d682ed42d --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.extent.SupplyCurveExtent.rst.txt @@ -0,0 +1,54 @@ +reV.supply\_curve.extent.SupplyCurveExtent +========================================== + +.. currentmodule:: reV.supply_curve.extent + +.. autoclass:: SupplyCurveExtent + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SupplyCurveExtent.close + ~SupplyCurveExtent.get_coord + ~SupplyCurveExtent.get_excl_points + ~SupplyCurveExtent.get_excl_slices + ~SupplyCurveExtent.get_flat_excl_ind + ~SupplyCurveExtent.get_sc_row_col_ind + ~SupplyCurveExtent.get_slice_lookup + ~SupplyCurveExtent.valid_sc_points + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~SupplyCurveExtent.col_indices + ~SupplyCurveExtent.cols_of_excl + ~SupplyCurveExtent.excl_col_slices + ~SupplyCurveExtent.excl_cols + ~SupplyCurveExtent.excl_row_slices + ~SupplyCurveExtent.excl_rows + ~SupplyCurveExtent.excl_shape + ~SupplyCurveExtent.exclusions + ~SupplyCurveExtent.lat_lon + ~SupplyCurveExtent.latitude + ~SupplyCurveExtent.longitude + ~SupplyCurveExtent.n_cols + ~SupplyCurveExtent.n_rows + ~SupplyCurveExtent.points + ~SupplyCurveExtent.resolution + ~SupplyCurveExtent.row_indices + ~SupplyCurveExtent.rows_of_excl + ~SupplyCurveExtent.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.extent.rst.txt b/_sources/_autosummary/reV.supply_curve.extent.rst.txt new file mode 100644 index 000000000..2cec25e70 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.extent.rst.txt @@ -0,0 +1,31 @@ +reV.supply\_curve.extent +======================== + +.. automodule:: reV.supply_curve.extent + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + SupplyCurveExtent + + + + + + + + + diff --git a/_sources/_autosummary/reV.supply_curve.points.AbstractSupplyCurvePoint.rst.txt b/_sources/_autosummary/reV.supply_curve.points.AbstractSupplyCurvePoint.rst.txt new file mode 100644 index 000000000..b3519eaf7 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.points.AbstractSupplyCurvePoint.rst.txt @@ -0,0 +1,34 @@ +reV.supply\_curve.points.AbstractSupplyCurvePoint +================================================= + +.. currentmodule:: reV.supply_curve.points + +.. autoclass:: AbstractSupplyCurvePoint + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~AbstractSupplyCurvePoint.get_agg_slices + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~AbstractSupplyCurvePoint.cols + ~AbstractSupplyCurvePoint.gid + ~AbstractSupplyCurvePoint.resolution + ~AbstractSupplyCurvePoint.rows + ~AbstractSupplyCurvePoint.sc_point_gid + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.points.AggregationSupplyCurvePoint.rst.txt b/_sources/_autosummary/reV.supply_curve.points.AggregationSupplyCurvePoint.rst.txt new file mode 100644 index 000000000..073054b32 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.points.AggregationSupplyCurvePoint.rst.txt @@ -0,0 +1,62 @@ +reV.supply\_curve.points.AggregationSupplyCurvePoint +==================================================== + +.. currentmodule:: reV.supply_curve.points + +.. autoclass:: AggregationSupplyCurvePoint + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~AggregationSupplyCurvePoint.agg_data_layers + ~AggregationSupplyCurvePoint.aggregate + ~AggregationSupplyCurvePoint.close + ~AggregationSupplyCurvePoint.exclusion_weighted_mean + ~AggregationSupplyCurvePoint.get_agg_slices + ~AggregationSupplyCurvePoint.mean_wind_dirs + ~AggregationSupplyCurvePoint.run + ~AggregationSupplyCurvePoint.sc_mean + ~AggregationSupplyCurvePoint.sc_sum + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~AggregationSupplyCurvePoint.area + ~AggregationSupplyCurvePoint.bool_mask + ~AggregationSupplyCurvePoint.centroid + ~AggregationSupplyCurvePoint.cols + ~AggregationSupplyCurvePoint.country + ~AggregationSupplyCurvePoint.county + ~AggregationSupplyCurvePoint.elevation + ~AggregationSupplyCurvePoint.exclusions + ~AggregationSupplyCurvePoint.gid + ~AggregationSupplyCurvePoint.gid_counts + ~AggregationSupplyCurvePoint.h5 + ~AggregationSupplyCurvePoint.h5_gid_set + ~AggregationSupplyCurvePoint.include_mask + ~AggregationSupplyCurvePoint.include_mask_flat + ~AggregationSupplyCurvePoint.latitude + ~AggregationSupplyCurvePoint.longitude + ~AggregationSupplyCurvePoint.n_gids + ~AggregationSupplyCurvePoint.offshore + ~AggregationSupplyCurvePoint.pixel_area + ~AggregationSupplyCurvePoint.resolution + ~AggregationSupplyCurvePoint.rows + ~AggregationSupplyCurvePoint.sc_point_gid + ~AggregationSupplyCurvePoint.state + ~AggregationSupplyCurvePoint.summary + ~AggregationSupplyCurvePoint.timezone + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.points.GenerationSupplyCurvePoint.rst.txt b/_sources/_autosummary/reV.supply_curve.points.GenerationSupplyCurvePoint.rst.txt new file mode 100644 index 000000000..9aa0f8b4c --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.points.GenerationSupplyCurvePoint.rst.txt @@ -0,0 +1,84 @@ +reV.supply\_curve.points.GenerationSupplyCurvePoint +=================================================== + +.. currentmodule:: reV.supply_curve.points + +.. autoclass:: GenerationSupplyCurvePoint + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~GenerationSupplyCurvePoint.agg_data_layers + ~GenerationSupplyCurvePoint.aggregate + ~GenerationSupplyCurvePoint.close + ~GenerationSupplyCurvePoint.economies_of_scale + ~GenerationSupplyCurvePoint.exclusion_weighted_mean + ~GenerationSupplyCurvePoint.get_agg_slices + ~GenerationSupplyCurvePoint.mean_wind_dirs + ~GenerationSupplyCurvePoint.point_summary + ~GenerationSupplyCurvePoint.run + ~GenerationSupplyCurvePoint.sc_mean + ~GenerationSupplyCurvePoint.sc_sum + ~GenerationSupplyCurvePoint.summarize + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~GenerationSupplyCurvePoint.POWER_DENSITY + ~GenerationSupplyCurvePoint.area + ~GenerationSupplyCurvePoint.bool_mask + ~GenerationSupplyCurvePoint.capacity + ~GenerationSupplyCurvePoint.capacity_ac + ~GenerationSupplyCurvePoint.centroid + ~GenerationSupplyCurvePoint.cols + ~GenerationSupplyCurvePoint.country + ~GenerationSupplyCurvePoint.county + ~GenerationSupplyCurvePoint.elevation + ~GenerationSupplyCurvePoint.exclusions + ~GenerationSupplyCurvePoint.friction_data + ~GenerationSupplyCurvePoint.gen + ~GenerationSupplyCurvePoint.gen_data + ~GenerationSupplyCurvePoint.gen_gid_set + ~GenerationSupplyCurvePoint.gid + ~GenerationSupplyCurvePoint.gid_counts + ~GenerationSupplyCurvePoint.h5 + ~GenerationSupplyCurvePoint.h5_dsets_data + ~GenerationSupplyCurvePoint.h5_gid_set + ~GenerationSupplyCurvePoint.include_mask + ~GenerationSupplyCurvePoint.include_mask_flat + ~GenerationSupplyCurvePoint.latitude + ~GenerationSupplyCurvePoint.lcoe_data + ~GenerationSupplyCurvePoint.longitude + ~GenerationSupplyCurvePoint.mean_cf + ~GenerationSupplyCurvePoint.mean_friction + ~GenerationSupplyCurvePoint.mean_h5_dsets_data + ~GenerationSupplyCurvePoint.mean_lcoe + ~GenerationSupplyCurvePoint.mean_lcoe_friction + ~GenerationSupplyCurvePoint.mean_res + ~GenerationSupplyCurvePoint.n_gids + ~GenerationSupplyCurvePoint.offshore + ~GenerationSupplyCurvePoint.pixel_area + ~GenerationSupplyCurvePoint.power_density + 
~GenerationSupplyCurvePoint.power_density_ac + ~GenerationSupplyCurvePoint.res_data + ~GenerationSupplyCurvePoint.res_gid_set + ~GenerationSupplyCurvePoint.resolution + ~GenerationSupplyCurvePoint.rows + ~GenerationSupplyCurvePoint.sc_point_gid + ~GenerationSupplyCurvePoint.state + ~GenerationSupplyCurvePoint.summary + ~GenerationSupplyCurvePoint.timezone + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.points.SupplyCurvePoint.rst.txt b/_sources/_autosummary/reV.supply_curve.points.SupplyCurvePoint.rst.txt new file mode 100644 index 000000000..ac0973d1e --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.points.SupplyCurvePoint.rst.txt @@ -0,0 +1,53 @@ +reV.supply\_curve.points.SupplyCurvePoint +========================================= + +.. currentmodule:: reV.supply_curve.points + +.. autoclass:: SupplyCurvePoint + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SupplyCurvePoint.agg_data_layers + ~SupplyCurvePoint.aggregate + ~SupplyCurvePoint.close + ~SupplyCurvePoint.exclusion_weighted_mean + ~SupplyCurvePoint.get_agg_slices + ~SupplyCurvePoint.mean_wind_dirs + ~SupplyCurvePoint.sc_mean + ~SupplyCurvePoint.sc_sum + + + + + + .. rubric:: Attributes + + .. 
autosummary:: + + ~SupplyCurvePoint.area + ~SupplyCurvePoint.bool_mask + ~SupplyCurvePoint.centroid + ~SupplyCurvePoint.cols + ~SupplyCurvePoint.exclusions + ~SupplyCurvePoint.gid + ~SupplyCurvePoint.h5 + ~SupplyCurvePoint.include_mask + ~SupplyCurvePoint.include_mask_flat + ~SupplyCurvePoint.latitude + ~SupplyCurvePoint.longitude + ~SupplyCurvePoint.n_gids + ~SupplyCurvePoint.pixel_area + ~SupplyCurvePoint.resolution + ~SupplyCurvePoint.rows + ~SupplyCurvePoint.sc_point_gid + ~SupplyCurvePoint.summary + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.points.rst.txt b/_sources/_autosummary/reV.supply_curve.points.rst.txt new file mode 100644 index 000000000..b5241fd88 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.points.rst.txt @@ -0,0 +1,34 @@ +reV.supply\_curve.points +======================== + +.. automodule:: reV.supply_curve.points + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + AbstractSupplyCurvePoint + AggregationSupplyCurvePoint + GenerationSupplyCurvePoint + SupplyCurvePoint + + + + + + + + + diff --git a/_sources/_autosummary/reV.supply_curve.rst.txt b/_sources/_autosummary/reV.supply_curve.rst.txt new file mode 100644 index 000000000..26ec6df36 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.rst.txt @@ -0,0 +1,39 @@ +reV.supply\_curve +================= + +.. automodule:: reV.supply_curve + + + + + + + + + + + + + + + + + + + +.. 
autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.supply_curve.aggregation + reV.supply_curve.cli_sc_aggregation + reV.supply_curve.cli_supply_curve + reV.supply_curve.competitive_wind_farms + reV.supply_curve.exclusions + reV.supply_curve.extent + reV.supply_curve.points + reV.supply_curve.sc_aggregation + reV.supply_curve.supply_curve + reV.supply_curve.tech_mapping + diff --git a/_sources/_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.rst.txt b/_sources/_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.rst.txt new file mode 100644 index 000000000..b8193b539 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.rst.txt @@ -0,0 +1,35 @@ +reV.supply\_curve.sc\_aggregation.SupplyCurveAggFileHandler +=========================================================== + +.. currentmodule:: reV.supply_curve.sc_aggregation + +.. autoclass:: SupplyCurveAggFileHandler + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SupplyCurveAggFileHandler.close + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SupplyCurveAggFileHandler.data_layers + ~SupplyCurveAggFileHandler.exclusions + ~SupplyCurveAggFileHandler.friction_layer + ~SupplyCurveAggFileHandler.gen + ~SupplyCurveAggFileHandler.h5 + ~SupplyCurveAggFileHandler.power_density + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggregation.rst.txt b/_sources/_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggregation.rst.txt new file mode 100644 index 000000000..26b92f452 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggregation.rst.txt @@ -0,0 +1,34 @@ +reV.supply\_curve.sc\_aggregation.SupplyCurveAggregation +======================================================== + +.. 
currentmodule:: reV.supply_curve.sc_aggregation + +.. autoclass:: SupplyCurveAggregation + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SupplyCurveAggregation.run + ~SupplyCurveAggregation.run_parallel + ~SupplyCurveAggregation.run_serial + ~SupplyCurveAggregation.summarize + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SupplyCurveAggregation.gids + ~SupplyCurveAggregation.shape + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.sc_aggregation.rst.txt b/_sources/_autosummary/reV.supply_curve.sc_aggregation.rst.txt new file mode 100644 index 000000000..35e3ec354 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.sc_aggregation.rst.txt @@ -0,0 +1,32 @@ +reV.supply\_curve.sc\_aggregation +================================= + +.. automodule:: reV.supply_curve.sc_aggregation + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + SupplyCurveAggFileHandler + SupplyCurveAggregation + + + + + + + + + diff --git a/_sources/_autosummary/reV.supply_curve.supply_curve.SupplyCurve.rst.txt b/_sources/_autosummary/reV.supply_curve.supply_curve.SupplyCurve.rst.txt new file mode 100644 index 000000000..66ec580d9 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.supply_curve.SupplyCurve.rst.txt @@ -0,0 +1,28 @@ +reV.supply\_curve.supply\_curve.SupplyCurve +=========================================== + +.. currentmodule:: reV.supply_curve.supply_curve + +.. autoclass:: SupplyCurve + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. 
autosummary:: + + ~SupplyCurve.add_sum_cols + ~SupplyCurve.compute_total_lcoe + ~SupplyCurve.full_sort + ~SupplyCurve.run + ~SupplyCurve.simple_sort + + + + + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.supply_curve.rst.txt b/_sources/_autosummary/reV.supply_curve.supply_curve.rst.txt new file mode 100644 index 000000000..f7503a8e6 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.supply_curve.rst.txt @@ -0,0 +1,31 @@ +reV.supply\_curve.supply\_curve +=============================== + +.. automodule:: reV.supply_curve.supply_curve + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + SupplyCurve + + + + + + + + + diff --git a/_sources/_autosummary/reV.supply_curve.tech_mapping.TechMapping.rst.txt b/_sources/_autosummary/reV.supply_curve.tech_mapping.TechMapping.rst.txt new file mode 100644 index 000000000..42cf9159b --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.tech_mapping.TechMapping.rst.txt @@ -0,0 +1,33 @@ +reV.supply\_curve.tech\_mapping.TechMapping +=========================================== + +.. currentmodule:: reV.supply_curve.tech_mapping + +.. autoclass:: TechMapping + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~TechMapping.map_resource + ~TechMapping.map_resource_gids + ~TechMapping.run + ~TechMapping.save_tech_map + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~TechMapping.distance_threshold + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.supply_curve.tech_mapping.rst.txt b/_sources/_autosummary/reV.supply_curve.tech_mapping.rst.txt new file mode 100644 index 000000000..8c6f05d65 --- /dev/null +++ b/_sources/_autosummary/reV.supply_curve.tech_mapping.rst.txt @@ -0,0 +1,31 @@ +reV.supply\_curve.tech\_mapping +=============================== + +.. 
automodule:: reV.supply_curve.tech_mapping + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + TechMapping + + + + + + + + + diff --git a/_sources/_autosummary/reV.utilities.ModuleName.rst.txt b/_sources/_autosummary/reV.utilities.ModuleName.rst.txt new file mode 100644 index 000000000..7ff770ca1 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.ModuleName.rst.txt @@ -0,0 +1,40 @@ +reV.utilities.ModuleName +======================== + +.. currentmodule:: reV.utilities + +.. autoclass:: ModuleName + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~ModuleName.all_names + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~ModuleName.BESPOKE + ~ModuleName.COLLECT + ~ModuleName.ECON + ~ModuleName.GENERATION + ~ModuleName.HYBRIDS + ~ModuleName.MULTI_YEAR + ~ModuleName.NRWAL + ~ModuleName.QA_QC + ~ModuleName.REP_PROFILES + ~ModuleName.SUPPLY_CURVE + ~ModuleName.SUPPLY_CURVE_AGGREGATION + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.cli_functions.format_analysis_years.rst.txt b/_sources/_autosummary/reV.utilities.cli_functions.format_analysis_years.rst.txt new file mode 100644 index 000000000..7c0e1e2c4 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.cli_functions.format_analysis_years.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.cli\_functions.format\_analysis\_years +==================================================== + +.. currentmodule:: reV.utilities.cli_functions + +.. 
autofunction:: format_analysis_years \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.cli_functions.init_cli_logging.rst.txt b/_sources/_autosummary/reV.utilities.cli_functions.init_cli_logging.rst.txt new file mode 100644 index 000000000..335d179f6 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.cli_functions.init_cli_logging.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.cli\_functions.init\_cli\_logging +=============================================== + +.. currentmodule:: reV.utilities.cli_functions + +.. autofunction:: init_cli_logging \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.cli_functions.parse_from_pipeline.rst.txt b/_sources/_autosummary/reV.utilities.cli_functions.parse_from_pipeline.rst.txt new file mode 100644 index 000000000..f2f376421 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.cli_functions.parse_from_pipeline.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.cli\_functions.parse\_from\_pipeline +================================================== + +.. currentmodule:: reV.utilities.cli_functions + +.. autofunction:: parse_from_pipeline \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.cli_functions.rst.txt b/_sources/_autosummary/reV.utilities.cli_functions.rst.txt new file mode 100644 index 000000000..dd42b9c67 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.cli_functions.rst.txt @@ -0,0 +1,32 @@ +reV.utilities.cli\_functions +============================ + +.. automodule:: reV.utilities.cli_functions + + + + + + + + .. rubric:: Functions + + .. 
autosummary:: + :toctree: + + format_analysis_years + init_cli_logging + parse_from_pipeline + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.utilities.curtailment.curtail.rst.txt b/_sources/_autosummary/reV.utilities.curtailment.curtail.rst.txt new file mode 100644 index 000000000..924444743 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.curtailment.curtail.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.curtailment.curtail +================================= + +.. currentmodule:: reV.utilities.curtailment + +.. autofunction:: curtail \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.curtailment.rst.txt b/_sources/_autosummary/reV.utilities.curtailment.rst.txt new file mode 100644 index 000000000..3f4e91ea8 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.curtailment.rst.txt @@ -0,0 +1,30 @@ +reV.utilities.curtailment +========================= + +.. automodule:: reV.utilities.curtailment + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + curtail + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.utilities.exceptions.CollectionRuntimeError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.CollectionRuntimeError.rst.txt new file mode 100644 index 000000000..901adda3e --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.CollectionRuntimeError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.CollectionRuntimeError +=============================================== + +.. currentmodule:: reV.utilities.exceptions + +.. 
autoexception:: CollectionRuntimeError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.CollectionValueError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.CollectionValueError.rst.txt new file mode 100644 index 000000000..ac28f40c5 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.CollectionValueError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.CollectionValueError +============================================= + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: CollectionValueError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.CollectionWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.CollectionWarning.rst.txt new file mode 100644 index 000000000..d9de29fb2 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.CollectionWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.CollectionWarning +========================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: CollectionWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.ConfigError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.ConfigError.rst.txt new file mode 100644 index 000000000..c25e7a357 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.ConfigError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.ConfigError +==================================== + +.. currentmodule:: reV.utilities.exceptions + +.. 
autoexception:: ConfigError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.ConfigWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.ConfigWarning.rst.txt new file mode 100644 index 000000000..e2b52a5ad --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.ConfigWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.ConfigWarning +====================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: ConfigWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.DataShapeError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.DataShapeError.rst.txt new file mode 100644 index 000000000..024169ac6 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.DataShapeError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.DataShapeError +======================================= + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: DataShapeError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.EmptySupplyCurvePointError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.EmptySupplyCurvePointError.rst.txt new file mode 100644 index 000000000..f949513ed --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.EmptySupplyCurvePointError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.EmptySupplyCurvePointError +=================================================== + +.. currentmodule:: reV.utilities.exceptions + +.. 
autoexception:: EmptySupplyCurvePointError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.ExclusionLayerError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.ExclusionLayerError.rst.txt new file mode 100644 index 000000000..b92218b50 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.ExclusionLayerError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.ExclusionLayerError +============================================ + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: ExclusionLayerError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.ExecutionError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.ExecutionError.rst.txt new file mode 100644 index 000000000..539e698ac --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.ExecutionError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.ExecutionError +======================================= + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: ExecutionError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.ExtrapolationWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.ExtrapolationWarning.rst.txt new file mode 100644 index 000000000..339282598 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.ExtrapolationWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.ExtrapolationWarning +============================================= + +.. currentmodule:: reV.utilities.exceptions + +.. 
autoexception:: ExtrapolationWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.FileInputError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.FileInputError.rst.txt new file mode 100644 index 000000000..bf6391e8a --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.FileInputError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.FileInputError +======================================= + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: FileInputError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.FileInputWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.FileInputWarning.rst.txt new file mode 100644 index 000000000..d953ff4e3 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.FileInputWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.FileInputWarning +========================================= + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: FileInputWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.HandlerKeyError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.HandlerKeyError.rst.txt new file mode 100644 index 000000000..db81b8e24 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.HandlerKeyError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.HandlerKeyError +======================================== + +.. currentmodule:: reV.utilities.exceptions + +.. 
autoexception:: HandlerKeyError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.HandlerRuntimeError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.HandlerRuntimeError.rst.txt new file mode 100644 index 000000000..7cb9c5196 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.HandlerRuntimeError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.HandlerRuntimeError +============================================ + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: HandlerRuntimeError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.HandlerValueError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.HandlerValueError.rst.txt new file mode 100644 index 000000000..4ed1536b4 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.HandlerValueError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.HandlerValueError +========================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: HandlerValueError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.HandlerWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.HandlerWarning.rst.txt new file mode 100644 index 000000000..3ab905f1a --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.HandlerWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.HandlerWarning +======================================= + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: HandlerWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.InputError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.InputError.rst.txt new file mode 100644 index 000000000..1a53e4f06 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.InputError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.InputError +=================================== + +.. 
currentmodule:: reV.utilities.exceptions + +.. autoexception:: InputError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.InputWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.InputWarning.rst.txt new file mode 100644 index 000000000..345142c26 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.InputWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.InputWarning +===================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: InputWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.JSONError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.JSONError.rst.txt new file mode 100644 index 000000000..fe121edc7 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.JSONError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.JSONError +================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: JSONError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.MultiFileExclusionError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.MultiFileExclusionError.rst.txt new file mode 100644 index 000000000..53f57a05d --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.MultiFileExclusionError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.MultiFileExclusionError +================================================ + +.. currentmodule:: reV.utilities.exceptions + +.. 
autoexception:: MultiFileExclusionError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.NearestNeighborError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.NearestNeighborError.rst.txt new file mode 100644 index 000000000..3bb7fb5a3 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.NearestNeighborError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.NearestNeighborError +============================================= + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: NearestNeighborError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.OffshoreWindInputError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.OffshoreWindInputError.rst.txt new file mode 100644 index 000000000..18d4d0570 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.OffshoreWindInputError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.OffshoreWindInputError +=============================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: OffshoreWindInputError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.OffshoreWindInputWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.OffshoreWindInputWarning.rst.txt new file mode 100644 index 000000000..5c556bc0a --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.OffshoreWindInputWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.OffshoreWindInputWarning +================================================= + +.. currentmodule:: reV.utilities.exceptions + +.. 
autoexception:: OffshoreWindInputWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.OutputWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.OutputWarning.rst.txt new file mode 100644 index 000000000..1ebf0e94b --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.OutputWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.OutputWarning +====================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: OutputWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.ParallelExecutionWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.ParallelExecutionWarning.rst.txt new file mode 100644 index 000000000..9835396ed --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.ParallelExecutionWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.ParallelExecutionWarning +================================================= + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: ParallelExecutionWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.PipelineError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.PipelineError.rst.txt new file mode 100644 index 000000000..bec524cf2 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.PipelineError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.PipelineError +====================================== + +.. currentmodule:: reV.utilities.exceptions + +.. 
autoexception:: PipelineError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.ProjectPointsValueError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.ProjectPointsValueError.rst.txt new file mode 100644 index 000000000..c1e830d47 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.ProjectPointsValueError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.ProjectPointsValueError +================================================ + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: ProjectPointsValueError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.PySAMVersionError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.PySAMVersionError.rst.txt new file mode 100644 index 000000000..95bdd682e --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.PySAMVersionError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.PySAMVersionError +========================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: PySAMVersionError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.PySAMVersionWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.PySAMVersionWarning.rst.txt new file mode 100644 index 000000000..748528092 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.PySAMVersionWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.PySAMVersionWarning +============================================ + +.. currentmodule:: reV.utilities.exceptions + +.. 
autoexception:: PySAMVersionWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.ResourceError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.ResourceError.rst.txt new file mode 100644 index 000000000..ef260032a --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.ResourceError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.ResourceError +====================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: ResourceError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.SAMExecutionError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.SAMExecutionError.rst.txt new file mode 100644 index 000000000..94f41b632 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.SAMExecutionError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.SAMExecutionError +========================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: SAMExecutionError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.SAMExecutionWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.SAMExecutionWarning.rst.txt new file mode 100644 index 000000000..f9d9b20e5 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.SAMExecutionWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.SAMExecutionWarning +============================================ + +.. currentmodule:: reV.utilities.exceptions + +.. 
autoexception:: SAMExecutionWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.SAMInputError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.SAMInputError.rst.txt new file mode 100644 index 000000000..19a4b92cb --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.SAMInputError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.SAMInputError +====================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: SAMInputError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.SAMInputWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.SAMInputWarning.rst.txt new file mode 100644 index 000000000..de7d1953a --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.SAMInputWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.SAMInputWarning +======================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: SAMInputWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.SlurmWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.SlurmWarning.rst.txt new file mode 100644 index 000000000..59922f335 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.SlurmWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.SlurmWarning +===================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: SlurmWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.SupplyCurveError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.SupplyCurveError.rst.txt new file mode 100644 index 000000000..050c4194d --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.SupplyCurveError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.SupplyCurveError +========================================= + +.. 
currentmodule:: reV.utilities.exceptions + +.. autoexception:: SupplyCurveError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.SupplyCurveInputError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.SupplyCurveInputError.rst.txt new file mode 100644 index 000000000..c3f8d1daf --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.SupplyCurveInputError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.SupplyCurveInputError +============================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: SupplyCurveInputError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.WhileLoopPackingError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.WhileLoopPackingError.rst.txt new file mode 100644 index 000000000..c344af756 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.WhileLoopPackingError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.WhileLoopPackingError +============================================== + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: WhileLoopPackingError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.reVDeprecationWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.reVDeprecationWarning.rst.txt new file mode 100644 index 000000000..871602e3e --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.reVDeprecationWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.reVDeprecationWarning +============================================== + +.. currentmodule:: reV.utilities.exceptions + +.. 
autoexception:: reVDeprecationWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.reVError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.reVError.rst.txt new file mode 100644 index 000000000..71b93d08f --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.reVError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.reVError +================================= + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: reVError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.reVLossesValueError.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.reVLossesValueError.rst.txt new file mode 100644 index 000000000..010ca1533 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.reVLossesValueError.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.reVLossesValueError +============================================ + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: reVLossesValueError \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.reVLossesWarning.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.reVLossesWarning.rst.txt new file mode 100644 index 000000000..6fc779759 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.reVLossesWarning.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.exceptions.reVLossesWarning +========================================= + +.. currentmodule:: reV.utilities.exceptions + +.. autoexception:: reVLossesWarning \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.exceptions.rst.txt b/_sources/_autosummary/reV.utilities.exceptions.rst.txt new file mode 100644 index 000000000..bce0aae8f --- /dev/null +++ b/_sources/_autosummary/reV.utilities.exceptions.rst.txt @@ -0,0 +1,71 @@ +reV.utilities.exceptions +======================== + +.. automodule:: reV.utilities.exceptions + + + + + + + + + + + + + + + + .. 
rubric:: Exceptions + + .. autosummary:: + :toctree: + + CollectionRuntimeError + CollectionValueError + CollectionWarning + ConfigError + ConfigWarning + DataShapeError + EmptySupplyCurvePointError + ExclusionLayerError + ExecutionError + ExtrapolationWarning + FileInputError + FileInputWarning + HandlerKeyError + HandlerRuntimeError + HandlerValueError + HandlerWarning + InputError + InputWarning + JSONError + MultiFileExclusionError + NearestNeighborError + OffshoreWindInputError + OffshoreWindInputWarning + OutputWarning + ParallelExecutionWarning + PipelineError + ProjectPointsValueError + PySAMVersionError + PySAMVersionWarning + ResourceError + SAMExecutionError + SAMExecutionWarning + SAMInputError + SAMInputWarning + SlurmWarning + SupplyCurveError + SupplyCurveInputError + WhileLoopPackingError + reVDeprecationWarning + reVError + reVLossesValueError + reVLossesWarning + + + + + diff --git a/_sources/_autosummary/reV.utilities.log_versions.rst.txt b/_sources/_autosummary/reV.utilities.log_versions.rst.txt new file mode 100644 index 000000000..c1c0d1c01 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.log_versions.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.log\_versions +=========================== + +.. currentmodule:: reV.utilities + +.. autofunction:: log_versions \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.pytest_utils.make_fake_h5_chunks.rst.txt b/_sources/_autosummary/reV.utilities.pytest_utils.make_fake_h5_chunks.rst.txt new file mode 100644 index 000000000..1ef11b943 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.pytest_utils.make_fake_h5_chunks.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.pytest\_utils.make\_fake\_h5\_chunks +================================================== + +.. currentmodule:: reV.utilities.pytest_utils + +.. 
autofunction:: make_fake_h5_chunks \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.pytest_utils.pd_date_range.rst.txt b/_sources/_autosummary/reV.utilities.pytest_utils.pd_date_range.rst.txt new file mode 100644 index 000000000..890fcc382 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.pytest_utils.pd_date_range.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.pytest\_utils.pd\_date\_range +=========================================== + +.. currentmodule:: reV.utilities.pytest_utils + +.. autofunction:: pd_date_range \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.pytest_utils.rst.txt b/_sources/_autosummary/reV.utilities.pytest_utils.rst.txt new file mode 100644 index 000000000..11fe1f7e9 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.pytest_utils.rst.txt @@ -0,0 +1,32 @@ +reV.utilities.pytest\_utils +=========================== + +.. automodule:: reV.utilities.pytest_utils + + + + + + + + .. rubric:: Functions + + .. autosummary:: + :toctree: + + make_fake_h5_chunks + pd_date_range + write_chunk + + + + + + + + + + + + + diff --git a/_sources/_autosummary/reV.utilities.pytest_utils.write_chunk.rst.txt b/_sources/_autosummary/reV.utilities.pytest_utils.write_chunk.rst.txt new file mode 100644 index 000000000..2396f74a5 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.pytest_utils.write_chunk.rst.txt @@ -0,0 +1,6 @@ +reV.utilities.pytest\_utils.write\_chunk +======================================== + +.. currentmodule:: reV.utilities.pytest_utils + +.. autofunction:: write_chunk \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.rst.txt b/_sources/_autosummary/reV.utilities.rst.txt new file mode 100644 index 000000000..18c1c8814 --- /dev/null +++ b/_sources/_autosummary/reV.utilities.rst.txt @@ -0,0 +1,49 @@ +reV.utilities +============= + +.. automodule:: reV.utilities + + + + + + + + .. rubric:: Functions + + .. 
autosummary:: + :toctree: + + log_versions + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + ModuleName + + + + + + + + + +.. autosummary:: + :toctree: + :template: custom-module-template.rst + :recursive: + + reV.utilities.cli_functions + reV.utilities.curtailment + reV.utilities.exceptions + reV.utilities.pytest_utils + reV.utilities.slots + diff --git a/_sources/_autosummary/reV.utilities.slots.SlottedDict.rst.txt b/_sources/_autosummary/reV.utilities.slots.SlottedDict.rst.txt new file mode 100644 index 000000000..a2ed09bfd --- /dev/null +++ b/_sources/_autosummary/reV.utilities.slots.SlottedDict.rst.txt @@ -0,0 +1,33 @@ +reV.utilities.slots.SlottedDict +=============================== + +.. currentmodule:: reV.utilities.slots + +.. autoclass:: SlottedDict + :members: + :show-inheritance: + :inherited-members: + :special-members: __call__, __add__, __mul__ + + + + .. rubric:: Methods + + .. autosummary:: + + ~SlottedDict.items + ~SlottedDict.keys + ~SlottedDict.update + ~SlottedDict.values + + + + + + .. rubric:: Attributes + + .. autosummary:: + + ~SlottedDict.var_list + + \ No newline at end of file diff --git a/_sources/_autosummary/reV.utilities.slots.rst.txt b/_sources/_autosummary/reV.utilities.slots.rst.txt new file mode 100644 index 000000000..7f2b8ed4d --- /dev/null +++ b/_sources/_autosummary/reV.utilities.slots.rst.txt @@ -0,0 +1,31 @@ +reV.utilities.slots +=================== + +.. automodule:: reV.utilities.slots + + + + + + + + + + + + .. rubric:: Classes + + .. autosummary:: + :toctree: + :template: custom-class-template.rst + + SlottedDict + + + + + + + + + diff --git a/_sources/_autosummary/reV.version.rst.txt b/_sources/_autosummary/reV.version.rst.txt new file mode 100644 index 000000000..51e364ebd --- /dev/null +++ b/_sources/_autosummary/reV.version.rst.txt @@ -0,0 +1,23 @@ +reV.version +=========== + +.. 
automodule:: reV.version + + + + + + + + + + + + + + + + + + + diff --git a/_sources/_cli/cli.rst.txt b/_sources/_cli/cli.rst.txt new file mode 100644 index 000000000..937b37258 --- /dev/null +++ b/_sources/_cli/cli.rst.txt @@ -0,0 +1,26 @@ +.. _cli-docs: + +Command Line Interfaces (CLIs) +============================== + +.. toctree:: + + reV + reV template-configs + reV batch + reV pipeline + reV project-points + reV bespoke + reV generation + reV econ + reV collect + reV multiyear + reV supply-curve-aggregation + reV supply-curve + reV rep-profiles + reV hybrids + reV nrwal + reV qa-qc + reV script + reV status + reV reset-status diff --git a/_sources/_cli/reV batch.rst.txt b/_sources/_cli/reV batch.rst.txt new file mode 100644 index 000000000..7526366fe --- /dev/null +++ b/_sources/_cli/reV batch.rst.txt @@ -0,0 +1,5 @@ +.. _rev-batch: + +.. click:: reV.cli:batch + :prog: reV batch + :nested: none diff --git a/_sources/_cli/reV bespoke.rst.txt b/_sources/_cli/reV bespoke.rst.txt new file mode 100644 index 000000000..c3b693804 --- /dev/null +++ b/_sources/_cli/reV bespoke.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.bespoke.cli_bespoke:main + :prog: reV bespoke + :nested: full diff --git a/_sources/_cli/reV collect.rst.txt b/_sources/_cli/reV collect.rst.txt new file mode 100644 index 000000000..d20c35bca --- /dev/null +++ b/_sources/_cli/reV collect.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.handlers.cli_collect:main + :prog: reV collect + :nested: full diff --git a/_sources/_cli/reV econ.rst.txt b/_sources/_cli/reV econ.rst.txt new file mode 100644 index 000000000..27489ccf7 --- /dev/null +++ b/_sources/_cli/reV econ.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.econ.cli_econ:main + :prog: reV econ + :nested: full diff --git a/_sources/_cli/reV generation.rst.txt b/_sources/_cli/reV generation.rst.txt new file mode 100644 index 000000000..6d301d418 --- /dev/null +++ b/_sources/_cli/reV generation.rst.txt @@ -0,0 +1,3 @@ +.. 
click:: reV.generation.cli_gen:main + :prog: reV generation + :nested: full diff --git a/_sources/_cli/reV hybrids.rst.txt b/_sources/_cli/reV hybrids.rst.txt new file mode 100644 index 000000000..30e070480 --- /dev/null +++ b/_sources/_cli/reV hybrids.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.hybrids.cli_hybrids:main + :prog: reV hybrids + :nested: full diff --git a/_sources/_cli/reV multiyear.rst.txt b/_sources/_cli/reV multiyear.rst.txt new file mode 100644 index 000000000..3418ae20c --- /dev/null +++ b/_sources/_cli/reV multiyear.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.handlers.cli_multi_year:main + :prog: reV multiyear + :nested: full diff --git a/_sources/_cli/reV nrwal.rst.txt b/_sources/_cli/reV nrwal.rst.txt new file mode 100644 index 000000000..36c74713b --- /dev/null +++ b/_sources/_cli/reV nrwal.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.nrwal.cli_nrwal:main + :prog: reV nrwal + :nested: full diff --git a/_sources/_cli/reV pipeline.rst.txt b/_sources/_cli/reV pipeline.rst.txt new file mode 100644 index 000000000..47c71dd76 --- /dev/null +++ b/_sources/_cli/reV pipeline.rst.txt @@ -0,0 +1,5 @@ +.. _rev-pipeline: + +.. click:: reV.cli:pipeline + :prog: reV pipeline + :nested: none diff --git a/_sources/_cli/reV project-points.rst.txt b/_sources/_cli/reV project-points.rst.txt new file mode 100644 index 000000000..38807fbde --- /dev/null +++ b/_sources/_cli/reV project-points.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.config.cli_project_points:project_points + :prog: reV project-points + :nested: full \ No newline at end of file diff --git a/_sources/_cli/reV qa-qc.rst.txt b/_sources/_cli/reV qa-qc.rst.txt new file mode 100644 index 000000000..4b1031a2e --- /dev/null +++ b/_sources/_cli/reV qa-qc.rst.txt @@ -0,0 +1,3 @@ +.. 
click:: reV.qa_qc.cli_qa_qc:main + :prog: reV qa-qc + :nested: full diff --git a/_sources/_cli/reV rep-profiles.rst.txt b/_sources/_cli/reV rep-profiles.rst.txt new file mode 100644 index 000000000..e349247b3 --- /dev/null +++ b/_sources/_cli/reV rep-profiles.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.rep_profiles.cli_rep_profiles:main + :prog: reV rep-profiles + :nested: full diff --git a/_sources/_cli/reV reset-status.rst.txt b/_sources/_cli/reV reset-status.rst.txt new file mode 100644 index 000000000..9841f1fd8 --- /dev/null +++ b/_sources/_cli/reV reset-status.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.cli:reset_status + :prog: reV reset-status + :nested: none diff --git a/_sources/_cli/reV script.rst.txt b/_sources/_cli/reV script.rst.txt new file mode 100644 index 000000000..715e92ccb --- /dev/null +++ b/_sources/_cli/reV script.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.cli:script + :prog: reV script + :nested: none diff --git a/_sources/_cli/reV status.rst.txt b/_sources/_cli/reV status.rst.txt new file mode 100644 index 000000000..0003f7a5e --- /dev/null +++ b/_sources/_cli/reV status.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.cli:status + :prog: reV status + :nested: none diff --git a/_sources/_cli/reV supply-curve-aggregation.rst.txt b/_sources/_cli/reV supply-curve-aggregation.rst.txt new file mode 100644 index 000000000..dd207c7d3 --- /dev/null +++ b/_sources/_cli/reV supply-curve-aggregation.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.supply_curve.cli_sc_aggregation:main + :prog: reV supply-curve-aggregation + :nested: full diff --git a/_sources/_cli/reV supply-curve.rst.txt b/_sources/_cli/reV supply-curve.rst.txt new file mode 100644 index 000000000..443787a7b --- /dev/null +++ b/_sources/_cli/reV supply-curve.rst.txt @@ -0,0 +1,3 @@ +.. 
click:: reV.supply_curve.cli_supply_curve:main + :prog: reV supply-curve + :nested: full diff --git a/_sources/_cli/reV template-configs.rst.txt b/_sources/_cli/reV template-configs.rst.txt new file mode 100644 index 000000000..6491713a3 --- /dev/null +++ b/_sources/_cli/reV template-configs.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.cli:template_configs + :prog: reV template-configs + :nested: none diff --git a/_sources/_cli/reV.rst.txt b/_sources/_cli/reV.rst.txt new file mode 100644 index 000000000..6fbaf4e9e --- /dev/null +++ b/_sources/_cli/reV.rst.txt @@ -0,0 +1,3 @@ +.. click:: reV.cli:main + :prog: reV + :nested: none \ No newline at end of file diff --git a/_sources/api.rst.txt b/_sources/api.rst.txt new file mode 100644 index 000000000..8fad34b8b --- /dev/null +++ b/_sources/api.rst.txt @@ -0,0 +1,9 @@ +.. _rev-main: + +.. autosummary:: + + :toctree: _autosummary + :template: custom-module-template.rst + :recursive: + + reV diff --git a/_sources/index.rst.txt b/_sources/index.rst.txt new file mode 100644 index 000000000..96384e0ad --- /dev/null +++ b/_sources/index.rst.txt @@ -0,0 +1,29 @@ +.. toctree:: + :hidden: + + Home page + Installation and Usage + Examples + API reference <_autosummary/reV> + CLI reference <_cli/cli> + +reV documentation +***************** + +What is reV? +============ + +.. include:: ../../README.rst + :start-after: inclusion-intro + :end-before: .. inclusion-flowchart + + +.. image:: _static/rev_flow_chart.png + :align: center + :alt: Typical reV workflow + + +| + +.. include:: ../../README.rst + :start-after: inclusion-get-started diff --git a/_sources/misc/examples.advanced_econ_modeling.rst.txt b/_sources/misc/examples.advanced_econ_modeling.rst.txt new file mode 100644 index 000000000..037c34264 --- /dev/null +++ b/_sources/misc/examples.advanced_econ_modeling.rst.txt @@ -0,0 +1,5 @@ +SAM Single Owner Modeling +========================= + +.. 
include:: ../../../examples/advanced_econ_modeling/README.rst + :start-line: 2 diff --git a/_sources/misc/examples.aws_pcluster.rst.txt b/_sources/misc/examples.aws_pcluster.rst.txt new file mode 100644 index 000000000..9cb6ec1bf --- /dev/null +++ b/_sources/misc/examples.aws_pcluster.rst.txt @@ -0,0 +1,5 @@ +Running reV on an AWS Parallel Cluster +====================================== + +.. include:: ../../../examples/aws_pcluster/README.rst + :start-line: 2 diff --git a/_sources/misc/examples.batched_execution.rst.txt b/_sources/misc/examples.batched_execution.rst.txt new file mode 100644 index 000000000..f17391d51 --- /dev/null +++ b/_sources/misc/examples.batched_execution.rst.txt @@ -0,0 +1,5 @@ +Batched Execution +================= + +.. include:: ../../../examples/batched_execution/README.rst + :start-line: 2 diff --git a/_sources/misc/examples.eagle_node_requests.rst.txt b/_sources/misc/examples.eagle_node_requests.rst.txt new file mode 100644 index 000000000..22b7881f6 --- /dev/null +++ b/_sources/misc/examples.eagle_node_requests.rst.txt @@ -0,0 +1,7 @@ +.. _eagle_node_requests: + +Eagle Node Requests +=================== + +.. include:: ../../../examples/eagle_node_requests/README.rst + :start-line: 2 diff --git a/_sources/misc/examples.full_pipeline_execution.rst.txt b/_sources/misc/examples.full_pipeline_execution.rst.txt new file mode 100644 index 000000000..4a52f7c18 --- /dev/null +++ b/_sources/misc/examples.full_pipeline_execution.rst.txt @@ -0,0 +1,7 @@ +.. _full_pipeline_execution: + +Full Pipeline Execution +======================= + +.. include:: ../../../examples/full_pipeline_execution/README.rst + :start-line: 2 diff --git a/_sources/misc/examples.marine_energy.rst.txt b/_sources/misc/examples.marine_energy.rst.txt new file mode 100644 index 000000000..986b18574 --- /dev/null +++ b/_sources/misc/examples.marine_energy.rst.txt @@ -0,0 +1,5 @@ +reV Marine Energy +================= + +.. 
include:: ../../../examples/marine_energy/README.rst + :start-line: 2 diff --git a/_sources/misc/examples.offshore_wind.rst.txt b/_sources/misc/examples.offshore_wind.rst.txt new file mode 100644 index 000000000..8d6c98f78 --- /dev/null +++ b/_sources/misc/examples.offshore_wind.rst.txt @@ -0,0 +1,5 @@ +Offshore Wind Modeling +====================== + +.. include:: ../../../examples/offshore_wind/README.rst + :start-line: 2 diff --git a/_sources/misc/examples.project_points.rst.txt b/_sources/misc/examples.project_points.rst.txt new file mode 100644 index 000000000..83e4d48f9 --- /dev/null +++ b/_sources/misc/examples.project_points.rst.txt @@ -0,0 +1,5 @@ +reV Project Points +================== + +.. include:: ../../../examples/project_points/README.rst + :start-line: 2 diff --git a/_sources/misc/examples.rev_losses.rst.txt b/_sources/misc/examples.rev_losses.rst.txt new file mode 100644 index 000000000..1fbf92c74 --- /dev/null +++ b/_sources/misc/examples.rev_losses.rst.txt @@ -0,0 +1,5 @@ +reV Losses +========== + +.. include:: ../../../examples/rev_losses/README.rst + :start-line: 2 diff --git a/_sources/misc/examples.rst.txt b/_sources/misc/examples.rst.txt new file mode 100644 index 000000000..42c0e0da4 --- /dev/null +++ b/_sources/misc/examples.rst.txt @@ -0,0 +1,19 @@ +.. _examples: + +Examples +======== +.. toctree:: + + examples.project_points + examples.advanced_econ_modeling + examples.marine_energy + examples.offshore_wind + examples.rev_losses + examples.single_module_execution + examples.full_pipeline_execution + examples.batched_execution + examples.eagle_node_requests + examples.running_locally + examples.aws_pcluster + examples.running_with_hsds + diff --git a/_sources/misc/examples.running_locally.rst.txt b/_sources/misc/examples.running_locally.rst.txt new file mode 100644 index 000000000..577b8bbe7 --- /dev/null +++ b/_sources/misc/examples.running_locally.rst.txt @@ -0,0 +1,5 @@ +Run reV locally +=============== + +.. 
include:: ../../../examples/running_locally/README.rst + :start-line: 2 diff --git a/_sources/misc/examples.running_with_hsds.rst.txt b/_sources/misc/examples.running_with_hsds.rst.txt new file mode 100644 index 000000000..83c3801eb --- /dev/null +++ b/_sources/misc/examples.running_with_hsds.rst.txt @@ -0,0 +1,5 @@ +Running with HSDS +================= + +.. include:: ../../../examples/running_with_hsds/README.rst + :start-line: 2 diff --git a/_sources/misc/examples.single_module_execution.rst.txt b/_sources/misc/examples.single_module_execution.rst.txt new file mode 100644 index 000000000..478222528 --- /dev/null +++ b/_sources/misc/examples.single_module_execution.rst.txt @@ -0,0 +1,5 @@ +Single Module Execution +======================= + +.. include:: ../../../examples/single_module_execution/README.rst + :start-line: 2 diff --git a/_sources/misc/installation.rst.txt b/_sources/misc/installation.rst.txt new file mode 100644 index 000000000..3ebdfee97 --- /dev/null +++ b/_sources/misc/installation.rst.txt @@ -0,0 +1,15 @@ +.. _installation: + +Installation +============ + +.. include:: ../../../README.rst + :start-after: Installing reV + + +Command Line Tools +================== + +.. include:: ../../../README.rst + :start-after: reV command line tools + :end-before: Launching a run diff --git a/_sources/misc/installation_usage.rst.txt b/_sources/misc/installation_usage.rst.txt new file mode 100644 index 000000000..966172431 --- /dev/null +++ b/_sources/misc/installation_usage.rst.txt @@ -0,0 +1,5 @@ +Installation and Usage +====================== +.. 
toctree:: + + installation diff --git a/_static/RD100_2023_Winner_Logo.png b/_static/RD100_2023_Winner_Logo.png new file mode 100644 index 000000000..2cabecea5 Binary files /dev/null and b/_static/RD100_2023_Winner_Logo.png differ diff --git a/_static/_sphinx_javascript_frameworks_compat.js b/_static/_sphinx_javascript_frameworks_compat.js new file mode 100644 index 000000000..81415803e --- /dev/null +++ b/_static/_sphinx_javascript_frameworks_compat.js @@ -0,0 +1,123 @@ +/* Compatability shim for jQuery and underscores.js. + * + * Copyright Sphinx contributors + * Released under the two clause BSD licence + */ + +/** + * small helper function to urldecode strings + * + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL + */ +jQuery.urldecode = function(x) { + if (!x) { + return x + } + return decodeURIComponent(x.replace(/\+/g, ' ')); +}; + +/** + * small helper function to urlencode strings + */ +jQuery.urlencode = encodeURIComponent; + +/** + * This function returns the parsed url parameters of the + * current request. Multiple values per key are supported, + * it will always return arrays of strings for the value parts. + */ +jQuery.getQueryParameters = function(s) { + if (typeof s === 'undefined') + s = document.location.search; + var parts = s.substr(s.indexOf('?') + 1).split('&'); + var result = {}; + for (var i = 0; i < parts.length; i++) { + var tmp = parts[i].split('=', 2); + var key = jQuery.urldecode(tmp[0]); + var value = jQuery.urldecode(tmp[1]); + if (key in result) + result[key].push(value); + else + result[key] = [value]; + } + return result; +}; + +/** + * highlight a given string on a jquery object by wrapping it in + * span elements with the given class name. 
+ */ +jQuery.fn.highlightText = function(text, className) { + function highlight(node, addItems) { + if (node.nodeType === 3) { + var val = node.nodeValue; + var pos = val.toLowerCase().indexOf(text); + if (pos >= 0 && + !jQuery(node.parentNode).hasClass(className) && + !jQuery(node.parentNode).hasClass("nohighlight")) { + var span; + var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.className = className; + } + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + node.parentNode.insertBefore(span, node.parentNode.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling)); + node.nodeValue = val.substr(0, pos); + if (isInSVG) { + var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); + var bbox = node.parentElement.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute('class', className); + addItems.push({ + "parent": node.parentNode, + "target": rect}); + } + } + } + else if (!jQuery(node).is("button, select, textarea")) { + jQuery.each(node.childNodes, function() { + highlight(this, addItems); + }); + } + } + var addItems = []; + var result = this.each(function() { + highlight(this, addItems); + }); + for (var i = 0; i < addItems.length; ++i) { + jQuery(addItems[i].parent).before(addItems[i].target); + } + return result; +}; + +/* + * backward compatibility for jQuery.browser + * This will be supported until firefox bug is fixed. 
+ */ +if (!jQuery.browser) { + jQuery.uaMatch = function(ua) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || + /(webkit)[ \/]([\w.]+)/.exec(ua) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || + /(msie) ([\w.]+)/.exec(ua) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; + }; + jQuery.browser = {}; + jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; +} diff --git a/_static/basic.css b/_static/basic.css new file mode 100644 index 000000000..cfc60b86c --- /dev/null +++ b/_static/basic.css @@ -0,0 +1,921 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + 
margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px 
solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: 360px; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- 
*/ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 
0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + 
color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl 
{ + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +.translated { + background-color: rgba(207, 255, 207, 0.2) +} + +.untranslated { + background-color: rgba(255, 207, 207, 0.2) +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + 
+div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/_static/check-solid.svg b/_static/check-solid.svg new file mode 100644 index 000000000..92fad4b5c --- /dev/null +++ b/_static/check-solid.svg @@ -0,0 +1,4 @@ + + + + diff --git 
a/_static/clipboard.min.js b/_static/clipboard.min.js new file mode 100644 index 000000000..54b3c4638 --- /dev/null +++ b/_static/clipboard.min.js @@ -0,0 +1,7 @@ +/*! + * clipboard.js v2.0.8 + * https://clipboardjs.com/ + * + * Licensed MIT © Zeno Rocha + */ +!function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.ClipboardJS=e():t.ClipboardJS=e()}(this,function(){return n={686:function(t,e,n){"use strict";n.d(e,{default:function(){return o}});var e=n(279),i=n.n(e),e=n(370),u=n.n(e),e=n(817),c=n.n(e);function a(t){try{return document.execCommand(t)}catch(t){return}}var f=function(t){t=c()(t);return a("cut"),t};var l=function(t){var e,n,o,r=1 + + + + diff --git a/_static/copybutton.css b/_static/copybutton.css new file mode 100644 index 000000000..f1916ec7d --- /dev/null +++ b/_static/copybutton.css @@ -0,0 +1,94 @@ +/* Copy buttons */ +button.copybtn { + position: absolute; + display: flex; + top: .3em; + right: .3em; + width: 1.7em; + height: 1.7em; + opacity: 0; + transition: opacity 0.3s, border .3s, background-color .3s; + user-select: none; + padding: 0; + border: none; + outline: none; + border-radius: 0.4em; + /* The colors that GitHub uses */ + border: #1b1f2426 1px solid; + background-color: #f6f8fa; + color: #57606a; +} + +button.copybtn.success { + border-color: #22863a; + color: #22863a; +} + +button.copybtn svg { + stroke: currentColor; + width: 1.5em; + height: 1.5em; + padding: 0.1em; +} + +div.highlight { + position: relative; +} + +/* Show the copybutton */ +.highlight:hover button.copybtn, button.copybtn.success { + opacity: 1; +} + +.highlight button.copybtn:hover { + background-color: rgb(235, 235, 235); +} + +.highlight button.copybtn:active { + background-color: rgb(187, 187, 187); +} + +/** + * A minimal CSS-only tooltip copied from: + * https://codepen.io/mildrenben/pen/rVBrpK + * + * To use, write HTML like the following: + * + *

Short

+ */ + .o-tooltip--left { + position: relative; + } + + .o-tooltip--left:after { + opacity: 0; + visibility: hidden; + position: absolute; + content: attr(data-tooltip); + padding: .2em; + font-size: .8em; + left: -.2em; + background: grey; + color: white; + white-space: nowrap; + z-index: 2; + border-radius: 2px; + transform: translateX(-102%) translateY(0); + transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); +} + +.o-tooltip--left:hover:after { + display: block; + opacity: 1; + visibility: visible; + transform: translateX(-100%) translateY(0); + transition: opacity 0.2s cubic-bezier(0.64, 0.09, 0.08, 1), transform 0.2s cubic-bezier(0.64, 0.09, 0.08, 1); + transition-delay: .5s; +} + +/* By default the copy button shouldn't show up when printing a page */ +@media print { + button.copybtn { + display: none; + } +} diff --git a/_static/copybutton.js b/_static/copybutton.js new file mode 100644 index 000000000..2ea7ff3e2 --- /dev/null +++ b/_static/copybutton.js @@ -0,0 +1,248 @@ +// Localization support +const messages = { + 'en': { + 'copy': 'Copy', + 'copy_to_clipboard': 'Copy to clipboard', + 'copy_success': 'Copied!', + 'copy_failure': 'Failed to copy', + }, + 'es' : { + 'copy': 'Copiar', + 'copy_to_clipboard': 'Copiar al portapapeles', + 'copy_success': '¡Copiado!', + 'copy_failure': 'Error al copiar', + }, + 'de' : { + 'copy': 'Kopieren', + 'copy_to_clipboard': 'In die Zwischenablage kopieren', + 'copy_success': 'Kopiert!', + 'copy_failure': 'Fehler beim Kopieren', + }, + 'fr' : { + 'copy': 'Copier', + 'copy_to_clipboard': 'Copier dans le presse-papier', + 'copy_success': 'Copié !', + 'copy_failure': 'Échec de la copie', + }, + 'ru': { + 'copy': 'Скопировать', + 'copy_to_clipboard': 'Скопировать в буфер', + 'copy_success': 'Скопировано!', + 'copy_failure': 'Не удалось скопировать', + }, + 'zh-CN': { + 'copy': '复制', + 'copy_to_clipboard': '复制到剪贴板', + 'copy_success': '复制成功!', + 'copy_failure': '复制失败', + 
}, + 'it' : { + 'copy': 'Copiare', + 'copy_to_clipboard': 'Copiato negli appunti', + 'copy_success': 'Copiato!', + 'copy_failure': 'Errore durante la copia', + } +} + +let locale = 'en' +if( document.documentElement.lang !== undefined + && messages[document.documentElement.lang] !== undefined ) { + locale = document.documentElement.lang +} + +let doc_url_root = DOCUMENTATION_OPTIONS.URL_ROOT; +if (doc_url_root == '#') { + doc_url_root = ''; +} + +/** + * SVG files for our copy buttons + */ +let iconCheck = ` + ${messages[locale]['copy_success']} + + +` + +// If the user specified their own SVG use that, otherwise use the default +let iconCopy = ``; +if (!iconCopy) { + iconCopy = ` + ${messages[locale]['copy_to_clipboard']} + + + +` +} + +/** + * Set up copy/paste for code blocks + */ + +const runWhenDOMLoaded = cb => { + if (document.readyState != 'loading') { + cb() + } else if (document.addEventListener) { + document.addEventListener('DOMContentLoaded', cb) + } else { + document.attachEvent('onreadystatechange', function() { + if (document.readyState == 'complete') cb() + }) + } +} + +const codeCellId = index => `codecell${index}` + +// Clears selected text since ClipboardJS will select the text when copying +const clearSelection = () => { + if (window.getSelection) { + window.getSelection().removeAllRanges() + } else if (document.selection) { + document.selection.empty() + } +} + +// Changes tooltip text for a moment, then changes it back +// We want the timeout of our `success` class to be a bit shorter than the +// tooltip and icon change, so that we can hide the icon before changing back. 
+var timeoutIcon = 2000; +var timeoutSuccessClass = 1500; + +const temporarilyChangeTooltip = (el, oldText, newText) => { + el.setAttribute('data-tooltip', newText) + el.classList.add('success') + // Remove success a little bit sooner than we change the tooltip + // So that we can use CSS to hide the copybutton first + setTimeout(() => el.classList.remove('success'), timeoutSuccessClass) + setTimeout(() => el.setAttribute('data-tooltip', oldText), timeoutIcon) +} + +// Changes the copy button icon for two seconds, then changes it back +const temporarilyChangeIcon = (el) => { + el.innerHTML = iconCheck; + setTimeout(() => {el.innerHTML = iconCopy}, timeoutIcon) +} + +const addCopyButtonToCodeCells = () => { + // If ClipboardJS hasn't loaded, wait a bit and try again. This + // happens because we load ClipboardJS asynchronously. + if (window.ClipboardJS === undefined) { + setTimeout(addCopyButtonToCodeCells, 250) + return + } + + // Add copybuttons to all of our code cells + const COPYBUTTON_SELECTOR = 'div.highlight pre'; + const codeCells = document.querySelectorAll(COPYBUTTON_SELECTOR) + codeCells.forEach((codeCell, index) => { + const id = codeCellId(index) + codeCell.setAttribute('id', id) + + const clipboardButton = id => + `` + codeCell.insertAdjacentHTML('afterend', clipboardButton(id)) + }) + +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +} + +/** + * Removes excluded text from a Node. + * + * @param {Node} target Node to filter. + * @param {string} exclude CSS selector of nodes to exclude. + * @returns {DOMString} Text from `target` with text removed. + */ +function filterText(target, exclude) { + const clone = target.cloneNode(true); // clone as to not modify the live DOM + if (exclude) { + // remove excluded nodes + clone.querySelectorAll(exclude).forEach(node => node.remove()); + } + return clone.innerText; +} + +// Callback when a copy button is clicked. 
Will be passed the node that was clicked +// should then grab the text and replace pieces of text that shouldn't be used in output +function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { + var regexp; + var match; + + // Do we check for line continuation characters and "HERE-documents"? + var useLineCont = !!lineContinuationChar + var useHereDoc = !!hereDocDelim + + // create regexp to capture prompt and remaining line + if (isRegexp) { + regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') + } else { + regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') + } + + const outputLines = []; + var promptFound = false; + var gotLineCont = false; + var gotHereDoc = false; + const lineGotPrompt = []; + for (const line of textContent.split('\n')) { + match = line.match(regexp) + if (match || gotLineCont || gotHereDoc) { + promptFound = regexp.test(line) + lineGotPrompt.push(promptFound) + if (removePrompts && promptFound) { + outputLines.push(match[2]) + } else { + outputLines.push(line) + } + gotLineCont = line.endsWith(lineContinuationChar) & useLineCont + if (line.includes(hereDocDelim) & useHereDoc) + gotHereDoc = !gotHereDoc + } else if (!onlyCopyPromptLines) { + outputLines.push(line) + } else if (copyEmptyLines && line.trim() === '') { + outputLines.push(line) + } + } + + // If no lines with the prompt were found then just use original lines + if (lineGotPrompt.some(v => v === true)) { + textContent = outputLines.join('\n'); + } + + // Remove a trailing newline to avoid auto-running when pasting + if (textContent.endsWith("\n")) { + textContent = textContent.slice(0, -1) + } + return textContent +} + + +var copyTargetText = (trigger) => { + var target = document.querySelector(trigger.attributes['data-clipboard-target'].value); + + // get filtered text + let exclude = '.linenos'; + + let text = 
filterText(target, exclude); + return formatCopyText(text, '', false, true, true, true, '', '') +} + + // Initialize with a callback so we can modify the text before copy + const clipboard = new ClipboardJS('.copybtn', {text: copyTargetText}) + + // Update UI with error/success messages + clipboard.on('success', event => { + clearSelection() + temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_success']) + temporarilyChangeIcon(event.trigger) + }) + + clipboard.on('error', event => { + temporarilyChangeTooltip(event.trigger, messages[locale]['copy'], messages[locale]['copy_failure']) + }) +} + +runWhenDOMLoaded(addCopyButtonToCodeCells) \ No newline at end of file diff --git a/_static/copybutton_funcs.js b/_static/copybutton_funcs.js new file mode 100644 index 000000000..dbe1aaad7 --- /dev/null +++ b/_static/copybutton_funcs.js @@ -0,0 +1,73 @@ +function escapeRegExp(string) { + return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string +} + +/** + * Removes excluded text from a Node. + * + * @param {Node} target Node to filter. + * @param {string} exclude CSS selector of nodes to exclude. + * @returns {DOMString} Text from `target` with text removed. + */ +export function filterText(target, exclude) { + const clone = target.cloneNode(true); // clone as to not modify the live DOM + if (exclude) { + // remove excluded nodes + clone.querySelectorAll(exclude).forEach(node => node.remove()); + } + return clone.innerText; +} + +// Callback when a copy button is clicked. Will be passed the node that was clicked +// should then grab the text and replace pieces of text that shouldn't be used in output +export function formatCopyText(textContent, copybuttonPromptText, isRegexp = false, onlyCopyPromptLines = true, removePrompts = true, copyEmptyLines = true, lineContinuationChar = "", hereDocDelim = "") { + var regexp; + var match; + + // Do we check for line continuation characters and "HERE-documents"? 
+ var useLineCont = !!lineContinuationChar + var useHereDoc = !!hereDocDelim + + // create regexp to capture prompt and remaining line + if (isRegexp) { + regexp = new RegExp('^(' + copybuttonPromptText + ')(.*)') + } else { + regexp = new RegExp('^(' + escapeRegExp(copybuttonPromptText) + ')(.*)') + } + + const outputLines = []; + var promptFound = false; + var gotLineCont = false; + var gotHereDoc = false; + const lineGotPrompt = []; + for (const line of textContent.split('\n')) { + match = line.match(regexp) + if (match || gotLineCont || gotHereDoc) { + promptFound = regexp.test(line) + lineGotPrompt.push(promptFound) + if (removePrompts && promptFound) { + outputLines.push(match[2]) + } else { + outputLines.push(line) + } + gotLineCont = line.endsWith(lineContinuationChar) & useLineCont + if (line.includes(hereDocDelim) & useHereDoc) + gotHereDoc = !gotHereDoc + } else if (!onlyCopyPromptLines) { + outputLines.push(line) + } else if (copyEmptyLines && line.trim() === '') { + outputLines.push(line) + } + } + + // If no lines with the prompt were found then just use original lines + if (lineGotPrompt.some(v => v === true)) { + textContent = outputLines.join('\n'); + } + + // Remove a trailing newline to avoid auto-running when pasting + if (textContent.endsWith("\n")) { + textContent = textContent.slice(0, -1) + } + return textContent +} diff --git a/_static/css/badge_only.css b/_static/css/badge_only.css new file mode 100644 index 000000000..c718cee44 --- /dev/null +++ b/_static/css/badge_only.css @@ -0,0 +1 @@ +.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-style:normal;font-weight:400;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) 
format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#FontAwesome) format("svg")}.fa:before{font-family:FontAwesome;font-style:normal;font-weight:400;line-height:1}.fa:before,a .fa{text-decoration:inherit}.fa:before,a .fa,li .fa{display:inline-block}li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before,.icon-book:before{content:"\f02d"}.fa-caret-down:before,.icon-caret-down:before{content:"\f0d7"}.fa-caret-up:before,.icon-caret-up:before{content:"\f0d8"}.fa-caret-left:before,.icon-caret-left:before{content:"\f0d9"}.fa-caret-right:before,.icon-caret-right:before{content:"\f0da"}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60}.rst-versions .rst-current-version:after{clear:both;content:"";display:block}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions 
.rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} \ No newline at end of file diff --git a/_static/css/fonts/Roboto-Slab-Bold.woff b/_static/css/fonts/Roboto-Slab-Bold.woff new file mode 100644 index 000000000..6cb600001 Binary files /dev/null and b/_static/css/fonts/Roboto-Slab-Bold.woff differ diff --git a/_static/css/fonts/Roboto-Slab-Bold.woff2 b/_static/css/fonts/Roboto-Slab-Bold.woff2 new file mode 100644 index 000000000..7059e2314 Binary files /dev/null and b/_static/css/fonts/Roboto-Slab-Bold.woff2 differ diff --git a/_static/css/fonts/Roboto-Slab-Regular.woff b/_static/css/fonts/Roboto-Slab-Regular.woff new file mode 100644 index 000000000..f815f63f9 Binary files /dev/null and b/_static/css/fonts/Roboto-Slab-Regular.woff differ diff --git a/_static/css/fonts/Roboto-Slab-Regular.woff2 b/_static/css/fonts/Roboto-Slab-Regular.woff2 new file mode 100644 index 000000000..f2c76e5bd Binary files /dev/null and b/_static/css/fonts/Roboto-Slab-Regular.woff2 differ diff --git a/_static/css/fonts/fontawesome-webfont.eot b/_static/css/fonts/fontawesome-webfont.eot new file mode 100644 index 000000000..e9f60ca95 Binary files /dev/null and b/_static/css/fonts/fontawesome-webfont.eot differ diff --git 
a/_static/css/fonts/fontawesome-webfont.svg b/_static/css/fonts/fontawesome-webfont.svg new file mode 100644 index 000000000..855c845e5 --- /dev/null +++ b/_static/css/fonts/fontawesome-webfont.svg @@ -0,0 +1,2671 @@ + + + + +Created by FontForge 20120731 at Mon Oct 24 17:37:40 2016 + By ,,, +Copyright Dave Gandy 2016. All rights reserved. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/_static/css/fonts/fontawesome-webfont.ttf b/_static/css/fonts/fontawesome-webfont.ttf new file mode 100644 index 000000000..35acda2fa Binary files /dev/null and b/_static/css/fonts/fontawesome-webfont.ttf differ diff 
--git a/_static/css/fonts/fontawesome-webfont.woff b/_static/css/fonts/fontawesome-webfont.woff new file mode 100644 index 000000000..400014a4b Binary files /dev/null and b/_static/css/fonts/fontawesome-webfont.woff differ diff --git a/_static/css/fonts/fontawesome-webfont.woff2 b/_static/css/fonts/fontawesome-webfont.woff2 new file mode 100644 index 000000000..4d13fc604 Binary files /dev/null and b/_static/css/fonts/fontawesome-webfont.woff2 differ diff --git a/_static/css/fonts/lato-bold-italic.woff b/_static/css/fonts/lato-bold-italic.woff new file mode 100644 index 000000000..88ad05b9f Binary files /dev/null and b/_static/css/fonts/lato-bold-italic.woff differ diff --git a/_static/css/fonts/lato-bold-italic.woff2 b/_static/css/fonts/lato-bold-italic.woff2 new file mode 100644 index 000000000..c4e3d804b Binary files /dev/null and b/_static/css/fonts/lato-bold-italic.woff2 differ diff --git a/_static/css/fonts/lato-bold.woff b/_static/css/fonts/lato-bold.woff new file mode 100644 index 000000000..c6dff51f0 Binary files /dev/null and b/_static/css/fonts/lato-bold.woff differ diff --git a/_static/css/fonts/lato-bold.woff2 b/_static/css/fonts/lato-bold.woff2 new file mode 100644 index 000000000..bb195043c Binary files /dev/null and b/_static/css/fonts/lato-bold.woff2 differ diff --git a/_static/css/fonts/lato-normal-italic.woff b/_static/css/fonts/lato-normal-italic.woff new file mode 100644 index 000000000..76114bc03 Binary files /dev/null and b/_static/css/fonts/lato-normal-italic.woff differ diff --git a/_static/css/fonts/lato-normal-italic.woff2 b/_static/css/fonts/lato-normal-italic.woff2 new file mode 100644 index 000000000..3404f37e2 Binary files /dev/null and b/_static/css/fonts/lato-normal-italic.woff2 differ diff --git a/_static/css/fonts/lato-normal.woff b/_static/css/fonts/lato-normal.woff new file mode 100644 index 000000000..ae1307ff5 Binary files /dev/null and b/_static/css/fonts/lato-normal.woff differ diff --git a/_static/css/fonts/lato-normal.woff2 
b/_static/css/fonts/lato-normal.woff2 new file mode 100644 index 000000000..3bf984332 Binary files /dev/null and b/_static/css/fonts/lato-normal.woff2 differ diff --git a/_static/css/theme.css b/_static/css/theme.css new file mode 100644 index 000000000..19a446a0e --- /dev/null +++ b/_static/css/theme.css @@ -0,0 +1,4 @@ +html{box-sizing:border-box}*,:after,:before{box-sizing:inherit}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}[hidden],audio:not([controls]){display:none}*{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}body{margin:0}a:active,a:hover{outline:0}abbr[title]{border-bottom:1px dotted}b,strong{font-weight:700}blockquote{margin:0}dfn{font-style:italic}ins{background:#ff9;text-decoration:none}ins,mark{color:#000}mark{background:#ff0;font-style:italic;font-weight:700}.rst-content code,.rst-content tt,code,kbd,pre,samp{font-family:monospace,serif;_font-family:courier 
new,monospace;font-size:1em}pre{white-space:pre}q{quotes:none}q:after,q:before{content:"";content:none}small{font-size:85%}sub,sup{font-size:75%;line-height:0;position:relative;vertical-align:baseline}sup{top:-.5em}sub{bottom:-.25em}dl,ol,ul{margin:0;padding:0;list-style:none;list-style-image:none}li{list-style:none}dd{margin:0}img{border:0;-ms-interpolation-mode:bicubic;vertical-align:middle;max-width:100%}svg:not(:root){overflow:hidden}figure,form{margin:0}label{cursor:pointer}button,input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}button,input{line-height:normal}button,input[type=button],input[type=reset],input[type=submit]{cursor:pointer;-webkit-appearance:button;*overflow:visible}button[disabled],input[disabled]{cursor:default}input[type=search]{-webkit-appearance:textfield;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;box-sizing:content-box}textarea{resize:vertical}table{border-collapse:collapse;border-spacing:0}td{vertical-align:top}.chromeframe{margin:.2em 0;background:#ccc;color:#000;padding:.2em 0}.ir{display:block;border:0;text-indent:-999em;overflow:hidden;background-color:transparent;background-repeat:no-repeat;text-align:left;direction:ltr;*line-height:0}.ir br{display:none}.hidden{display:none!important;visibility:hidden}.visuallyhidden{border:0;clip:rect(0 0 0 0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.visuallyhidden.focusable:active,.visuallyhidden.focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}.invisible{visibility:hidden}.relative{position:relative}big,small{font-size:100%}@media print{body,html,section{background:none!important}*{box-shadow:none!important;text-shadow:none!important;filter:none!important;-ms-filter:none!important}a,a:visited{text-decoration:underline}.ir 
a:after,a[href^="#"]:after,a[href^="javascript:"]:after{content:""}blockquote,pre{page-break-inside:avoid}thead{display:table-header-group}img,tr{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}.rst-content .toctree-wrapper>p.caption,h2,h3,p{orphans:3;widows:3}.rst-content .toctree-wrapper>p.caption,h2,h3{page-break-after:avoid}}.btn,.fa:before,.icon:before,.rst-content .admonition,.rst-content .admonition-title:before,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .code-block-caption .headerlink:before,.rst-content .danger,.rst-content .eqno .headerlink:before,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-alert,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li 
button.toctree-expand:before,input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week],select,textarea{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{display:table;content:""}.clearfix:after{clear:both}/*! + * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome + * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) + */@font-face{font-family:FontAwesome;src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713);src:url(fonts/fontawesome-webfont.eot?674f50d287a8c48dc19ba404d20fe713?#iefix&v=4.7.0) format("embedded-opentype"),url(fonts/fontawesome-webfont.woff2?af7ae505a9eed503f8b8e6982036873e) format("woff2"),url(fonts/fontawesome-webfont.woff?fee66e712a8a08eef5805a46892932ad) format("woff"),url(fonts/fontawesome-webfont.ttf?b06871f281fee6b241d60582ae9369b9) format("truetype"),url(fonts/fontawesome-webfont.svg?912ec66d7572ff821749319396470bde#fontawesomeregular) format("svg");font-weight:400;font-style:normal}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{display:inline-block;font:normal normal normal 14px/1 
FontAwesome;font-size:inherit;text-rendering:auto;-webkit-font-smoothing:antialiased;-moz-osx-font-smoothing:grayscale}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-15%}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-fw{width:1.28571em;text-align:center}.fa-ul{padding-left:0;margin-left:2.14286em;list-style-type:none}.fa-ul>li{position:relative}.fa-li{position:absolute;left:-2.14286em;width:2.14286em;top:.14286em;text-align:center}.fa-li.fa-lg{left:-1.85714em}.fa-border{padding:.2em .25em .15em;border:.08em solid #eee;border-radius:.1em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa-pull-left.icon,.fa.fa-pull-left,.rst-content .code-block-caption .fa-pull-left.headerlink,.rst-content .eqno .fa-pull-left.headerlink,.rst-content .fa-pull-left.admonition-title,.rst-content code.download span.fa-pull-left:first-child,.rst-content dl dt .fa-pull-left.headerlink,.rst-content h1 .fa-pull-left.headerlink,.rst-content h2 .fa-pull-left.headerlink,.rst-content h3 .fa-pull-left.headerlink,.rst-content h4 .fa-pull-left.headerlink,.rst-content h5 .fa-pull-left.headerlink,.rst-content h6 .fa-pull-left.headerlink,.rst-content p .fa-pull-left.headerlink,.rst-content table>caption .fa-pull-left.headerlink,.rst-content tt.download span.fa-pull-left:first-child,.wy-menu-vertical li.current>a button.fa-pull-left.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-left.toctree-expand,.wy-menu-vertical li button.fa-pull-left.toctree-expand{margin-right:.3em}.fa-pull-right.icon,.fa.fa-pull-right,.rst-content .code-block-caption .fa-pull-right.headerlink,.rst-content .eqno .fa-pull-right.headerlink,.rst-content .fa-pull-right.admonition-title,.rst-content code.download span.fa-pull-right:first-child,.rst-content dl dt .fa-pull-right.headerlink,.rst-content h1 .fa-pull-right.headerlink,.rst-content h2 .fa-pull-right.headerlink,.rst-content h3 .fa-pull-right.headerlink,.rst-content h4 .fa-pull-right.headerlink,.rst-content 
h5 .fa-pull-right.headerlink,.rst-content h6 .fa-pull-right.headerlink,.rst-content p .fa-pull-right.headerlink,.rst-content table>caption .fa-pull-right.headerlink,.rst-content tt.download span.fa-pull-right:first-child,.wy-menu-vertical li.current>a button.fa-pull-right.toctree-expand,.wy-menu-vertical li.on a button.fa-pull-right.toctree-expand,.wy-menu-vertical li button.fa-pull-right.toctree-expand{margin-left:.3em}.pull-right{float:right}.pull-left{float:left}.fa.pull-left,.pull-left.icon,.rst-content .code-block-caption .pull-left.headerlink,.rst-content .eqno .pull-left.headerlink,.rst-content .pull-left.admonition-title,.rst-content code.download span.pull-left:first-child,.rst-content dl dt .pull-left.headerlink,.rst-content h1 .pull-left.headerlink,.rst-content h2 .pull-left.headerlink,.rst-content h3 .pull-left.headerlink,.rst-content h4 .pull-left.headerlink,.rst-content h5 .pull-left.headerlink,.rst-content h6 .pull-left.headerlink,.rst-content p .pull-left.headerlink,.rst-content table>caption .pull-left.headerlink,.rst-content tt.download span.pull-left:first-child,.wy-menu-vertical li.current>a button.pull-left.toctree-expand,.wy-menu-vertical li.on a button.pull-left.toctree-expand,.wy-menu-vertical li button.pull-left.toctree-expand{margin-right:.3em}.fa.pull-right,.pull-right.icon,.rst-content .code-block-caption .pull-right.headerlink,.rst-content .eqno .pull-right.headerlink,.rst-content .pull-right.admonition-title,.rst-content code.download span.pull-right:first-child,.rst-content dl dt .pull-right.headerlink,.rst-content h1 .pull-right.headerlink,.rst-content h2 .pull-right.headerlink,.rst-content h3 .pull-right.headerlink,.rst-content h4 .pull-right.headerlink,.rst-content h5 .pull-right.headerlink,.rst-content h6 .pull-right.headerlink,.rst-content p .pull-right.headerlink,.rst-content table>caption .pull-right.headerlink,.rst-content tt.download span.pull-right:first-child,.wy-menu-vertical li.current>a 
button.pull-right.toctree-expand,.wy-menu-vertical li.on a button.pull-right.toctree-expand,.wy-menu-vertical li button.pull-right.toctree-expand{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);-ms-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);-ms-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);-ms-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);-ms-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)";-webkit-transform:scaleY(-1);-ms-transform:scaleY(-1);transform:scaleY(-1)}:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root 
.fa-rotate-270{filter:none}.fa-stack{position:relative;display:inline-block;width:2em;height:2em;line-height:2em;vertical-align:middle}.fa-stack-1x,.fa-stack-2x{position:absolute;left:0;width:100%;text-align:center}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-glass:before{content:""}.fa-music:before{content:""}.fa-search:before,.icon-search:before{content:""}.fa-envelope-o:before{content:""}.fa-heart:before{content:""}.fa-star:before{content:""}.fa-star-o:before{content:""}.fa-user:before{content:""}.fa-film:before{content:""}.fa-th-large:before{content:""}.fa-th:before{content:""}.fa-th-list:before{content:""}.fa-check:before{content:""}.fa-close:before,.fa-remove:before,.fa-times:before{content:""}.fa-search-plus:before{content:""}.fa-search-minus:before{content:""}.fa-power-off:before{content:""}.fa-signal:before{content:""}.fa-cog:before,.fa-gear:before{content:""}.fa-trash-o:before{content:""}.fa-home:before,.icon-home:before{content:""}.fa-file-o:before{content:""}.fa-clock-o:before{content:""}.fa-road:before{content:""}.fa-download:before,.rst-content code.download span:first-child:before,.rst-content tt.download 
span:first-child:before{content:""}.fa-arrow-circle-o-down:before{content:""}.fa-arrow-circle-o-up:before{content:""}.fa-inbox:before{content:""}.fa-play-circle-o:before{content:""}.fa-repeat:before,.fa-rotate-right:before{content:""}.fa-refresh:before{content:""}.fa-list-alt:before{content:""}.fa-lock:before{content:""}.fa-flag:before{content:""}.fa-headphones:before{content:""}.fa-volume-off:before{content:""}.fa-volume-down:before{content:""}.fa-volume-up:before{content:""}.fa-qrcode:before{content:""}.fa-barcode:before{content:""}.fa-tag:before{content:""}.fa-tags:before{content:""}.fa-book:before,.icon-book:before{content:""}.fa-bookmark:before{content:""}.fa-print:before{content:""}.fa-camera:before{content:""}.fa-font:before{content:""}.fa-bold:before{content:""}.fa-italic:before{content:""}.fa-text-height:before{content:""}.fa-text-width:before{content:""}.fa-align-left:before{content:""}.fa-align-center:before{content:""}.fa-align-right:before{content:""}.fa-align-justify:before{content:""}.fa-list:before{content:""}.fa-dedent:before,.fa-outdent:before{content:""}.fa-indent:before{content:""}.fa-video-camera:before{content:""}.fa-image:before,.fa-photo:before,.fa-picture-o:before{content:""}.fa-pencil:before{content:""}.fa-map-marker:before{content:""}.fa-adjust:before{content:""}.fa-tint:before{content:""}.fa-edit:before,.fa-pencil-square-o:before{content:""}.fa-share-square-o:before{content:""}.fa-check-square-o:before{content:""}.fa-arrows:before{content:""}.fa-step-backward:before{content:""}.fa-fast-backward:before{content:""}.fa-backward:before{content:""}.fa-play:before{content:""}.fa-pause:before{content:""}.fa-stop:before{content:""}.fa-forward:before{content:""}.fa-fast-forward:before{content:""}.fa-step-forward:before{content:""}.fa-eject:before{content:""}.fa-chevron-left:before{content:""}.fa-chevron-right:before{content:""}.fa-plus-circle:before{content:""}.fa-minus-circle:before{content
:""}.fa-times-circle:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before{content:""}.fa-check-circle:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before{content:""}.fa-question-circle:before{content:""}.fa-info-circle:before{content:""}.fa-crosshairs:before{content:""}.fa-times-circle-o:before{content:""}.fa-check-circle-o:before{content:""}.fa-ban:before{content:""}.fa-arrow-left:before{content:""}.fa-arrow-right:before{content:""}.fa-arrow-up:before{content:""}.fa-arrow-down:before{content:""}.fa-mail-forward:before,.fa-share:before{content:""}.fa-expand:before{content:""}.fa-compress:before{content:""}.fa-plus:before{content:""}.fa-minus:before{content:""}.fa-asterisk:before{content:""}.fa-exclamation-circle:before,.rst-content .admonition-title:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before{content:""}.fa-gift:before{content:""}.fa-leaf:before{content:""}.fa-fire:before,.icon-fire:before{content:""}.fa-eye:before{content:""}.fa-eye-slash:before{content:""}.fa-exclamation-triangle:before,.fa-warning:before{content:""}.fa-plane:before{content:""}.fa-calendar:before{content:""}.fa-random:before{content:""}.fa-comment:before{content:""}.fa-magnet:before{content:""}.fa-chevron-up:before{content:""}.fa-chevron-down:before{content:""}.fa-retweet:before{content:""}.fa-shopping-cart:before{content:""}.fa-folder:before{content:""}.fa-folder-open:before{content:""}.fa-arrows-v:before{content:""}.fa-arrows-h:before{content:""}.fa-bar-chart-o:before,.fa-bar-chart:before{content:""}.fa-twitter-square:before{content:""}.fa-facebook-square:before{content:""}.fa-camera-retro:before{content:""}.fa-key:before{content:""}.fa-cogs:before,.fa-gears:before{content:""}.fa-comments:before{content:""}.fa-thumbs-o-up:before{content:""}.fa-thumbs-o-down:before{content:""}.fa-star-half:before
{content:""}.fa-heart-o:before{content:""}.fa-sign-out:before{content:""}.fa-linkedin-square:before{content:""}.fa-thumb-tack:before{content:""}.fa-external-link:before{content:""}.fa-sign-in:before{content:""}.fa-trophy:before{content:""}.fa-github-square:before{content:""}.fa-upload:before{content:""}.fa-lemon-o:before{content:""}.fa-phone:before{content:""}.fa-square-o:before{content:""}.fa-bookmark-o:before{content:""}.fa-phone-square:before{content:""}.fa-twitter:before{content:""}.fa-facebook-f:before,.fa-facebook:before{content:""}.fa-github:before,.icon-github:before{content:""}.fa-unlock:before{content:""}.fa-credit-card:before{content:""}.fa-feed:before,.fa-rss:before{content:""}.fa-hdd-o:before{content:""}.fa-bullhorn:before{content:""}.fa-bell:before{content:""}.fa-certificate:before{content:""}.fa-hand-o-right:before{content:""}.fa-hand-o-left:before{content:""}.fa-hand-o-up:before{content:""}.fa-hand-o-down:before{content:""}.fa-arrow-circle-left:before,.icon-circle-arrow-left:before{content:""}.fa-arrow-circle-right:before,.icon-circle-arrow-right:before{content:""}.fa-arrow-circle-up:before{content:""}.fa-arrow-circle-down:before{content:""}.fa-globe:before{content:""}.fa-wrench:before{content:""}.fa-tasks:before{content:""}.fa-filter:before{content:""}.fa-briefcase:before{content:""}.fa-arrows-alt:before{content:""}.fa-group:before,.fa-users:before{content:""}.fa-chain:before,.fa-link:before,.icon-link:before{content:""}.fa-cloud:before{content:""}.fa-flask:before{content:""}.fa-cut:before,.fa-scissors:before{content:""}.fa-copy:before,.fa-files-o:before{content:""}.fa-paperclip:before{content:""}.fa-floppy-o:before,.fa-save:before{content:""}.fa-square:before{content:""}.fa-bars:before,.fa-navicon:before,.fa-reorder:before{content:""}.fa-list-ul:before{content:""}.fa-list-ol:before{content:""}.fa-strikethrough:before{content:""}.fa-underline:before{content:""}.fa-table:before{content:""}.fa-magi
c:before{content:""}.fa-truck:before{content:""}.fa-pinterest:before{content:""}.fa-pinterest-square:before{content:""}.fa-google-plus-square:before{content:""}.fa-google-plus:before{content:""}.fa-money:before{content:""}.fa-caret-down:before,.icon-caret-down:before,.wy-dropdown .caret:before{content:""}.fa-caret-up:before{content:""}.fa-caret-left:before{content:""}.fa-caret-right:before{content:""}.fa-columns:before{content:""}.fa-sort:before,.fa-unsorted:before{content:""}.fa-sort-desc:before,.fa-sort-down:before{content:""}.fa-sort-asc:before,.fa-sort-up:before{content:""}.fa-envelope:before{content:""}.fa-linkedin:before{content:""}.fa-rotate-left:before,.fa-undo:before{content:""}.fa-gavel:before,.fa-legal:before{content:""}.fa-dashboard:before,.fa-tachometer:before{content:""}.fa-comment-o:before{content:""}.fa-comments-o:before{content:""}.fa-bolt:before,.fa-flash:before{content:""}.fa-sitemap:before{content:""}.fa-umbrella:before{content:""}.fa-clipboard:before,.fa-paste:before{content:""}.fa-lightbulb-o:before{content:""}.fa-exchange:before{content:""}.fa-cloud-download:before{content:""}.fa-cloud-upload:before{content:""}.fa-user-md:before{content:""}.fa-stethoscope:before{content:""}.fa-suitcase:before{content:""}.fa-bell-o:before{content:""}.fa-coffee:before{content:""}.fa-cutlery:before{content:""}.fa-file-text-o:before{content:""}.fa-building-o:before{content:""}.fa-hospital-o:before{content:""}.fa-ambulance:before{content:""}.fa-medkit:before{content:""}.fa-fighter-jet:before{content:""}.fa-beer:before{content:""}.fa-h-square:before{content:""}.fa-plus-square:before{content:""}.fa-angle-double-left:before{content:""}.fa-angle-double-right:before{content:""}.fa-angle-double-up:before{content:""}.fa-angle-double-down:before{content:""}.fa-angle-left:before{content:""}.fa-angle-right:before{content:""}.fa-angle-up:before{content:""}.fa-angle-down:before{content:""}.fa-desktop:before{content:""}.fa-l
aptop:before{content:""}.fa-tablet:before{content:""}.fa-mobile-phone:before,.fa-mobile:before{content:""}.fa-circle-o:before{content:""}.fa-quote-left:before{content:""}.fa-quote-right:before{content:""}.fa-spinner:before{content:""}.fa-circle:before{content:""}.fa-mail-reply:before,.fa-reply:before{content:""}.fa-github-alt:before{content:""}.fa-folder-o:before{content:""}.fa-folder-open-o:before{content:""}.fa-smile-o:before{content:""}.fa-frown-o:before{content:""}.fa-meh-o:before{content:""}.fa-gamepad:before{content:""}.fa-keyboard-o:before{content:""}.fa-flag-o:before{content:""}.fa-flag-checkered:before{content:""}.fa-terminal:before{content:""}.fa-code:before{content:""}.fa-mail-reply-all:before,.fa-reply-all:before{content:""}.fa-star-half-empty:before,.fa-star-half-full:before,.fa-star-half-o:before{content:""}.fa-location-arrow:before{content:""}.fa-crop:before{content:""}.fa-code-fork:before{content:""}.fa-chain-broken:before,.fa-unlink:before{content:""}.fa-question:before{content:""}.fa-info:before{content:""}.fa-exclamation:before{content:""}.fa-superscript:before{content:""}.fa-subscript:before{content:""}.fa-eraser:before{content:""}.fa-puzzle-piece:before{content:""}.fa-microphone:before{content:""}.fa-microphone-slash:before{content:""}.fa-shield:before{content:""}.fa-calendar-o:before{content:""}.fa-fire-extinguisher:before{content:""}.fa-rocket:before{content:""}.fa-maxcdn:before{content:""}.fa-chevron-circle-left:before{content:""}.fa-chevron-circle-right:before{content:""}.fa-chevron-circle-up:before{content:""}.fa-chevron-circle-down:before{content:""}.fa-html5:before{content:""}.fa-css3:before{content:""}.fa-anchor:before{content:""}.fa-unlock-alt:before{content:""}.fa-bullseye:before{content:""}.fa-ellipsis-h:before{content:""}.fa-ellipsis-v:before{content:""}.fa-rss-square:before{content:""}.fa-play-circle:before{content:""}.fa-ticket:before{content:""}.fa-minus-square:before{content:
""}.fa-minus-square-o:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before{content:""}.fa-level-up:before{content:""}.fa-level-down:before{content:""}.fa-check-square:before{content:""}.fa-pencil-square:before{content:""}.fa-external-link-square:before{content:""}.fa-share-square:before{content:""}.fa-compass:before{content:""}.fa-caret-square-o-down:before,.fa-toggle-down:before{content:""}.fa-caret-square-o-up:before,.fa-toggle-up:before{content:""}.fa-caret-square-o-right:before,.fa-toggle-right:before{content:""}.fa-eur:before,.fa-euro:before{content:""}.fa-gbp:before{content:""}.fa-dollar:before,.fa-usd:before{content:""}.fa-inr:before,.fa-rupee:before{content:""}.fa-cny:before,.fa-jpy:before,.fa-rmb:before,.fa-yen:before{content:""}.fa-rouble:before,.fa-rub:before,.fa-ruble:before{content:""}.fa-krw:before,.fa-won:before{content:""}.fa-bitcoin:before,.fa-btc:before{content:""}.fa-file:before{content:""}.fa-file-text:before{content:""}.fa-sort-alpha-asc:before{content:""}.fa-sort-alpha-desc:before{content:""}.fa-sort-amount-asc:before{content:""}.fa-sort-amount-desc:before{content:""}.fa-sort-numeric-asc:before{content:""}.fa-sort-numeric-desc:before{content:""}.fa-thumbs-up:before{content:""}.fa-thumbs-down:before{content:""}.fa-youtube-square:before{content:""}.fa-youtube:before{content:""}.fa-xing:before{content:""}.fa-xing-square:before{content:""}.fa-youtube-play:before{content:""}.fa-dropbox:before{content:""}.fa-stack-overflow:before{content:""}.fa-instagram:before{content:""}.fa-flickr:before{content:""}.fa-adn:before{content:""}.fa-bitbucket:before,.icon-bitbucket:before{content:""}.fa-bitbucket-square:before{content:""}.fa-tumblr:before{content:""}.fa-tumblr-square:before{content:""}.fa-long-arrow-down:before{content:""}.fa-long-arrow-up:before{content:""}.fa-long-arrow-left:before{content:""}.fa-long-arrow-right:before{content:""}.fa-a
pple:before{content:""}.fa-windows:before{content:""}.fa-android:before{content:""}.fa-linux:before{content:""}.fa-dribbble:before{content:""}.fa-skype:before{content:""}.fa-foursquare:before{content:""}.fa-trello:before{content:""}.fa-female:before{content:""}.fa-male:before{content:""}.fa-gittip:before,.fa-gratipay:before{content:""}.fa-sun-o:before{content:""}.fa-moon-o:before{content:""}.fa-archive:before{content:""}.fa-bug:before{content:""}.fa-vk:before{content:""}.fa-weibo:before{content:""}.fa-renren:before{content:""}.fa-pagelines:before{content:""}.fa-stack-exchange:before{content:""}.fa-arrow-circle-o-right:before{content:""}.fa-arrow-circle-o-left:before{content:""}.fa-caret-square-o-left:before,.fa-toggle-left:before{content:""}.fa-dot-circle-o:before{content:""}.fa-wheelchair:before{content:""}.fa-vimeo-square:before{content:""}.fa-try:before,.fa-turkish-lira:before{content:""}.fa-plus-square-o:before,.wy-menu-vertical li button.toctree-expand:before{content:""}.fa-space-shuttle:before{content:""}.fa-slack:before{content:""}.fa-envelope-square:before{content:""}.fa-wordpress:before{content:""}.fa-openid:before{content:""}.fa-bank:before,.fa-institution:before,.fa-university:before{content:""}.fa-graduation-cap:before,.fa-mortar-board:before{content:""}.fa-yahoo:before{content:""}.fa-google:before{content:""}.fa-reddit:before{content:""}.fa-reddit-square:before{content:""}.fa-stumbleupon-circle:before{content:""}.fa-stumbleupon:before{content:""}.fa-delicious:before{content:""}.fa-digg:before{content:""}.fa-pied-piper-pp:before{content:""}.fa-pied-piper-alt:before{content:""}.fa-drupal:before{content:""}.fa-joomla:before{content:""}.fa-language:before{content:""}.fa-fax:before{content:""}.fa-building:before{content:""}.fa-child:before{content:""}.fa-paw:before{content:""}.fa-spoon:before{content:""}.fa-cube:before{content:""}.fa-cubes:before{content:""}.fa-behance:before{content:""}.fa-behance-squa
re:before{content:""}.fa-steam:before{content:""}.fa-steam-square:before{content:""}.fa-recycle:before{content:""}.fa-automobile:before,.fa-car:before{content:""}.fa-cab:before,.fa-taxi:before{content:""}.fa-tree:before{content:""}.fa-spotify:before{content:""}.fa-deviantart:before{content:""}.fa-soundcloud:before{content:""}.fa-database:before{content:""}.fa-file-pdf-o:before{content:""}.fa-file-word-o:before{content:""}.fa-file-excel-o:before{content:""}.fa-file-powerpoint-o:before{content:""}.fa-file-image-o:before,.fa-file-photo-o:before,.fa-file-picture-o:before{content:""}.fa-file-archive-o:before,.fa-file-zip-o:before{content:""}.fa-file-audio-o:before,.fa-file-sound-o:before{content:""}.fa-file-movie-o:before,.fa-file-video-o:before{content:""}.fa-file-code-o:before{content:""}.fa-vine:before{content:""}.fa-codepen:before{content:""}.fa-jsfiddle:before{content:""}.fa-life-bouy:before,.fa-life-buoy:before,.fa-life-ring:before,.fa-life-saver:before,.fa-support:before{content:""}.fa-circle-o-notch:before{content:""}.fa-ra:before,.fa-rebel:before,.fa-resistance:before{content:""}.fa-empire:before,.fa-ge:before{content:""}.fa-git-square:before{content:""}.fa-git:before{content:""}.fa-hacker-news:before,.fa-y-combinator-square:before,.fa-yc-square:before{content:""}.fa-tencent-weibo:before{content:""}.fa-qq:before{content:""}.fa-wechat:before,.fa-weixin:before{content:""}.fa-paper-plane:before,.fa-send:before{content:""}.fa-paper-plane-o:before,.fa-send-o:before{content:""}.fa-history:before{content:""}.fa-circle-thin:before{content:""}.fa-header:before{content:""}.fa-paragraph:before{content:""}.fa-sliders:before{content:""}.fa-share-alt:before{content:""}.fa-share-alt-square:before{content:""}.fa-bomb:before{content:""}.fa-futbol-o:before,.fa-soccer-ball-o:before{content:""}.fa-tty:before{content:""}.fa-binoculars:before{content:""}.fa-plug:before{content:""}.fa-slideshare:before{content:""}.fa-twitch:before{conten
t:""}.fa-yelp:before{content:""}.fa-newspaper-o:before{content:""}.fa-wifi:before{content:""}.fa-calculator:before{content:""}.fa-paypal:before{content:""}.fa-google-wallet:before{content:""}.fa-cc-visa:before{content:""}.fa-cc-mastercard:before{content:""}.fa-cc-discover:before{content:""}.fa-cc-amex:before{content:""}.fa-cc-paypal:before{content:""}.fa-cc-stripe:before{content:""}.fa-bell-slash:before{content:""}.fa-bell-slash-o:before{content:""}.fa-trash:before{content:""}.fa-copyright:before{content:""}.fa-at:before{content:""}.fa-eyedropper:before{content:""}.fa-paint-brush:before{content:""}.fa-birthday-cake:before{content:""}.fa-area-chart:before{content:""}.fa-pie-chart:before{content:""}.fa-line-chart:before{content:""}.fa-lastfm:before{content:""}.fa-lastfm-square:before{content:""}.fa-toggle-off:before{content:""}.fa-toggle-on:before{content:""}.fa-bicycle:before{content:""}.fa-bus:before{content:""}.fa-ioxhost:before{content:""}.fa-angellist:before{content:""}.fa-cc:before{content:""}.fa-ils:before,.fa-shekel:before,.fa-sheqel:before{content:""}.fa-meanpath:before{content:""}.fa-buysellads:before{content:""}.fa-connectdevelop:before{content:""}.fa-dashcube:before{content:""}.fa-forumbee:before{content:""}.fa-leanpub:before{content:""}.fa-sellsy:before{content:""}.fa-shirtsinbulk:before{content:""}.fa-simplybuilt:before{content:""}.fa-skyatlas:before{content:""}.fa-cart-plus:before{content:""}.fa-cart-arrow-down:before{content:""}.fa-diamond:before{content:""}.fa-ship:before{content:""}.fa-user-secret:before{content:""}.fa-motorcycle:before{content:""}.fa-street-view:before{content:""}.fa-heartbeat:before{content:""}.fa-venus:before{content:""}.fa-mars:before{content:""}.fa-mercury:before{content:""}.fa-intersex:before,.fa-transgender:before{content:""}.fa-transgender-alt:before{content:""}.fa-venus-double:before{content:""}.fa-mars-double:before{content:""}.fa-venus-mars:before{content:""}.fa-m
ars-stroke:before{content:""}.fa-mars-stroke-v:before{content:""}.fa-mars-stroke-h:before{content:""}.fa-neuter:before{content:""}.fa-genderless:before{content:""}.fa-facebook-official:before{content:""}.fa-pinterest-p:before{content:""}.fa-whatsapp:before{content:""}.fa-server:before{content:""}.fa-user-plus:before{content:""}.fa-user-times:before{content:""}.fa-bed:before,.fa-hotel:before{content:""}.fa-viacoin:before{content:""}.fa-train:before{content:""}.fa-subway:before{content:""}.fa-medium:before{content:""}.fa-y-combinator:before,.fa-yc:before{content:""}.fa-optin-monster:before{content:""}.fa-opencart:before{content:""}.fa-expeditedssl:before{content:""}.fa-battery-4:before,.fa-battery-full:before,.fa-battery:before{content:""}.fa-battery-3:before,.fa-battery-three-quarters:before{content:""}.fa-battery-2:before,.fa-battery-half:before{content:""}.fa-battery-1:before,.fa-battery-quarter:before{content:""}.fa-battery-0:before,.fa-battery-empty:before{content:""}.fa-mouse-pointer:before{content:""}.fa-i-cursor:before{content:""}.fa-object-group:before{content:""}.fa-object-ungroup:before{content:""}.fa-sticky-note:before{content:""}.fa-sticky-note-o:before{content:""}.fa-cc-jcb:before{content:""}.fa-cc-diners-club:before{content:""}.fa-clone:before{content:""}.fa-balance-scale:before{content:""}.fa-hourglass-o:before{content:""}.fa-hourglass-1:before,.fa-hourglass-start:before{content:""}.fa-hourglass-2:before,.fa-hourglass-half:before{content:""}.fa-hourglass-3:before,.fa-hourglass-end:before{content:""}.fa-hourglass:before{content:""}.fa-hand-grab-o:before,.fa-hand-rock-o:before{content:""}.fa-hand-paper-o:before,.fa-hand-stop-o:before{content:""}.fa-hand-scissors-o:before{content:""}.fa-hand-lizard-o:before{content:""}.fa-hand-spock-o:before{content:""}.fa-hand-pointer-o:before{content:""}.fa-hand-peace-o:before{content:""}.fa-trademark:before{content:""}.fa-registered:before{content:""}.fa-creative-commons
:before{content:""}.fa-gg:before{content:""}.fa-gg-circle:before{content:""}.fa-tripadvisor:before{content:""}.fa-odnoklassniki:before{content:""}.fa-odnoklassniki-square:before{content:""}.fa-get-pocket:before{content:""}.fa-wikipedia-w:before{content:""}.fa-safari:before{content:""}.fa-chrome:before{content:""}.fa-firefox:before{content:""}.fa-opera:before{content:""}.fa-internet-explorer:before{content:""}.fa-television:before,.fa-tv:before{content:""}.fa-contao:before{content:""}.fa-500px:before{content:""}.fa-amazon:before{content:""}.fa-calendar-plus-o:before{content:""}.fa-calendar-minus-o:before{content:""}.fa-calendar-times-o:before{content:""}.fa-calendar-check-o:before{content:""}.fa-industry:before{content:""}.fa-map-pin:before{content:""}.fa-map-signs:before{content:""}.fa-map-o:before{content:""}.fa-map:before{content:""}.fa-commenting:before{content:""}.fa-commenting-o:before{content:""}.fa-houzz:before{content:""}.fa-vimeo:before{content:""}.fa-black-tie:before{content:""}.fa-fonticons:before{content:""}.fa-reddit-alien:before{content:""}.fa-edge:before{content:""}.fa-credit-card-alt:before{content:""}.fa-codiepie:before{content:""}.fa-modx:before{content:""}.fa-fort-awesome:before{content:""}.fa-usb:before{content:""}.fa-product-hunt:before{content:""}.fa-mixcloud:before{content:""}.fa-scribd:before{content:""}.fa-pause-circle:before{content:""}.fa-pause-circle-o:before{content:""}.fa-stop-circle:before{content:""}.fa-stop-circle-o:before{content:""}.fa-shopping-bag:before{content:""}.fa-shopping-basket:before{content:""}.fa-hashtag:before{content:""}.fa-bluetooth:before{content:""}.fa-bluetooth-b:before{content:""}.fa-percent:before{content:""}.fa-gitlab:before,.icon-gitlab:before{content:""}.fa-wpbeginner:before{content:""}.fa-wpforms:before{content:""}.fa-envira:before{content:""}.fa-universal-access:before{content:""}.fa-wheelchair-alt:before{content:""}.fa-question-circle-o:before{conten
t:""}.fa-blind:before{content:""}.fa-audio-description:before{content:""}.fa-volume-control-phone:before{content:""}.fa-braille:before{content:""}.fa-assistive-listening-systems:before{content:""}.fa-american-sign-language-interpreting:before,.fa-asl-interpreting:before{content:""}.fa-deaf:before,.fa-deafness:before,.fa-hard-of-hearing:before{content:""}.fa-glide:before{content:""}.fa-glide-g:before{content:""}.fa-sign-language:before,.fa-signing:before{content:""}.fa-low-vision:before{content:""}.fa-viadeo:before{content:""}.fa-viadeo-square:before{content:""}.fa-snapchat:before{content:""}.fa-snapchat-ghost:before{content:""}.fa-snapchat-square:before{content:""}.fa-pied-piper:before{content:""}.fa-first-order:before{content:""}.fa-yoast:before{content:""}.fa-themeisle:before{content:""}.fa-google-plus-circle:before,.fa-google-plus-official:before{content:""}.fa-fa:before,.fa-font-awesome:before{content:""}.fa-handshake-o:before{content:""}.fa-envelope-open:before{content:""}.fa-envelope-open-o:before{content:""}.fa-linode:before{content:""}.fa-address-book:before{content:""}.fa-address-book-o:before{content:""}.fa-address-card:before,.fa-vcard:before{content:""}.fa-address-card-o:before,.fa-vcard-o:before{content:""}.fa-user-circle:before{content:""}.fa-user-circle-o:before{content:""}.fa-user-o:before{content:""}.fa-id-badge:before{content:""}.fa-drivers-license:before,.fa-id-card:before{content:""}.fa-drivers-license-o:before,.fa-id-card-o:before{content:""}.fa-quora:before{content:""}.fa-free-code-camp:before{content:""}.fa-telegram:before{content:""}.fa-thermometer-4:before,.fa-thermometer-full:before,.fa-thermometer:before{content:""}.fa-thermometer-3:before,.fa-thermometer-three-quarters:before{content:""}.fa-thermometer-2:before,.fa-thermometer-half:before{content:""}.fa-thermometer-1:before,.fa-thermometer-quarter:before{content:""}.fa-thermometer-0:before,.fa-thermometer-empty:before{content:""}.fa-shower:befo
re{content:""}.fa-bath:before,.fa-bathtub:before,.fa-s15:before{content:""}.fa-podcast:before{content:""}.fa-window-maximize:before{content:""}.fa-window-minimize:before{content:""}.fa-window-restore:before{content:""}.fa-times-rectangle:before,.fa-window-close:before{content:""}.fa-times-rectangle-o:before,.fa-window-close-o:before{content:""}.fa-bandcamp:before{content:""}.fa-grav:before{content:""}.fa-etsy:before{content:""}.fa-imdb:before{content:""}.fa-ravelry:before{content:""}.fa-eercast:before{content:""}.fa-microchip:before{content:""}.fa-snowflake-o:before{content:""}.fa-superpowers:before{content:""}.fa-wpexplorer:before{content:""}.fa-meetup:before{content:""}.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;margin:0;overflow:visible;clip:auto}.fa,.icon,.rst-content .admonition-title,.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content code.download span:first-child,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 .headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink,.rst-content tt.download span:first-child,.wy-dropdown .caret,.wy-inline-validate.wy-inline-validate-danger .wy-input-context,.wy-inline-validate.wy-inline-validate-info .wy-input-context,.wy-inline-validate.wy-inline-validate-success .wy-input-context,.wy-inline-validate.wy-inline-validate-warning .wy-input-context,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li button.toctree-expand{font-family:inherit}.fa:before,.icon:before,.rst-content .admonition-title:before,.rst-content .code-block-caption 
.headerlink:before,.rst-content .eqno .headerlink:before,.rst-content code.download span:first-child:before,.rst-content dl dt .headerlink:before,.rst-content h1 .headerlink:before,.rst-content h2 .headerlink:before,.rst-content h3 .headerlink:before,.rst-content h4 .headerlink:before,.rst-content h5 .headerlink:before,.rst-content h6 .headerlink:before,.rst-content p.caption .headerlink:before,.rst-content p .headerlink:before,.rst-content table>caption .headerlink:before,.rst-content tt.download span:first-child:before,.wy-dropdown .caret:before,.wy-inline-validate.wy-inline-validate-danger .wy-input-context:before,.wy-inline-validate.wy-inline-validate-info .wy-input-context:before,.wy-inline-validate.wy-inline-validate-success .wy-input-context:before,.wy-inline-validate.wy-inline-validate-warning .wy-input-context:before,.wy-menu-vertical li.current>a button.toctree-expand:before,.wy-menu-vertical li.on a button.toctree-expand:before,.wy-menu-vertical li button.toctree-expand:before{font-family:FontAwesome;display:inline-block;font-style:normal;font-weight:400;line-height:1;text-decoration:inherit}.rst-content .code-block-caption a .headerlink,.rst-content .eqno a .headerlink,.rst-content a .admonition-title,.rst-content code.download a span:first-child,.rst-content dl dt a .headerlink,.rst-content h1 a .headerlink,.rst-content h2 a .headerlink,.rst-content h3 a .headerlink,.rst-content h4 a .headerlink,.rst-content h5 a .headerlink,.rst-content h6 a .headerlink,.rst-content p.caption a .headerlink,.rst-content p a .headerlink,.rst-content table>caption a .headerlink,.rst-content tt.download a span:first-child,.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a button.toctree-expand,.wy-menu-vertical li a button.toctree-expand,a .fa,a .icon,a .rst-content .admonition-title,a .rst-content .code-block-caption .headerlink,a .rst-content .eqno .headerlink,a .rst-content code.download span:first-child,a .rst-content dl dt .headerlink,a 
.rst-content h1 .headerlink,a .rst-content h2 .headerlink,a .rst-content h3 .headerlink,a .rst-content h4 .headerlink,a .rst-content h5 .headerlink,a .rst-content h6 .headerlink,a .rst-content p.caption .headerlink,a .rst-content p .headerlink,a .rst-content table>caption .headerlink,a .rst-content tt.download span:first-child,a .wy-menu-vertical li button.toctree-expand{display:inline-block;text-decoration:inherit}.btn .fa,.btn .icon,.btn .rst-content .admonition-title,.btn .rst-content .code-block-caption .headerlink,.btn .rst-content .eqno .headerlink,.btn .rst-content code.download span:first-child,.btn .rst-content dl dt .headerlink,.btn .rst-content h1 .headerlink,.btn .rst-content h2 .headerlink,.btn .rst-content h3 .headerlink,.btn .rst-content h4 .headerlink,.btn .rst-content h5 .headerlink,.btn .rst-content h6 .headerlink,.btn .rst-content p .headerlink,.btn .rst-content table>caption .headerlink,.btn .rst-content tt.download span:first-child,.btn .wy-menu-vertical li.current>a button.toctree-expand,.btn .wy-menu-vertical li.on a button.toctree-expand,.btn .wy-menu-vertical li button.toctree-expand,.nav .fa,.nav .icon,.nav .rst-content .admonition-title,.nav .rst-content .code-block-caption .headerlink,.nav .rst-content .eqno .headerlink,.nav .rst-content code.download span:first-child,.nav .rst-content dl dt .headerlink,.nav .rst-content h1 .headerlink,.nav .rst-content h2 .headerlink,.nav .rst-content h3 .headerlink,.nav .rst-content h4 .headerlink,.nav .rst-content h5 .headerlink,.nav .rst-content h6 .headerlink,.nav .rst-content p .headerlink,.nav .rst-content table>caption .headerlink,.nav .rst-content tt.download span:first-child,.nav .wy-menu-vertical li.current>a button.toctree-expand,.nav .wy-menu-vertical li.on a button.toctree-expand,.nav .wy-menu-vertical li button.toctree-expand,.rst-content .btn .admonition-title,.rst-content .code-block-caption .btn .headerlink,.rst-content .code-block-caption .nav .headerlink,.rst-content .eqno .btn 
.headerlink,.rst-content .eqno .nav .headerlink,.rst-content .nav .admonition-title,.rst-content code.download .btn span:first-child,.rst-content code.download .nav span:first-child,.rst-content dl dt .btn .headerlink,.rst-content dl dt .nav .headerlink,.rst-content h1 .btn .headerlink,.rst-content h1 .nav .headerlink,.rst-content h2 .btn .headerlink,.rst-content h2 .nav .headerlink,.rst-content h3 .btn .headerlink,.rst-content h3 .nav .headerlink,.rst-content h4 .btn .headerlink,.rst-content h4 .nav .headerlink,.rst-content h5 .btn .headerlink,.rst-content h5 .nav .headerlink,.rst-content h6 .btn .headerlink,.rst-content h6 .nav .headerlink,.rst-content p .btn .headerlink,.rst-content p .nav .headerlink,.rst-content table>caption .btn .headerlink,.rst-content table>caption .nav .headerlink,.rst-content tt.download .btn span:first-child,.rst-content tt.download .nav span:first-child,.wy-menu-vertical li .btn button.toctree-expand,.wy-menu-vertical li.current>a .btn button.toctree-expand,.wy-menu-vertical li.current>a .nav button.toctree-expand,.wy-menu-vertical li .nav button.toctree-expand,.wy-menu-vertical li.on a .btn button.toctree-expand,.wy-menu-vertical li.on a .nav button.toctree-expand{display:inline}.btn .fa-large.icon,.btn .fa.fa-large,.btn .rst-content .code-block-caption .fa-large.headerlink,.btn .rst-content .eqno .fa-large.headerlink,.btn .rst-content .fa-large.admonition-title,.btn .rst-content code.download span.fa-large:first-child,.btn .rst-content dl dt .fa-large.headerlink,.btn .rst-content h1 .fa-large.headerlink,.btn .rst-content h2 .fa-large.headerlink,.btn .rst-content h3 .fa-large.headerlink,.btn .rst-content h4 .fa-large.headerlink,.btn .rst-content h5 .fa-large.headerlink,.btn .rst-content h6 .fa-large.headerlink,.btn .rst-content p .fa-large.headerlink,.btn .rst-content table>caption .fa-large.headerlink,.btn .rst-content tt.download span.fa-large:first-child,.btn .wy-menu-vertical li button.fa-large.toctree-expand,.nav 
.fa-large.icon,.nav .fa.fa-large,.nav .rst-content .code-block-caption .fa-large.headerlink,.nav .rst-content .eqno .fa-large.headerlink,.nav .rst-content .fa-large.admonition-title,.nav .rst-content code.download span.fa-large:first-child,.nav .rst-content dl dt .fa-large.headerlink,.nav .rst-content h1 .fa-large.headerlink,.nav .rst-content h2 .fa-large.headerlink,.nav .rst-content h3 .fa-large.headerlink,.nav .rst-content h4 .fa-large.headerlink,.nav .rst-content h5 .fa-large.headerlink,.nav .rst-content h6 .fa-large.headerlink,.nav .rst-content p .fa-large.headerlink,.nav .rst-content table>caption .fa-large.headerlink,.nav .rst-content tt.download span.fa-large:first-child,.nav .wy-menu-vertical li button.fa-large.toctree-expand,.rst-content .btn .fa-large.admonition-title,.rst-content .code-block-caption .btn .fa-large.headerlink,.rst-content .code-block-caption .nav .fa-large.headerlink,.rst-content .eqno .btn .fa-large.headerlink,.rst-content .eqno .nav .fa-large.headerlink,.rst-content .nav .fa-large.admonition-title,.rst-content code.download .btn span.fa-large:first-child,.rst-content code.download .nav span.fa-large:first-child,.rst-content dl dt .btn .fa-large.headerlink,.rst-content dl dt .nav .fa-large.headerlink,.rst-content h1 .btn .fa-large.headerlink,.rst-content h1 .nav .fa-large.headerlink,.rst-content h2 .btn .fa-large.headerlink,.rst-content h2 .nav .fa-large.headerlink,.rst-content h3 .btn .fa-large.headerlink,.rst-content h3 .nav .fa-large.headerlink,.rst-content h4 .btn .fa-large.headerlink,.rst-content h4 .nav .fa-large.headerlink,.rst-content h5 .btn .fa-large.headerlink,.rst-content h5 .nav .fa-large.headerlink,.rst-content h6 .btn .fa-large.headerlink,.rst-content h6 .nav .fa-large.headerlink,.rst-content p .btn .fa-large.headerlink,.rst-content p .nav .fa-large.headerlink,.rst-content table>caption .btn .fa-large.headerlink,.rst-content table>caption .nav .fa-large.headerlink,.rst-content tt.download .btn 
span.fa-large:first-child,.rst-content tt.download .nav span.fa-large:first-child,.wy-menu-vertical li .btn button.fa-large.toctree-expand,.wy-menu-vertical li .nav button.fa-large.toctree-expand{line-height:.9em}.btn .fa-spin.icon,.btn .fa.fa-spin,.btn .rst-content .code-block-caption .fa-spin.headerlink,.btn .rst-content .eqno .fa-spin.headerlink,.btn .rst-content .fa-spin.admonition-title,.btn .rst-content code.download span.fa-spin:first-child,.btn .rst-content dl dt .fa-spin.headerlink,.btn .rst-content h1 .fa-spin.headerlink,.btn .rst-content h2 .fa-spin.headerlink,.btn .rst-content h3 .fa-spin.headerlink,.btn .rst-content h4 .fa-spin.headerlink,.btn .rst-content h5 .fa-spin.headerlink,.btn .rst-content h6 .fa-spin.headerlink,.btn .rst-content p .fa-spin.headerlink,.btn .rst-content table>caption .fa-spin.headerlink,.btn .rst-content tt.download span.fa-spin:first-child,.btn .wy-menu-vertical li button.fa-spin.toctree-expand,.nav .fa-spin.icon,.nav .fa.fa-spin,.nav .rst-content .code-block-caption .fa-spin.headerlink,.nav .rst-content .eqno .fa-spin.headerlink,.nav .rst-content .fa-spin.admonition-title,.nav .rst-content code.download span.fa-spin:first-child,.nav .rst-content dl dt .fa-spin.headerlink,.nav .rst-content h1 .fa-spin.headerlink,.nav .rst-content h2 .fa-spin.headerlink,.nav .rst-content h3 .fa-spin.headerlink,.nav .rst-content h4 .fa-spin.headerlink,.nav .rst-content h5 .fa-spin.headerlink,.nav .rst-content h6 .fa-spin.headerlink,.nav .rst-content p .fa-spin.headerlink,.nav .rst-content table>caption .fa-spin.headerlink,.nav .rst-content tt.download span.fa-spin:first-child,.nav .wy-menu-vertical li button.fa-spin.toctree-expand,.rst-content .btn .fa-spin.admonition-title,.rst-content .code-block-caption .btn .fa-spin.headerlink,.rst-content .code-block-caption .nav .fa-spin.headerlink,.rst-content .eqno .btn .fa-spin.headerlink,.rst-content .eqno .nav .fa-spin.headerlink,.rst-content .nav .fa-spin.admonition-title,.rst-content code.download 
.btn span.fa-spin:first-child,.rst-content code.download .nav span.fa-spin:first-child,.rst-content dl dt .btn .fa-spin.headerlink,.rst-content dl dt .nav .fa-spin.headerlink,.rst-content h1 .btn .fa-spin.headerlink,.rst-content h1 .nav .fa-spin.headerlink,.rst-content h2 .btn .fa-spin.headerlink,.rst-content h2 .nav .fa-spin.headerlink,.rst-content h3 .btn .fa-spin.headerlink,.rst-content h3 .nav .fa-spin.headerlink,.rst-content h4 .btn .fa-spin.headerlink,.rst-content h4 .nav .fa-spin.headerlink,.rst-content h5 .btn .fa-spin.headerlink,.rst-content h5 .nav .fa-spin.headerlink,.rst-content h6 .btn .fa-spin.headerlink,.rst-content h6 .nav .fa-spin.headerlink,.rst-content p .btn .fa-spin.headerlink,.rst-content p .nav .fa-spin.headerlink,.rst-content table>caption .btn .fa-spin.headerlink,.rst-content table>caption .nav .fa-spin.headerlink,.rst-content tt.download .btn span.fa-spin:first-child,.rst-content tt.download .nav span.fa-spin:first-child,.wy-menu-vertical li .btn button.fa-spin.toctree-expand,.wy-menu-vertical li .nav button.fa-spin.toctree-expand{display:inline-block}.btn.fa:before,.btn.icon:before,.rst-content .btn.admonition-title:before,.rst-content .code-block-caption .btn.headerlink:before,.rst-content .eqno .btn.headerlink:before,.rst-content code.download span.btn:first-child:before,.rst-content dl dt .btn.headerlink:before,.rst-content h1 .btn.headerlink:before,.rst-content h2 .btn.headerlink:before,.rst-content h3 .btn.headerlink:before,.rst-content h4 .btn.headerlink:before,.rst-content h5 .btn.headerlink:before,.rst-content h6 .btn.headerlink:before,.rst-content p .btn.headerlink:before,.rst-content table>caption .btn.headerlink:before,.rst-content tt.download span.btn:first-child:before,.wy-menu-vertical li button.btn.toctree-expand:before{opacity:.5;-webkit-transition:opacity .05s ease-in;-moz-transition:opacity .05s ease-in;transition:opacity .05s ease-in}.btn.fa:hover:before,.btn.icon:hover:before,.rst-content 
.btn.admonition-title:hover:before,.rst-content .code-block-caption .btn.headerlink:hover:before,.rst-content .eqno .btn.headerlink:hover:before,.rst-content code.download span.btn:first-child:hover:before,.rst-content dl dt .btn.headerlink:hover:before,.rst-content h1 .btn.headerlink:hover:before,.rst-content h2 .btn.headerlink:hover:before,.rst-content h3 .btn.headerlink:hover:before,.rst-content h4 .btn.headerlink:hover:before,.rst-content h5 .btn.headerlink:hover:before,.rst-content h6 .btn.headerlink:hover:before,.rst-content p .btn.headerlink:hover:before,.rst-content table>caption .btn.headerlink:hover:before,.rst-content tt.download span.btn:first-child:hover:before,.wy-menu-vertical li button.btn.toctree-expand:hover:before{opacity:1}.btn-mini .fa:before,.btn-mini .icon:before,.btn-mini .rst-content .admonition-title:before,.btn-mini .rst-content .code-block-caption .headerlink:before,.btn-mini .rst-content .eqno .headerlink:before,.btn-mini .rst-content code.download span:first-child:before,.btn-mini .rst-content dl dt .headerlink:before,.btn-mini .rst-content h1 .headerlink:before,.btn-mini .rst-content h2 .headerlink:before,.btn-mini .rst-content h3 .headerlink:before,.btn-mini .rst-content h4 .headerlink:before,.btn-mini .rst-content h5 .headerlink:before,.btn-mini .rst-content h6 .headerlink:before,.btn-mini .rst-content p .headerlink:before,.btn-mini .rst-content table>caption .headerlink:before,.btn-mini .rst-content tt.download span:first-child:before,.btn-mini .wy-menu-vertical li button.toctree-expand:before,.rst-content .btn-mini .admonition-title:before,.rst-content .code-block-caption .btn-mini .headerlink:before,.rst-content .eqno .btn-mini .headerlink:before,.rst-content code.download .btn-mini span:first-child:before,.rst-content dl dt .btn-mini .headerlink:before,.rst-content h1 .btn-mini .headerlink:before,.rst-content h2 .btn-mini .headerlink:before,.rst-content h3 .btn-mini .headerlink:before,.rst-content h4 .btn-mini 
.headerlink:before,.rst-content h5 .btn-mini .headerlink:before,.rst-content h6 .btn-mini .headerlink:before,.rst-content p .btn-mini .headerlink:before,.rst-content table>caption .btn-mini .headerlink:before,.rst-content tt.download .btn-mini span:first-child:before,.wy-menu-vertical li .btn-mini button.toctree-expand:before{font-size:14px;vertical-align:-15%}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning,.wy-alert{padding:12px;line-height:24px;margin-bottom:24px;background:#e7f2fa}.rst-content .admonition-title,.wy-alert-title{font-weight:700;display:block;color:#fff;background:#6ab0de;padding:6px 12px;margin:-12px -12px 12px}.rst-content .danger,.rst-content .error,.rst-content .wy-alert-danger.admonition,.rst-content .wy-alert-danger.admonition-todo,.rst-content .wy-alert-danger.attention,.rst-content .wy-alert-danger.caution,.rst-content .wy-alert-danger.hint,.rst-content .wy-alert-danger.important,.rst-content .wy-alert-danger.note,.rst-content .wy-alert-danger.seealso,.rst-content .wy-alert-danger.tip,.rst-content .wy-alert-danger.warning,.wy-alert.wy-alert-danger{background:#fdf3f2}.rst-content .danger .admonition-title,.rst-content .danger .wy-alert-title,.rst-content .error .admonition-title,.rst-content .error .wy-alert-title,.rst-content .wy-alert-danger.admonition-todo .admonition-title,.rst-content .wy-alert-danger.admonition-todo .wy-alert-title,.rst-content .wy-alert-danger.admonition .admonition-title,.rst-content .wy-alert-danger.admonition .wy-alert-title,.rst-content .wy-alert-danger.attention .admonition-title,.rst-content .wy-alert-danger.attention .wy-alert-title,.rst-content .wy-alert-danger.caution .admonition-title,.rst-content .wy-alert-danger.caution .wy-alert-title,.rst-content .wy-alert-danger.hint 
.admonition-title,.rst-content .wy-alert-danger.hint .wy-alert-title,.rst-content .wy-alert-danger.important .admonition-title,.rst-content .wy-alert-danger.important .wy-alert-title,.rst-content .wy-alert-danger.note .admonition-title,.rst-content .wy-alert-danger.note .wy-alert-title,.rst-content .wy-alert-danger.seealso .admonition-title,.rst-content .wy-alert-danger.seealso .wy-alert-title,.rst-content .wy-alert-danger.tip .admonition-title,.rst-content .wy-alert-danger.tip .wy-alert-title,.rst-content .wy-alert-danger.warning .admonition-title,.rst-content .wy-alert-danger.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-danger .admonition-title,.wy-alert.wy-alert-danger .rst-content .admonition-title,.wy-alert.wy-alert-danger .wy-alert-title{background:#f29f97}.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .warning,.rst-content .wy-alert-warning.admonition,.rst-content .wy-alert-warning.danger,.rst-content .wy-alert-warning.error,.rst-content .wy-alert-warning.hint,.rst-content .wy-alert-warning.important,.rst-content .wy-alert-warning.note,.rst-content .wy-alert-warning.seealso,.rst-content .wy-alert-warning.tip,.wy-alert.wy-alert-warning{background:#ffedcc}.rst-content .admonition-todo .admonition-title,.rst-content .admonition-todo .wy-alert-title,.rst-content .attention .admonition-title,.rst-content .attention .wy-alert-title,.rst-content .caution .admonition-title,.rst-content .caution .wy-alert-title,.rst-content .warning .admonition-title,.rst-content .warning .wy-alert-title,.rst-content .wy-alert-warning.admonition .admonition-title,.rst-content .wy-alert-warning.admonition .wy-alert-title,.rst-content .wy-alert-warning.danger .admonition-title,.rst-content .wy-alert-warning.danger .wy-alert-title,.rst-content .wy-alert-warning.error .admonition-title,.rst-content .wy-alert-warning.error .wy-alert-title,.rst-content .wy-alert-warning.hint .admonition-title,.rst-content .wy-alert-warning.hint 
.wy-alert-title,.rst-content .wy-alert-warning.important .admonition-title,.rst-content .wy-alert-warning.important .wy-alert-title,.rst-content .wy-alert-warning.note .admonition-title,.rst-content .wy-alert-warning.note .wy-alert-title,.rst-content .wy-alert-warning.seealso .admonition-title,.rst-content .wy-alert-warning.seealso .wy-alert-title,.rst-content .wy-alert-warning.tip .admonition-title,.rst-content .wy-alert-warning.tip .wy-alert-title,.rst-content .wy-alert.wy-alert-warning .admonition-title,.wy-alert.wy-alert-warning .rst-content .admonition-title,.wy-alert.wy-alert-warning .wy-alert-title{background:#f0b37e}.rst-content .note,.rst-content .seealso,.rst-content .wy-alert-info.admonition,.rst-content .wy-alert-info.admonition-todo,.rst-content .wy-alert-info.attention,.rst-content .wy-alert-info.caution,.rst-content .wy-alert-info.danger,.rst-content .wy-alert-info.error,.rst-content .wy-alert-info.hint,.rst-content .wy-alert-info.important,.rst-content .wy-alert-info.tip,.rst-content .wy-alert-info.warning,.wy-alert.wy-alert-info{background:#e7f2fa}.rst-content .note .admonition-title,.rst-content .note .wy-alert-title,.rst-content .seealso .admonition-title,.rst-content .seealso .wy-alert-title,.rst-content .wy-alert-info.admonition-todo .admonition-title,.rst-content .wy-alert-info.admonition-todo .wy-alert-title,.rst-content .wy-alert-info.admonition .admonition-title,.rst-content .wy-alert-info.admonition .wy-alert-title,.rst-content .wy-alert-info.attention .admonition-title,.rst-content .wy-alert-info.attention .wy-alert-title,.rst-content .wy-alert-info.caution .admonition-title,.rst-content .wy-alert-info.caution .wy-alert-title,.rst-content .wy-alert-info.danger .admonition-title,.rst-content .wy-alert-info.danger .wy-alert-title,.rst-content .wy-alert-info.error .admonition-title,.rst-content .wy-alert-info.error .wy-alert-title,.rst-content .wy-alert-info.hint .admonition-title,.rst-content .wy-alert-info.hint .wy-alert-title,.rst-content 
.wy-alert-info.important .admonition-title,.rst-content .wy-alert-info.important .wy-alert-title,.rst-content .wy-alert-info.tip .admonition-title,.rst-content .wy-alert-info.tip .wy-alert-title,.rst-content .wy-alert-info.warning .admonition-title,.rst-content .wy-alert-info.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-info .admonition-title,.wy-alert.wy-alert-info .rst-content .admonition-title,.wy-alert.wy-alert-info .wy-alert-title{background:#6ab0de}.rst-content .hint,.rst-content .important,.rst-content .tip,.rst-content .wy-alert-success.admonition,.rst-content .wy-alert-success.admonition-todo,.rst-content .wy-alert-success.attention,.rst-content .wy-alert-success.caution,.rst-content .wy-alert-success.danger,.rst-content .wy-alert-success.error,.rst-content .wy-alert-success.note,.rst-content .wy-alert-success.seealso,.rst-content .wy-alert-success.warning,.wy-alert.wy-alert-success{background:#dbfaf4}.rst-content .hint .admonition-title,.rst-content .hint .wy-alert-title,.rst-content .important .admonition-title,.rst-content .important .wy-alert-title,.rst-content .tip .admonition-title,.rst-content .tip .wy-alert-title,.rst-content .wy-alert-success.admonition-todo .admonition-title,.rst-content .wy-alert-success.admonition-todo .wy-alert-title,.rst-content .wy-alert-success.admonition .admonition-title,.rst-content .wy-alert-success.admonition .wy-alert-title,.rst-content .wy-alert-success.attention .admonition-title,.rst-content .wy-alert-success.attention .wy-alert-title,.rst-content .wy-alert-success.caution .admonition-title,.rst-content .wy-alert-success.caution .wy-alert-title,.rst-content .wy-alert-success.danger .admonition-title,.rst-content .wy-alert-success.danger .wy-alert-title,.rst-content .wy-alert-success.error .admonition-title,.rst-content .wy-alert-success.error .wy-alert-title,.rst-content .wy-alert-success.note .admonition-title,.rst-content .wy-alert-success.note .wy-alert-title,.rst-content .wy-alert-success.seealso 
.admonition-title,.rst-content .wy-alert-success.seealso .wy-alert-title,.rst-content .wy-alert-success.warning .admonition-title,.rst-content .wy-alert-success.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-success .admonition-title,.wy-alert.wy-alert-success .rst-content .admonition-title,.wy-alert.wy-alert-success .wy-alert-title{background:#1abc9c}.rst-content .wy-alert-neutral.admonition,.rst-content .wy-alert-neutral.admonition-todo,.rst-content .wy-alert-neutral.attention,.rst-content .wy-alert-neutral.caution,.rst-content .wy-alert-neutral.danger,.rst-content .wy-alert-neutral.error,.rst-content .wy-alert-neutral.hint,.rst-content .wy-alert-neutral.important,.rst-content .wy-alert-neutral.note,.rst-content .wy-alert-neutral.seealso,.rst-content .wy-alert-neutral.tip,.rst-content .wy-alert-neutral.warning,.wy-alert.wy-alert-neutral{background:#f3f6f6}.rst-content .wy-alert-neutral.admonition-todo .admonition-title,.rst-content .wy-alert-neutral.admonition-todo .wy-alert-title,.rst-content .wy-alert-neutral.admonition .admonition-title,.rst-content .wy-alert-neutral.admonition .wy-alert-title,.rst-content .wy-alert-neutral.attention .admonition-title,.rst-content .wy-alert-neutral.attention .wy-alert-title,.rst-content .wy-alert-neutral.caution .admonition-title,.rst-content .wy-alert-neutral.caution .wy-alert-title,.rst-content .wy-alert-neutral.danger .admonition-title,.rst-content .wy-alert-neutral.danger .wy-alert-title,.rst-content .wy-alert-neutral.error .admonition-title,.rst-content .wy-alert-neutral.error .wy-alert-title,.rst-content .wy-alert-neutral.hint .admonition-title,.rst-content .wy-alert-neutral.hint .wy-alert-title,.rst-content .wy-alert-neutral.important .admonition-title,.rst-content .wy-alert-neutral.important .wy-alert-title,.rst-content .wy-alert-neutral.note .admonition-title,.rst-content .wy-alert-neutral.note .wy-alert-title,.rst-content .wy-alert-neutral.seealso .admonition-title,.rst-content .wy-alert-neutral.seealso 
.wy-alert-title,.rst-content .wy-alert-neutral.tip .admonition-title,.rst-content .wy-alert-neutral.tip .wy-alert-title,.rst-content .wy-alert-neutral.warning .admonition-title,.rst-content .wy-alert-neutral.warning .wy-alert-title,.rst-content .wy-alert.wy-alert-neutral .admonition-title,.wy-alert.wy-alert-neutral .rst-content .admonition-title,.wy-alert.wy-alert-neutral .wy-alert-title{color:#404040;background:#e1e4e5}.rst-content .wy-alert-neutral.admonition-todo a,.rst-content .wy-alert-neutral.admonition a,.rst-content .wy-alert-neutral.attention a,.rst-content .wy-alert-neutral.caution a,.rst-content .wy-alert-neutral.danger a,.rst-content .wy-alert-neutral.error a,.rst-content .wy-alert-neutral.hint a,.rst-content .wy-alert-neutral.important a,.rst-content .wy-alert-neutral.note a,.rst-content .wy-alert-neutral.seealso a,.rst-content .wy-alert-neutral.tip a,.rst-content .wy-alert-neutral.warning a,.wy-alert.wy-alert-neutral a{color:#2980b9}.rst-content .admonition-todo p:last-child,.rst-content .admonition p:last-child,.rst-content .attention p:last-child,.rst-content .caution p:last-child,.rst-content .danger p:last-child,.rst-content .error p:last-child,.rst-content .hint p:last-child,.rst-content .important p:last-child,.rst-content .note p:last-child,.rst-content .seealso p:last-child,.rst-content .tip p:last-child,.rst-content .warning p:last-child,.wy-alert p:last-child{margin-bottom:0}.wy-tray-container{position:fixed;bottom:0;left:0;z-index:600}.wy-tray-container li{display:block;width:300px;background:transparent;color:#fff;text-align:center;box-shadow:0 5px 5px 0 rgba(0,0,0,.1);padding:0 24px;min-width:20%;opacity:0;height:0;line-height:56px;overflow:hidden;-webkit-transition:all .3s ease-in;-moz-transition:all .3s ease-in;transition:all .3s ease-in}.wy-tray-container li.wy-tray-item-success{background:#27ae60}.wy-tray-container li.wy-tray-item-info{background:#2980b9}.wy-tray-container li.wy-tray-item-warning{background:#e67e22}.wy-tray-container 
li.wy-tray-item-danger{background:#e74c3c}.wy-tray-container li.on{opacity:1;height:56px}@media screen and (max-width:768px){.wy-tray-container{bottom:auto;top:0;width:100%}.wy-tray-container li{width:100%}}button{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle;cursor:pointer;line-height:normal;-webkit-appearance:button;*overflow:visible}button::-moz-focus-inner,input::-moz-focus-inner{border:0;padding:0}button[disabled]{cursor:default}.btn{display:inline-block;border-radius:2px;line-height:normal;white-space:nowrap;text-align:center;cursor:pointer;font-size:100%;padding:6px 12px 8px;color:#fff;border:1px solid rgba(0,0,0,.1);background-color:#27ae60;text-decoration:none;font-weight:400;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 2px -1px hsla(0,0%,100%,.5),inset 0 -2px 0 0 rgba(0,0,0,.1);outline-none:false;vertical-align:middle;*display:inline;zoom:1;-webkit-user-drag:none;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;-webkit-transition:all .1s linear;-moz-transition:all .1s linear;transition:all .1s linear}.btn-hover{background:#2e8ece;color:#fff}.btn:hover{background:#2cc36b;color:#fff}.btn:focus{background:#2cc36b;outline:0}.btn:active{box-shadow:inset 0 -1px 0 0 rgba(0,0,0,.05),inset 0 2px 0 0 rgba(0,0,0,.1);padding:8px 12px 6px}.btn:visited{color:#fff}.btn-disabled,.btn-disabled:active,.btn-disabled:focus,.btn-disabled:hover,.btn:disabled{background-image:none;filter:progid:DXImageTransform.Microsoft.gradient(enabled = 
false);filter:alpha(opacity=40);opacity:.4;cursor:not-allowed;box-shadow:none}.btn::-moz-focus-inner{padding:0;border:0}.btn-small{font-size:80%}.btn-info{background-color:#2980b9!important}.btn-info:hover{background-color:#2e8ece!important}.btn-neutral{background-color:#f3f6f6!important;color:#404040!important}.btn-neutral:hover{background-color:#e5ebeb!important;color:#404040}.btn-neutral:visited{color:#404040!important}.btn-success{background-color:#27ae60!important}.btn-success:hover{background-color:#295!important}.btn-danger{background-color:#e74c3c!important}.btn-danger:hover{background-color:#ea6153!important}.btn-warning{background-color:#e67e22!important}.btn-warning:hover{background-color:#e98b39!important}.btn-invert{background-color:#222}.btn-invert:hover{background-color:#2f2f2f!important}.btn-link{background-color:transparent!important;color:#2980b9;box-shadow:none;border-color:transparent!important}.btn-link:active,.btn-link:hover{background-color:transparent!important;color:#409ad5!important;box-shadow:none}.btn-link:visited{color:#9b59b6}.wy-btn-group .btn,.wy-control .btn{vertical-align:middle}.wy-btn-group{margin-bottom:24px;*zoom:1}.wy-btn-group:after,.wy-btn-group:before{display:table;content:""}.wy-btn-group:after{clear:both}.wy-dropdown{position:relative;display:inline-block}.wy-dropdown-active .wy-dropdown-menu{display:block}.wy-dropdown-menu{position:absolute;left:0;display:none;float:left;top:100%;min-width:100%;background:#fcfcfc;z-index:100;border:1px solid #cfd7dd;box-shadow:0 2px 2px 0 rgba(0,0,0,.1);padding:12px}.wy-dropdown-menu>dd>a{display:block;clear:both;color:#404040;white-space:nowrap;font-size:90%;padding:0 12px;cursor:pointer}.wy-dropdown-menu>dd>a:hover{background:#2980b9;color:#fff}.wy-dropdown-menu>dd.divider{border-top:1px solid #cfd7dd;margin:6px 0}.wy-dropdown-menu>dd.search{padding-bottom:12px}.wy-dropdown-menu>dd.search 
input[type=search]{width:100%}.wy-dropdown-menu>dd.call-to-action{background:#e3e3e3;text-transform:uppercase;font-weight:500;font-size:80%}.wy-dropdown-menu>dd.call-to-action:hover{background:#e3e3e3}.wy-dropdown-menu>dd.call-to-action .btn{color:#fff}.wy-dropdown.wy-dropdown-up .wy-dropdown-menu{bottom:100%;top:auto;left:auto;right:0}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu{background:#fcfcfc;margin-top:2px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a{padding:6px 12px}.wy-dropdown.wy-dropdown-bubble .wy-dropdown-menu a:hover{background:#2980b9;color:#fff}.wy-dropdown.wy-dropdown-left .wy-dropdown-menu{right:0;left:auto;text-align:right}.wy-dropdown-arrow:before{content:" ";border-bottom:5px solid #f5f5f5;border-left:5px solid transparent;border-right:5px solid transparent;position:absolute;display:block;top:-4px;left:50%;margin-left:-3px}.wy-dropdown-arrow.wy-dropdown-arrow-left:before{left:11px}.wy-form-stacked select{display:block}.wy-form-aligned .wy-help-inline,.wy-form-aligned input,.wy-form-aligned label,.wy-form-aligned select,.wy-form-aligned textarea{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-form-aligned .wy-control-group>label{display:inline-block;vertical-align:middle;width:10em;margin:6px 12px 0 0;float:left}.wy-form-aligned .wy-control{float:left}.wy-form-aligned .wy-control label{display:block}.wy-form-aligned .wy-control select{margin-top:6px}fieldset{margin:0}fieldset,legend{border:0;padding:0}legend{width:100%;white-space:normal;margin-bottom:24px;font-size:150%;*margin-left:-7px}label,legend{display:block}label{margin:0 0 
.3125em;color:#333;font-size:90%}input,select,textarea{font-size:100%;margin:0;vertical-align:baseline;*vertical-align:middle}.wy-control-group{margin-bottom:24px;max-width:1200px;margin-left:auto;margin-right:auto;*zoom:1}.wy-control-group:after,.wy-control-group:before{display:table;content:""}.wy-control-group:after{clear:both}.wy-control-group.wy-control-group-required>label:after{content:" *";color:#e74c3c}.wy-control-group .wy-form-full,.wy-control-group .wy-form-halves,.wy-control-group .wy-form-thirds{padding-bottom:12px}.wy-control-group .wy-form-full input[type=color],.wy-control-group .wy-form-full input[type=date],.wy-control-group .wy-form-full input[type=datetime-local],.wy-control-group .wy-form-full input[type=datetime],.wy-control-group .wy-form-full input[type=email],.wy-control-group .wy-form-full input[type=month],.wy-control-group .wy-form-full input[type=number],.wy-control-group .wy-form-full input[type=password],.wy-control-group .wy-form-full input[type=search],.wy-control-group .wy-form-full input[type=tel],.wy-control-group .wy-form-full input[type=text],.wy-control-group .wy-form-full input[type=time],.wy-control-group .wy-form-full input[type=url],.wy-control-group .wy-form-full input[type=week],.wy-control-group .wy-form-full select,.wy-control-group .wy-form-halves input[type=color],.wy-control-group .wy-form-halves input[type=date],.wy-control-group .wy-form-halves input[type=datetime-local],.wy-control-group .wy-form-halves input[type=datetime],.wy-control-group .wy-form-halves input[type=email],.wy-control-group .wy-form-halves input[type=month],.wy-control-group .wy-form-halves input[type=number],.wy-control-group .wy-form-halves input[type=password],.wy-control-group .wy-form-halves input[type=search],.wy-control-group .wy-form-halves input[type=tel],.wy-control-group .wy-form-halves input[type=text],.wy-control-group .wy-form-halves input[type=time],.wy-control-group .wy-form-halves input[type=url],.wy-control-group 
.wy-form-halves input[type=week],.wy-control-group .wy-form-halves select,.wy-control-group .wy-form-thirds input[type=color],.wy-control-group .wy-form-thirds input[type=date],.wy-control-group .wy-form-thirds input[type=datetime-local],.wy-control-group .wy-form-thirds input[type=datetime],.wy-control-group .wy-form-thirds input[type=email],.wy-control-group .wy-form-thirds input[type=month],.wy-control-group .wy-form-thirds input[type=number],.wy-control-group .wy-form-thirds input[type=password],.wy-control-group .wy-form-thirds input[type=search],.wy-control-group .wy-form-thirds input[type=tel],.wy-control-group .wy-form-thirds input[type=text],.wy-control-group .wy-form-thirds input[type=time],.wy-control-group .wy-form-thirds input[type=url],.wy-control-group .wy-form-thirds input[type=week],.wy-control-group .wy-form-thirds select{width:100%}.wy-control-group .wy-form-full{float:left;display:block;width:100%;margin-right:0}.wy-control-group .wy-form-full:last-child{margin-right:0}.wy-control-group .wy-form-halves{float:left;display:block;margin-right:2.35765%;width:48.82117%}.wy-control-group .wy-form-halves:last-child,.wy-control-group .wy-form-halves:nth-of-type(2n){margin-right:0}.wy-control-group .wy-form-halves:nth-of-type(odd){clear:left}.wy-control-group .wy-form-thirds{float:left;display:block;margin-right:2.35765%;width:31.76157%}.wy-control-group .wy-form-thirds:last-child,.wy-control-group .wy-form-thirds:nth-of-type(3n){margin-right:0}.wy-control-group .wy-form-thirds:nth-of-type(3n+1){clear:left}.wy-control-group.wy-control-group-no-input .wy-control,.wy-control-no-input{margin:6px 0 0;font-size:90%}.wy-control-no-input{display:inline-block}.wy-control-group.fluid-input input[type=color],.wy-control-group.fluid-input input[type=date],.wy-control-group.fluid-input input[type=datetime-local],.wy-control-group.fluid-input input[type=datetime],.wy-control-group.fluid-input input[type=email],.wy-control-group.fluid-input 
input[type=month],.wy-control-group.fluid-input input[type=number],.wy-control-group.fluid-input input[type=password],.wy-control-group.fluid-input input[type=search],.wy-control-group.fluid-input input[type=tel],.wy-control-group.fluid-input input[type=text],.wy-control-group.fluid-input input[type=time],.wy-control-group.fluid-input input[type=url],.wy-control-group.fluid-input input[type=week]{width:100%}.wy-form-message-inline{padding-left:.3em;color:#666;font-size:90%}.wy-form-message{display:block;color:#999;font-size:70%;margin-top:.3125em;font-style:italic}.wy-form-message p{font-size:inherit;font-style:italic;margin-bottom:6px}.wy-form-message p:last-child{margin-bottom:0}input{line-height:normal}input[type=button],input[type=reset],input[type=submit]{-webkit-appearance:button;cursor:pointer;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;*overflow:visible}input[type=color],input[type=date],input[type=datetime-local],input[type=datetime],input[type=email],input[type=month],input[type=number],input[type=password],input[type=search],input[type=tel],input[type=text],input[type=time],input[type=url],input[type=week]{-webkit-appearance:none;padding:6px;display:inline-block;border:1px solid #ccc;font-size:80%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;box-shadow:inset 0 1px 3px #ddd;border-radius:0;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}input[type=datetime-local]{padding:.34375em 
.625em}input[disabled]{cursor:default}input[type=checkbox],input[type=radio]{padding:0;margin-right:.3125em;*height:13px;*width:13px}input[type=checkbox],input[type=radio],input[type=search]{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}input[type=search]::-webkit-search-cancel-button,input[type=search]::-webkit-search-decoration{-webkit-appearance:none}input[type=color]:focus,input[type=date]:focus,input[type=datetime-local]:focus,input[type=datetime]:focus,input[type=email]:focus,input[type=month]:focus,input[type=number]:focus,input[type=password]:focus,input[type=search]:focus,input[type=tel]:focus,input[type=text]:focus,input[type=time]:focus,input[type=url]:focus,input[type=week]:focus{outline:0;outline:thin dotted\9;border-color:#333}input.no-focus:focus{border-color:#ccc!important}input[type=checkbox]:focus,input[type=file]:focus,input[type=radio]:focus{outline:thin dotted #333;outline:1px auto #129fea}input[type=color][disabled],input[type=date][disabled],input[type=datetime-local][disabled],input[type=datetime][disabled],input[type=email][disabled],input[type=month][disabled],input[type=number][disabled],input[type=password][disabled],input[type=search][disabled],input[type=tel][disabled],input[type=text][disabled],input[type=time][disabled],input[type=url][disabled],input[type=week][disabled]{cursor:not-allowed;background-color:#fafafa}input:focus:invalid,select:focus:invalid,textarea:focus:invalid{color:#e74c3c;border:1px solid #e74c3c}input:focus:invalid:focus,select:focus:invalid:focus,textarea:focus:invalid:focus{border-color:#e74c3c}input[type=checkbox]:focus:invalid:focus,input[type=file]:focus:invalid:focus,input[type=radio]:focus:invalid:focus{outline-color:#e74c3c}input.wy-input-large{padding:12px;font-size:100%}textarea{overflow:auto;vertical-align:top;width:100%;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif}select,textarea{padding:.5em .625em;display:inline-block;border:1px solid 
#ccc;font-size:80%;box-shadow:inset 0 1px 3px #ddd;-webkit-transition:border .3s linear;-moz-transition:border .3s linear;transition:border .3s linear}select{border:1px solid #ccc;background-color:#fff}select[multiple]{height:auto}select:focus,textarea:focus{outline:0}input[readonly],select[disabled],select[readonly],textarea[disabled],textarea[readonly]{cursor:not-allowed;background-color:#fafafa}input[type=checkbox][disabled],input[type=radio][disabled]{cursor:not-allowed}.wy-checkbox,.wy-radio{margin:6px 0;color:#404040;display:block}.wy-checkbox input,.wy-radio input{vertical-align:baseline}.wy-form-message-inline{display:inline-block;*display:inline;*zoom:1;vertical-align:middle}.wy-input-prefix,.wy-input-suffix{white-space:nowrap;padding:6px}.wy-input-prefix .wy-input-context,.wy-input-suffix .wy-input-context{line-height:27px;padding:0 8px;display:inline-block;font-size:80%;background-color:#f3f6f6;border:1px solid #ccc;color:#999}.wy-input-suffix .wy-input-context{border-left:0}.wy-input-prefix .wy-input-context{border-right:0}.wy-switch{position:relative;display:block;height:24px;margin-top:12px;cursor:pointer}.wy-switch:before{left:0;top:0;width:36px;height:12px;background:#ccc}.wy-switch:after,.wy-switch:before{position:absolute;content:"";display:block;border-radius:4px;-webkit-transition:all .2s ease-in-out;-moz-transition:all .2s ease-in-out;transition:all .2s ease-in-out}.wy-switch:after{width:18px;height:18px;background:#999;left:-3px;top:-3px}.wy-switch span{position:absolute;left:48px;display:block;font-size:12px;color:#ccc;line-height:1}.wy-switch.active:before{background:#1e8449}.wy-switch.active:after{left:24px;background:#27ae60}.wy-switch.disabled{cursor:not-allowed;opacity:.8}.wy-control-group.wy-control-group-error .wy-form-message,.wy-control-group.wy-control-group-error>label{color:#e74c3c}.wy-control-group.wy-control-group-error input[type=color],.wy-control-group.wy-control-group-error 
input[type=date],.wy-control-group.wy-control-group-error input[type=datetime-local],.wy-control-group.wy-control-group-error input[type=datetime],.wy-control-group.wy-control-group-error input[type=email],.wy-control-group.wy-control-group-error input[type=month],.wy-control-group.wy-control-group-error input[type=number],.wy-control-group.wy-control-group-error input[type=password],.wy-control-group.wy-control-group-error input[type=search],.wy-control-group.wy-control-group-error input[type=tel],.wy-control-group.wy-control-group-error input[type=text],.wy-control-group.wy-control-group-error input[type=time],.wy-control-group.wy-control-group-error input[type=url],.wy-control-group.wy-control-group-error input[type=week],.wy-control-group.wy-control-group-error textarea{border:1px solid #e74c3c}.wy-inline-validate{white-space:nowrap}.wy-inline-validate .wy-input-context{padding:.5em .625em;display:inline-block;font-size:80%}.wy-inline-validate.wy-inline-validate-success .wy-input-context{color:#27ae60}.wy-inline-validate.wy-inline-validate-danger .wy-input-context{color:#e74c3c}.wy-inline-validate.wy-inline-validate-warning .wy-input-context{color:#e67e22}.wy-inline-validate.wy-inline-validate-info .wy-input-context{color:#2980b9}.rotate-90{-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.rotate-180{-webkit-transform:rotate(180deg);-moz-transform:rotate(180deg);-ms-transform:rotate(180deg);-o-transform:rotate(180deg);transform:rotate(180deg)}.rotate-270{-webkit-transform:rotate(270deg);-moz-transform:rotate(270deg);-ms-transform:rotate(270deg);-o-transform:rotate(270deg);transform:rotate(270deg)}.mirror{-webkit-transform:scaleX(-1);-moz-transform:scaleX(-1);-ms-transform:scaleX(-1);-o-transform:scaleX(-1);transform:scaleX(-1)}.mirror.rotate-90{-webkit-transform:scaleX(-1) rotate(90deg);-moz-transform:scaleX(-1) rotate(90deg);-ms-transform:scaleX(-1) 
rotate(90deg);-o-transform:scaleX(-1) rotate(90deg);transform:scaleX(-1) rotate(90deg)}.mirror.rotate-180{-webkit-transform:scaleX(-1) rotate(180deg);-moz-transform:scaleX(-1) rotate(180deg);-ms-transform:scaleX(-1) rotate(180deg);-o-transform:scaleX(-1) rotate(180deg);transform:scaleX(-1) rotate(180deg)}.mirror.rotate-270{-webkit-transform:scaleX(-1) rotate(270deg);-moz-transform:scaleX(-1) rotate(270deg);-ms-transform:scaleX(-1) rotate(270deg);-o-transform:scaleX(-1) rotate(270deg);transform:scaleX(-1) rotate(270deg)}@media only screen and (max-width:480px){.wy-form button[type=submit]{margin:.7em 0 0}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=text],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week],.wy-form label{margin-bottom:.3em;display:block}.wy-form input[type=color],.wy-form input[type=date],.wy-form input[type=datetime-local],.wy-form input[type=datetime],.wy-form input[type=email],.wy-form input[type=month],.wy-form input[type=number],.wy-form input[type=password],.wy-form input[type=search],.wy-form input[type=tel],.wy-form input[type=time],.wy-form input[type=url],.wy-form input[type=week]{margin-bottom:0}.wy-form-aligned .wy-control-group label{margin-bottom:.3em;text-align:left;display:block;width:100%}.wy-form-aligned .wy-control{margin:1.5em 0 0}.wy-form-message,.wy-form-message-inline,.wy-form .wy-help-inline{display:block;font-size:80%;padding:6px 0}}@media screen and (max-width:768px){.tablet-hide{display:none}}@media screen and (max-width:480px){.mobile-hide{display:none}}.float-left{float:left}.float-right{float:right}.full-width{width:100%}.rst-content table.docutils,.rst-content 
table.field-list,.wy-table{border-collapse:collapse;border-spacing:0;empty-cells:show;margin-bottom:24px}.rst-content table.docutils caption,.rst-content table.field-list caption,.wy-table caption{color:#000;font:italic 85%/1 arial,sans-serif;padding:1em 0;text-align:center}.rst-content table.docutils td,.rst-content table.docutils th,.rst-content table.field-list td,.rst-content table.field-list th,.wy-table td,.wy-table th{font-size:90%;margin:0;overflow:visible;padding:8px 16px}.rst-content table.docutils td:first-child,.rst-content table.docutils th:first-child,.rst-content table.field-list td:first-child,.rst-content table.field-list th:first-child,.wy-table td:first-child,.wy-table th:first-child{border-left-width:0}.rst-content table.docutils thead,.rst-content table.field-list thead,.wy-table thead{color:#000;text-align:left;vertical-align:bottom;white-space:nowrap}.rst-content table.docutils thead th,.rst-content table.field-list thead th,.wy-table thead th{font-weight:700;border-bottom:2px solid #e1e4e5}.rst-content table.docutils td,.rst-content table.field-list td,.wy-table td{background-color:transparent;vertical-align:middle}.rst-content table.docutils td p,.rst-content table.field-list td p,.wy-table td p{line-height:18px}.rst-content table.docutils td p:last-child,.rst-content table.field-list td p:last-child,.wy-table td p:last-child{margin-bottom:0}.rst-content table.docutils .wy-table-cell-min,.rst-content table.field-list .wy-table-cell-min,.wy-table .wy-table-cell-min{width:1%;padding-right:0}.rst-content table.docutils .wy-table-cell-min input[type=checkbox],.rst-content table.field-list .wy-table-cell-min input[type=checkbox],.wy-table .wy-table-cell-min input[type=checkbox]{margin:0}.wy-table-secondary{color:grey;font-size:90%}.wy-table-tertiary{color:grey;font-size:80%}.rst-content table.docutils:not(.field-list) tr:nth-child(2n-1) td,.wy-table-backed,.wy-table-odd td,.wy-table-striped tr:nth-child(2n-1) 
td{background-color:#f3f6f6}.rst-content table.docutils,.wy-table-bordered-all{border:1px solid #e1e4e5}.rst-content table.docutils td,.wy-table-bordered-all td{border-bottom:1px solid #e1e4e5;border-left:1px solid #e1e4e5}.rst-content table.docutils tbody>tr:last-child td,.wy-table-bordered-all tbody>tr:last-child td{border-bottom-width:0}.wy-table-bordered{border:1px solid #e1e4e5}.wy-table-bordered-rows td{border-bottom:1px solid #e1e4e5}.wy-table-bordered-rows tbody>tr:last-child td{border-bottom-width:0}.wy-table-horizontal td,.wy-table-horizontal th{border-width:0 0 1px;border-bottom:1px solid #e1e4e5}.wy-table-horizontal tbody>tr:last-child td{border-bottom-width:0}.wy-table-responsive{margin-bottom:24px;max-width:100%;overflow:auto}.wy-table-responsive table{margin-bottom:0!important}.wy-table-responsive table td,.wy-table-responsive table th{white-space:nowrap}a{color:#2980b9;text-decoration:none;cursor:pointer}a:hover{color:#3091d1}a:visited{color:#9b59b6}html{height:100%}body,html{overflow-x:hidden}body{font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;font-weight:400;color:#404040;min-height:100%;background:#edf0f2}.wy-text-left{text-align:left}.wy-text-center{text-align:center}.wy-text-right{text-align:right}.wy-text-large{font-size:120%}.wy-text-normal{font-size:100%}.wy-text-small,small{font-size:80%}.wy-text-strike{text-decoration:line-through}.wy-text-warning{color:#e67e22!important}a.wy-text-warning:hover{color:#eb9950!important}.wy-text-info{color:#2980b9!important}a.wy-text-info:hover{color:#409ad5!important}.wy-text-success{color:#27ae60!important}a.wy-text-success:hover{color:#36d278!important}.wy-text-danger{color:#e74c3c!important}a.wy-text-danger:hover{color:#ed7669!important}.wy-text-neutral{color:#404040!important}a.wy-text-neutral:hover{color:#595959!important}.rst-content .toctree-wrapper>p.caption,h1,h2,h3,h4,h5,h6,legend{margin-top:0;font-weight:700;font-family:Roboto 
Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif}p{line-height:24px;font-size:16px;margin:0 0 24px}h1{font-size:175%}.rst-content .toctree-wrapper>p.caption,h2{font-size:150%}h3{font-size:125%}h4{font-size:115%}h5{font-size:110%}h6{font-size:100%}hr{display:block;height:1px;border:0;border-top:1px solid #e1e4e5;margin:24px 0;padding:0}.rst-content code,.rst-content tt,code{white-space:nowrap;max-width:100%;background:#fff;border:1px solid #e1e4e5;font-size:75%;padding:0 5px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#e74c3c;overflow-x:auto}.rst-content tt.code-large,code.code-large{font-size:90%}.rst-content .section ul,.rst-content .toctree-wrapper ul,.rst-content section ul,.wy-plain-list-disc,article ul{list-style:disc;line-height:24px;margin-bottom:24px}.rst-content .section ul li,.rst-content .toctree-wrapper ul li,.rst-content section ul li,.wy-plain-list-disc li,article ul li{list-style:disc;margin-left:24px}.rst-content .section ul li p:last-child,.rst-content .section ul li ul,.rst-content .toctree-wrapper ul li p:last-child,.rst-content .toctree-wrapper ul li ul,.rst-content section ul li p:last-child,.rst-content section ul li ul,.wy-plain-list-disc li p:last-child,.wy-plain-list-disc li ul,article ul li p:last-child,article ul li ul{margin-bottom:0}.rst-content .section ul li li,.rst-content .toctree-wrapper ul li li,.rst-content section ul li li,.wy-plain-list-disc li li,article ul li li{list-style:circle}.rst-content .section ul li li li,.rst-content .toctree-wrapper ul li li li,.rst-content section ul li li li,.wy-plain-list-disc li li li,article ul li li li{list-style:square}.rst-content .section ul li ol li,.rst-content .toctree-wrapper ul li ol li,.rst-content section ul li ol li,.wy-plain-list-disc li ol li,article ul li ol li{list-style:decimal}.rst-content .section ol,.rst-content .section ol.arabic,.rst-content .toctree-wrapper ol,.rst-content .toctree-wrapper ol.arabic,.rst-content 
section ol,.rst-content section ol.arabic,.wy-plain-list-decimal,article ol{list-style:decimal;line-height:24px;margin-bottom:24px}.rst-content .section ol.arabic li,.rst-content .section ol li,.rst-content .toctree-wrapper ol.arabic li,.rst-content .toctree-wrapper ol li,.rst-content section ol.arabic li,.rst-content section ol li,.wy-plain-list-decimal li,article ol li{list-style:decimal;margin-left:24px}.rst-content .section ol.arabic li ul,.rst-content .section ol li p:last-child,.rst-content .section ol li ul,.rst-content .toctree-wrapper ol.arabic li ul,.rst-content .toctree-wrapper ol li p:last-child,.rst-content .toctree-wrapper ol li ul,.rst-content section ol.arabic li ul,.rst-content section ol li p:last-child,.rst-content section ol li ul,.wy-plain-list-decimal li p:last-child,.wy-plain-list-decimal li ul,article ol li p:last-child,article ol li ul{margin-bottom:0}.rst-content .section ol.arabic li ul li,.rst-content .section ol li ul li,.rst-content .toctree-wrapper ol.arabic li ul li,.rst-content .toctree-wrapper ol li ul li,.rst-content section ol.arabic li ul li,.rst-content section ol li ul li,.wy-plain-list-decimal li ul li,article ol li ul li{list-style:disc}.wy-breadcrumbs{*zoom:1}.wy-breadcrumbs:after,.wy-breadcrumbs:before{display:table;content:""}.wy-breadcrumbs:after{clear:both}.wy-breadcrumbs>li{display:inline-block;padding-top:5px}.wy-breadcrumbs>li.wy-breadcrumbs-aside{float:right}.rst-content .wy-breadcrumbs>li code,.rst-content .wy-breadcrumbs>li tt,.wy-breadcrumbs>li .rst-content tt,.wy-breadcrumbs>li code{all:inherit;color:inherit}.breadcrumb-item:before{content:"/";color:#bbb;font-size:13px;padding:0 6px 0 3px}.wy-breadcrumbs-extra{margin-bottom:0;color:#b3b3b3;font-size:80%;display:inline-block}@media screen and (max-width:480px){.wy-breadcrumbs-extra,.wy-breadcrumbs li.wy-breadcrumbs-aside{display:none}}@media print{.wy-breadcrumbs 
li.wy-breadcrumbs-aside{display:none}}html{font-size:16px}.wy-affix{position:fixed;top:1.618em}.wy-menu a:hover{text-decoration:none}.wy-menu-horiz{*zoom:1}.wy-menu-horiz:after,.wy-menu-horiz:before{display:table;content:""}.wy-menu-horiz:after{clear:both}.wy-menu-horiz li,.wy-menu-horiz ul{display:inline-block}.wy-menu-horiz li:hover{background:hsla(0,0%,100%,.1)}.wy-menu-horiz li.divide-left{border-left:1px solid #404040}.wy-menu-horiz li.divide-right{border-right:1px solid #404040}.wy-menu-horiz a{height:32px;display:inline-block;line-height:32px;padding:0 16px}.wy-menu-vertical{width:300px}.wy-menu-vertical header,.wy-menu-vertical p.caption{color:#55a5d9;height:32px;line-height:32px;padding:0 1.618em;margin:12px 0 0;display:block;font-weight:700;text-transform:uppercase;font-size:85%;white-space:nowrap}.wy-menu-vertical ul{margin-bottom:0}.wy-menu-vertical li.divide-top{border-top:1px solid #404040}.wy-menu-vertical li.divide-bottom{border-bottom:1px solid #404040}.wy-menu-vertical li.current{background:#e3e3e3}.wy-menu-vertical li.current a{color:grey;border-right:1px solid #c9c9c9;padding:.4045em 2.427em}.wy-menu-vertical li.current a:hover{background:#d6d6d6}.rst-content .wy-menu-vertical li tt,.wy-menu-vertical li .rst-content tt,.wy-menu-vertical li code{border:none;background:inherit;color:inherit;padding-left:0;padding-right:0}.wy-menu-vertical li button.toctree-expand{display:block;float:left;margin-left:-1.2em;line-height:18px;color:#4d4d4d;border:none;background:none;padding:0}.wy-menu-vertical li.current>a,.wy-menu-vertical li.on a{color:#404040;font-weight:700;position:relative;background:#fcfcfc;border:none;padding:.4045em 1.618em}.wy-menu-vertical li.current>a:hover,.wy-menu-vertical li.on a:hover{background:#fcfcfc}.wy-menu-vertical li.current>a:hover button.toctree-expand,.wy-menu-vertical li.on a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.current>a button.toctree-expand,.wy-menu-vertical li.on a 
button.toctree-expand{display:block;line-height:18px;color:#333}.wy-menu-vertical li.toctree-l1.current>a{border-bottom:1px solid #c9c9c9;border-top:1px solid #c9c9c9}.wy-menu-vertical .toctree-l1.current .toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .toctree-l11>ul{display:none}.wy-menu-vertical .toctree-l1.current .current.toctree-l2>ul,.wy-menu-vertical .toctree-l2.current .current.toctree-l3>ul,.wy-menu-vertical .toctree-l3.current .current.toctree-l4>ul,.wy-menu-vertical .toctree-l4.current .current.toctree-l5>ul,.wy-menu-vertical .toctree-l5.current .current.toctree-l6>ul,.wy-menu-vertical .toctree-l6.current .current.toctree-l7>ul,.wy-menu-vertical .toctree-l7.current .current.toctree-l8>ul,.wy-menu-vertical .toctree-l8.current .current.toctree-l9>ul,.wy-menu-vertical .toctree-l9.current .current.toctree-l10>ul,.wy-menu-vertical .toctree-l10.current .current.toctree-l11>ul{display:block}.wy-menu-vertical li.toctree-l3,.wy-menu-vertical li.toctree-l4{font-size:.9em}.wy-menu-vertical li.toctree-l2 a,.wy-menu-vertical li.toctree-l3 a,.wy-menu-vertical li.toctree-l4 a,.wy-menu-vertical li.toctree-l5 a,.wy-menu-vertical li.toctree-l6 a,.wy-menu-vertical li.toctree-l7 a,.wy-menu-vertical li.toctree-l8 a,.wy-menu-vertical li.toctree-l9 a,.wy-menu-vertical li.toctree-l10 a{color:#404040}.wy-menu-vertical li.toctree-l2 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l3 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l4 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l5 a:hover 
button.toctree-expand,.wy-menu-vertical li.toctree-l6 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l7 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l8 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l9 a:hover button.toctree-expand,.wy-menu-vertical li.toctree-l10 a:hover button.toctree-expand{color:grey}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a,.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a,.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a,.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a,.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a,.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a,.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a,.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{display:block}.wy-menu-vertical li.toctree-l2.current>a{padding:.4045em 2.427em}.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{padding:.4045em 1.618em .4045em 4.045em}.wy-menu-vertical li.toctree-l3.current>a{padding:.4045em 4.045em}.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{padding:.4045em 1.618em .4045em 5.663em}.wy-menu-vertical li.toctree-l4.current>a{padding:.4045em 5.663em}.wy-menu-vertical li.toctree-l4.current li.toctree-l5>a{padding:.4045em 1.618em .4045em 7.281em}.wy-menu-vertical li.toctree-l5.current>a{padding:.4045em 7.281em}.wy-menu-vertical li.toctree-l5.current li.toctree-l6>a{padding:.4045em 1.618em .4045em 8.899em}.wy-menu-vertical li.toctree-l6.current>a{padding:.4045em 8.899em}.wy-menu-vertical li.toctree-l6.current li.toctree-l7>a{padding:.4045em 1.618em .4045em 10.517em}.wy-menu-vertical li.toctree-l7.current>a{padding:.4045em 10.517em}.wy-menu-vertical li.toctree-l7.current li.toctree-l8>a{padding:.4045em 1.618em .4045em 12.135em}.wy-menu-vertical li.toctree-l8.current>a{padding:.4045em 12.135em}.wy-menu-vertical li.toctree-l8.current li.toctree-l9>a{padding:.4045em 1.618em .4045em 
13.753em}.wy-menu-vertical li.toctree-l9.current>a{padding:.4045em 13.753em}.wy-menu-vertical li.toctree-l9.current li.toctree-l10>a{padding:.4045em 1.618em .4045em 15.371em}.wy-menu-vertical li.toctree-l10.current>a{padding:.4045em 15.371em}.wy-menu-vertical li.toctree-l10.current li.toctree-l11>a{padding:.4045em 1.618em .4045em 16.989em}.wy-menu-vertical li.toctree-l2.current>a,.wy-menu-vertical li.toctree-l2.current li.toctree-l3>a{background:#c9c9c9}.wy-menu-vertical li.toctree-l2 button.toctree-expand{color:#a3a3a3}.wy-menu-vertical li.toctree-l3.current>a,.wy-menu-vertical li.toctree-l3.current li.toctree-l4>a{background:#bdbdbd}.wy-menu-vertical li.toctree-l3 button.toctree-expand{color:#969696}.wy-menu-vertical li.current ul{display:block}.wy-menu-vertical li ul{margin-bottom:0;display:none}.wy-menu-vertical li ul li a{margin-bottom:0;color:#d9d9d9;font-weight:400}.wy-menu-vertical a{line-height:18px;padding:.4045em 1.618em;display:block;position:relative;font-size:90%;color:#d9d9d9}.wy-menu-vertical a:hover{background-color:#4e4a4a;cursor:pointer}.wy-menu-vertical a:hover button.toctree-expand{color:#d9d9d9}.wy-menu-vertical a:active{background-color:#2980b9;cursor:pointer;color:#fff}.wy-menu-vertical a:active button.toctree-expand{color:#fff}.wy-side-nav-search{display:block;width:300px;padding:.809em;margin-bottom:.809em;z-index:200;background-color:#2980b9;text-align:center;color:#fcfcfc}.wy-side-nav-search input[type=text]{width:100%;border-radius:50px;padding:6px 12px;border-color:#2472a4}.wy-side-nav-search img{display:block;margin:auto auto .809em;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-side-nav-search .wy-dropdown>a,.wy-side-nav-search>a{color:#fcfcfc;font-size:100%;font-weight:700;display:inline-block;padding:4px 6px;margin-bottom:.809em;max-width:100%}.wy-side-nav-search .wy-dropdown>a:hover,.wy-side-nav-search>a:hover{background:hsla(0,0%,100%,.1)}.wy-side-nav-search .wy-dropdown>a 
img.logo,.wy-side-nav-search>a img.logo{display:block;margin:0 auto;height:auto;width:auto;border-radius:0;max-width:100%;background:transparent}.wy-side-nav-search .wy-dropdown>a.icon img.logo,.wy-side-nav-search>a.icon img.logo{margin-top:.85em}.wy-side-nav-search>div.version{margin-top:-.4045em;margin-bottom:.809em;font-weight:400;color:hsla(0,0%,100%,.3)}.wy-nav .wy-menu-vertical header{color:#2980b9}.wy-nav .wy-menu-vertical a{color:#b3b3b3}.wy-nav .wy-menu-vertical a:hover{background-color:#2980b9;color:#fff}[data-menu-wrap]{-webkit-transition:all .2s ease-in;-moz-transition:all .2s ease-in;transition:all .2s ease-in;position:absolute;opacity:1;width:100%;opacity:0}[data-menu-wrap].move-center{left:0;right:auto;opacity:1}[data-menu-wrap].move-left{right:auto;left:-100%;opacity:0}[data-menu-wrap].move-right{right:-100%;left:auto;opacity:0}.wy-body-for-nav{background:#fcfcfc}.wy-grid-for-nav{position:absolute;width:100%;height:100%}.wy-nav-side{position:fixed;top:0;bottom:0;left:0;padding-bottom:2em;width:300px;overflow-x:hidden;overflow-y:hidden;min-height:100%;color:#9b9b9b;background:#343131;z-index:200}.wy-side-scroll{width:320px;position:relative;overflow-x:hidden;overflow-y:scroll;height:100%}.wy-nav-top{display:none;background:#2980b9;color:#fff;padding:.4045em .809em;position:relative;line-height:50px;text-align:center;font-size:100%;*zoom:1}.wy-nav-top:after,.wy-nav-top:before{display:table;content:""}.wy-nav-top:after{clear:both}.wy-nav-top a{color:#fff;font-weight:700}.wy-nav-top img{margin-right:12px;height:45px;width:45px;background-color:#2980b9;padding:5px;border-radius:100%}.wy-nav-top i{font-size:30px;float:left;cursor:pointer;padding-top:inherit}.wy-nav-content-wrap{margin-left:300px;background:#fcfcfc;min-height:100%}.wy-nav-content{padding:1.618em 
3.236em;height:100%;max-width:800px;margin:auto}.wy-body-mask{position:fixed;width:100%;height:100%;background:rgba(0,0,0,.2);display:none;z-index:499}.wy-body-mask.on{display:block}footer{color:grey}footer p{margin-bottom:12px}.rst-content footer span.commit tt,footer span.commit .rst-content tt,footer span.commit code{padding:0;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:1em;background:none;border:none;color:grey}.rst-footer-buttons{*zoom:1}.rst-footer-buttons:after,.rst-footer-buttons:before{width:100%;display:table;content:""}.rst-footer-buttons:after{clear:both}.rst-breadcrumbs-buttons{margin-top:12px;*zoom:1}.rst-breadcrumbs-buttons:after,.rst-breadcrumbs-buttons:before{display:table;content:""}.rst-breadcrumbs-buttons:after{clear:both}#search-results .search li{margin-bottom:24px;border-bottom:1px solid #e1e4e5;padding-bottom:24px}#search-results .search li:first-child{border-top:1px solid #e1e4e5;padding-top:24px}#search-results .search li a{font-size:120%;margin-bottom:12px;display:inline-block}#search-results .context{color:grey;font-size:90%}.genindextable li>ul{margin-left:24px}@media screen and (max-width:768px){.wy-body-for-nav{background:#fcfcfc}.wy-nav-top{display:block}.wy-nav-side{left:-300px}.wy-nav-side.shift{width:85%;left:0}.wy-menu.wy-menu-vertical,.wy-side-nav-search,.wy-side-scroll{width:auto}.wy-nav-content-wrap{margin-left:0}.wy-nav-content-wrap .wy-nav-content{padding:1.618em}.wy-nav-content-wrap.shift{position:fixed;min-width:100%;left:85%;top:0;height:100%;overflow:hidden}}@media screen and (min-width:1100px){.wy-nav-content-wrap{background:rgba(0,0,0,.05)}.wy-nav-content{margin:0;background:#fcfcfc}}@media print{.rst-versions,.wy-nav-side,footer{display:none}.wy-nav-content-wrap{margin-left:0}}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:Lato,proxima-nova,Helvetica Neue,Arial,sans-serif;z-index:400}.rst-versions 
a{color:#2980b9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27ae60;*zoom:1}.rst-versions .rst-current-version:after,.rst-versions .rst-current-version:before{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-content .code-block-caption .rst-versions .rst-current-version .headerlink,.rst-content .eqno .rst-versions .rst-current-version .headerlink,.rst-content .rst-versions .rst-current-version .admonition-title,.rst-content code.download .rst-versions .rst-current-version span:first-child,.rst-content dl dt .rst-versions .rst-current-version .headerlink,.rst-content h1 .rst-versions .rst-current-version .headerlink,.rst-content h2 .rst-versions .rst-current-version .headerlink,.rst-content h3 .rst-versions .rst-current-version .headerlink,.rst-content h4 .rst-versions .rst-current-version .headerlink,.rst-content h5 .rst-versions .rst-current-version .headerlink,.rst-content h6 .rst-versions .rst-current-version .headerlink,.rst-content p .rst-versions .rst-current-version .headerlink,.rst-content table>caption .rst-versions .rst-current-version .headerlink,.rst-content tt.download .rst-versions .rst-current-version span:first-child,.rst-versions .rst-current-version .fa,.rst-versions .rst-current-version .icon,.rst-versions .rst-current-version .rst-content .admonition-title,.rst-versions .rst-current-version .rst-content .code-block-caption .headerlink,.rst-versions .rst-current-version .rst-content .eqno .headerlink,.rst-versions .rst-current-version .rst-content code.download span:first-child,.rst-versions .rst-current-version .rst-content dl dt .headerlink,.rst-versions .rst-current-version .rst-content h1 .headerlink,.rst-versions .rst-current-version .rst-content h2 .headerlink,.rst-versions .rst-current-version .rst-content h3 .headerlink,.rst-versions 
.rst-current-version .rst-content h4 .headerlink,.rst-versions .rst-current-version .rst-content h5 .headerlink,.rst-versions .rst-current-version .rst-content h6 .headerlink,.rst-versions .rst-current-version .rst-content p .headerlink,.rst-versions .rst-current-version .rst-content table>caption .headerlink,.rst-versions .rst-current-version .rst-content tt.download span:first-child,.rst-versions .rst-current-version .wy-menu-vertical li button.toctree-expand,.wy-menu-vertical li .rst-versions .rst-current-version button.toctree-expand{color:#fcfcfc}.rst-versions .rst-current-version .fa-book,.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#e74c3c;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#f1c40f;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:grey;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:1px solid #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .fa-book,.rst-versions.rst-badge .icon-book{float:none;line-height:30px}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book,.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge>.rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width:768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}}.rst-content 
.toctree-wrapper>p.caption,.rst-content h1,.rst-content h2,.rst-content h3,.rst-content h4,.rst-content h5,.rst-content h6{margin-bottom:24px}.rst-content img{max-width:100%;height:auto}.rst-content div.figure,.rst-content figure{margin-bottom:24px}.rst-content div.figure .caption-text,.rst-content figure .caption-text{font-style:italic}.rst-content div.figure p:last-child.caption,.rst-content figure p:last-child.caption{margin-bottom:0}.rst-content div.figure.align-center,.rst-content figure.align-center{text-align:center}.rst-content .section>a>img,.rst-content .section>img,.rst-content section>a>img,.rst-content section>img{margin-bottom:24px}.rst-content abbr[title]{text-decoration:none}.rst-content.style-external-links a.reference.external:after{font-family:FontAwesome;content:"\f08e";color:#b3b3b3;vertical-align:super;font-size:60%;margin:0 .2em}.rst-content blockquote{margin-left:24px;line-height:24px;margin-bottom:24px}.rst-content pre.literal-block{white-space:pre;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;display:block;overflow:auto}.rst-content div[class^=highlight],.rst-content pre.literal-block{border:1px solid #e1e4e5;overflow-x:auto;margin:1px 0 24px}.rst-content div[class^=highlight] div[class^=highlight],.rst-content pre.literal-block div[class^=highlight]{padding:0;border:none;margin:0}.rst-content div[class^=highlight] td.code{width:100%}.rst-content .linenodiv pre{border-right:1px solid #e6e9ea;margin:0;padding:12px;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;user-select:none;pointer-events:none}.rst-content div[class^=highlight] pre{white-space:pre;margin:0;padding:12px;display:block;overflow:auto}.rst-content div[class^=highlight] pre .hll{display:block;margin:0 -12px;padding:0 12px}.rst-content .linenodiv pre,.rst-content div[class^=highlight] pre,.rst-content 
pre.literal-block{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;font-size:12px;line-height:1.4}.rst-content div.highlight .gp,.rst-content div.highlight span.linenos{user-select:none;pointer-events:none}.rst-content div.highlight span.linenos{display:inline-block;padding-left:0;padding-right:12px;margin-right:12px;border-right:1px solid #e6e9ea}.rst-content .code-block-caption{font-style:italic;font-size:85%;line-height:1;padding:1em 0;text-align:center}@media print{.rst-content .codeblock,.rst-content div[class^=highlight],.rst-content div[class^=highlight] pre{white-space:pre-wrap}}.rst-content .admonition,.rst-content .admonition-todo,.rst-content .attention,.rst-content .caution,.rst-content .danger,.rst-content .error,.rst-content .hint,.rst-content .important,.rst-content .note,.rst-content .seealso,.rst-content .tip,.rst-content .warning{clear:both}.rst-content .admonition-todo .last,.rst-content .admonition-todo>:last-child,.rst-content .admonition .last,.rst-content .admonition>:last-child,.rst-content .attention .last,.rst-content .attention>:last-child,.rst-content .caution .last,.rst-content .caution>:last-child,.rst-content .danger .last,.rst-content .danger>:last-child,.rst-content .error .last,.rst-content .error>:last-child,.rst-content .hint .last,.rst-content .hint>:last-child,.rst-content .important .last,.rst-content .important>:last-child,.rst-content .note .last,.rst-content .note>:last-child,.rst-content .seealso .last,.rst-content .seealso>:last-child,.rst-content .tip .last,.rst-content .tip>:last-child,.rst-content .warning .last,.rst-content .warning>:last-child{margin-bottom:0}.rst-content .admonition-title:before{margin-right:4px}.rst-content .admonition table{border-color:rgba(0,0,0,.1)}.rst-content .admonition table td,.rst-content .admonition table th{background:transparent!important;border-color:rgba(0,0,0,.1)!important}.rst-content .section ol.loweralpha,.rst-content .section 
ol.loweralpha>li,.rst-content .toctree-wrapper ol.loweralpha,.rst-content .toctree-wrapper ol.loweralpha>li,.rst-content section ol.loweralpha,.rst-content section ol.loweralpha>li{list-style:lower-alpha}.rst-content .section ol.upperalpha,.rst-content .section ol.upperalpha>li,.rst-content .toctree-wrapper ol.upperalpha,.rst-content .toctree-wrapper ol.upperalpha>li,.rst-content section ol.upperalpha,.rst-content section ol.upperalpha>li{list-style:upper-alpha}.rst-content .section ol li>*,.rst-content .section ul li>*,.rst-content .toctree-wrapper ol li>*,.rst-content .toctree-wrapper ul li>*,.rst-content section ol li>*,.rst-content section ul li>*{margin-top:12px;margin-bottom:12px}.rst-content .section ol li>:first-child,.rst-content .section ul li>:first-child,.rst-content .toctree-wrapper ol li>:first-child,.rst-content .toctree-wrapper ul li>:first-child,.rst-content section ol li>:first-child,.rst-content section ul li>:first-child{margin-top:0}.rst-content .section ol li>p,.rst-content .section ol li>p:last-child,.rst-content .section ul li>p,.rst-content .section ul li>p:last-child,.rst-content .toctree-wrapper ol li>p,.rst-content .toctree-wrapper ol li>p:last-child,.rst-content .toctree-wrapper ul li>p,.rst-content .toctree-wrapper ul li>p:last-child,.rst-content section ol li>p,.rst-content section ol li>p:last-child,.rst-content section ul li>p,.rst-content section ul li>p:last-child{margin-bottom:12px}.rst-content .section ol li>p:only-child,.rst-content .section ol li>p:only-child:last-child,.rst-content .section ul li>p:only-child,.rst-content .section ul li>p:only-child:last-child,.rst-content .toctree-wrapper ol li>p:only-child,.rst-content .toctree-wrapper ol li>p:only-child:last-child,.rst-content .toctree-wrapper ul li>p:only-child,.rst-content .toctree-wrapper ul li>p:only-child:last-child,.rst-content section ol li>p:only-child,.rst-content section ol li>p:only-child:last-child,.rst-content section ul li>p:only-child,.rst-content section ul 
li>p:only-child:last-child{margin-bottom:0}.rst-content .section ol li>ol,.rst-content .section ol li>ul,.rst-content .section ul li>ol,.rst-content .section ul li>ul,.rst-content .toctree-wrapper ol li>ol,.rst-content .toctree-wrapper ol li>ul,.rst-content .toctree-wrapper ul li>ol,.rst-content .toctree-wrapper ul li>ul,.rst-content section ol li>ol,.rst-content section ol li>ul,.rst-content section ul li>ol,.rst-content section ul li>ul{margin-bottom:12px}.rst-content .section ol.simple li>*,.rst-content .section ol.simple li ol,.rst-content .section ol.simple li ul,.rst-content .section ul.simple li>*,.rst-content .section ul.simple li ol,.rst-content .section ul.simple li ul,.rst-content .toctree-wrapper ol.simple li>*,.rst-content .toctree-wrapper ol.simple li ol,.rst-content .toctree-wrapper ol.simple li ul,.rst-content .toctree-wrapper ul.simple li>*,.rst-content .toctree-wrapper ul.simple li ol,.rst-content .toctree-wrapper ul.simple li ul,.rst-content section ol.simple li>*,.rst-content section ol.simple li ol,.rst-content section ol.simple li ul,.rst-content section ul.simple li>*,.rst-content section ul.simple li ol,.rst-content section ul.simple li ul{margin-top:0;margin-bottom:0}.rst-content .line-block{margin-left:0;margin-bottom:24px;line-height:24px}.rst-content .line-block .line-block{margin-left:24px;margin-bottom:0}.rst-content .topic-title{font-weight:700;margin-bottom:12px}.rst-content .toc-backref{color:#404040}.rst-content .align-right{float:right;margin:0 0 24px 24px}.rst-content .align-left{float:left;margin:0 24px 24px 0}.rst-content .align-center{margin:auto}.rst-content .align-center:not(table){display:block}.rst-content .code-block-caption .headerlink,.rst-content .eqno .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink,.rst-content dl dt .headerlink,.rst-content h1 .headerlink,.rst-content h2 .headerlink,.rst-content h3 .headerlink,.rst-content h4 .headerlink,.rst-content h5 .headerlink,.rst-content h6 
.headerlink,.rst-content p.caption .headerlink,.rst-content p .headerlink,.rst-content table>caption .headerlink{opacity:0;font-size:14px;font-family:FontAwesome;margin-left:.5em}.rst-content .code-block-caption .headerlink:focus,.rst-content .code-block-caption:hover .headerlink,.rst-content .eqno .headerlink:focus,.rst-content .eqno:hover .headerlink,.rst-content .toctree-wrapper>p.caption .headerlink:focus,.rst-content .toctree-wrapper>p.caption:hover .headerlink,.rst-content dl dt .headerlink:focus,.rst-content dl dt:hover .headerlink,.rst-content h1 .headerlink:focus,.rst-content h1:hover .headerlink,.rst-content h2 .headerlink:focus,.rst-content h2:hover .headerlink,.rst-content h3 .headerlink:focus,.rst-content h3:hover .headerlink,.rst-content h4 .headerlink:focus,.rst-content h4:hover .headerlink,.rst-content h5 .headerlink:focus,.rst-content h5:hover .headerlink,.rst-content h6 .headerlink:focus,.rst-content h6:hover .headerlink,.rst-content p.caption .headerlink:focus,.rst-content p.caption:hover .headerlink,.rst-content p .headerlink:focus,.rst-content p:hover .headerlink,.rst-content table>caption .headerlink:focus,.rst-content table>caption:hover .headerlink{opacity:1}.rst-content p a{overflow-wrap:anywhere}.rst-content .wy-table td p,.rst-content .wy-table td ul,.rst-content .wy-table th p,.rst-content .wy-table th ul,.rst-content table.docutils td p,.rst-content table.docutils td ul,.rst-content table.docutils th p,.rst-content table.docutils th ul,.rst-content table.field-list td p,.rst-content table.field-list td ul,.rst-content table.field-list th p,.rst-content table.field-list th ul{font-size:inherit}.rst-content .btn:focus{outline:2px solid}.rst-content table>caption .headerlink:after{font-size:12px}.rst-content .centered{text-align:center}.rst-content .sidebar{float:right;width:40%;display:block;margin:0 0 24px 24px;padding:24px;background:#f3f6f6;border:1px solid #e1e4e5}.rst-content .sidebar dl,.rst-content .sidebar p,.rst-content .sidebar 
ul{font-size:90%}.rst-content .sidebar .last,.rst-content .sidebar>:last-child{margin-bottom:0}.rst-content .sidebar .sidebar-title{display:block;font-family:Roboto Slab,ff-tisa-web-pro,Georgia,Arial,sans-serif;font-weight:700;background:#e1e4e5;padding:6px 12px;margin:-24px -24px 24px;font-size:100%}.rst-content .highlighted{background:#f1c40f;box-shadow:0 0 0 2px #f1c40f;display:inline;font-weight:700}.rst-content .citation-reference,.rst-content .footnote-reference{vertical-align:baseline;position:relative;top:-.4em;line-height:0;font-size:90%}.rst-content .citation-reference>span.fn-bracket,.rst-content .footnote-reference>span.fn-bracket{display:none}.rst-content .hlist{width:100%}.rst-content dl dt span.classifier:before{content:" : "}.rst-content dl dt span.classifier-delimiter{display:none!important}html.writer-html4 .rst-content table.docutils.citation,html.writer-html4 .rst-content table.docutils.footnote{background:none;border:none}html.writer-html4 .rst-content table.docutils.citation td,html.writer-html4 .rst-content table.docutils.citation tr,html.writer-html4 .rst-content table.docutils.footnote td,html.writer-html4 .rst-content table.docutils.footnote tr{border:none;background-color:transparent!important;white-space:normal}html.writer-html4 .rst-content table.docutils.citation td.label,html.writer-html4 .rst-content table.docutils.footnote td.label{padding-left:0;padding-right:0;vertical-align:top}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{display:grid;grid-template-columns:auto minmax(80%,95%)}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{display:inline-grid;grid-template-columns:max-content auto}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{display:grid;grid-template-columns:auto 
auto minmax(.65rem,auto) minmax(40%,95%)}html.writer-html5 .rst-content aside.citation>span.label,html.writer-html5 .rst-content aside.footnote>span.label,html.writer-html5 .rst-content div.citation>span.label{grid-column-start:1;grid-column-end:2}html.writer-html5 .rst-content aside.citation>span.backrefs,html.writer-html5 .rst-content aside.footnote>span.backrefs,html.writer-html5 .rst-content div.citation>span.backrefs{grid-column-start:2;grid-column-end:3;grid-row-start:1;grid-row-end:3}html.writer-html5 .rst-content aside.citation>p,html.writer-html5 .rst-content aside.footnote>p,html.writer-html5 .rst-content div.citation>p{grid-column-start:4;grid-column-end:5}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.field-list,html.writer-html5 .rst-content dl.footnote{margin-bottom:24px}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dt{padding-left:1rem}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.field-list>dd,html.writer-html5 .rst-content dl.field-list>dt,html.writer-html5 .rst-content dl.footnote>dd,html.writer-html5 .rst-content dl.footnote>dt{margin-bottom:0}html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{font-size:.9rem}html.writer-html5 .rst-content dl.citation>dt,html.writer-html5 .rst-content dl.footnote>dt{margin:0 .5rem .5rem 0;line-height:1.2rem;word-break:break-all;font-weight:400}html.writer-html5 .rst-content dl.citation>dt>span.brackets:before,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:before{content:"["}html.writer-html5 .rst-content dl.citation>dt>span.brackets:after,html.writer-html5 .rst-content dl.footnote>dt>span.brackets:after{content:"]"}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref,html.writer-html5 .rst-content 
dl.footnote>dt>span.fn-backref{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a{word-break:keep-all}html.writer-html5 .rst-content dl.citation>dt>span.fn-backref>a:not(:first-child):before,html.writer-html5 .rst-content dl.footnote>dt>span.fn-backref>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content dl.citation>dd,html.writer-html5 .rst-content dl.footnote>dd{margin:0 0 .5rem;line-height:1.2rem}html.writer-html5 .rst-content dl.citation>dd p,html.writer-html5 .rst-content dl.footnote>dd p{font-size:.9rem}html.writer-html5 .rst-content aside.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content div.citation{padding-left:1rem;padding-right:1rem;font-size:.9rem;line-height:1.2rem}html.writer-html5 .rst-content aside.citation p,html.writer-html5 .rst-content aside.footnote p,html.writer-html5 .rst-content div.citation p{font-size:.9rem;line-height:1.2rem;margin-bottom:12px}html.writer-html5 .rst-content aside.citation span.backrefs,html.writer-html5 .rst-content aside.footnote span.backrefs,html.writer-html5 .rst-content div.citation span.backrefs{text-align:left;font-style:italic;margin-left:.65rem;word-break:break-word;word-spacing:-.1rem;max-width:5rem}html.writer-html5 .rst-content aside.citation span.backrefs>a,html.writer-html5 .rst-content aside.footnote span.backrefs>a,html.writer-html5 .rst-content div.citation span.backrefs>a{word-break:keep-all}html.writer-html5 .rst-content aside.citation span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content aside.footnote span.backrefs>a:not(:first-child):before,html.writer-html5 .rst-content div.citation span.backrefs>a:not(:first-child):before{content:" "}html.writer-html5 .rst-content aside.citation span.label,html.writer-html5 .rst-content aside.footnote 
span.label,html.writer-html5 .rst-content div.citation span.label{line-height:1.2rem}html.writer-html5 .rst-content aside.citation-list,html.writer-html5 .rst-content aside.footnote-list,html.writer-html5 .rst-content div.citation-list{margin-bottom:24px}html.writer-html5 .rst-content dl.option-list kbd{font-size:.9rem}.rst-content table.docutils.footnote,html.writer-html4 .rst-content table.docutils.citation,html.writer-html5 .rst-content aside.footnote,html.writer-html5 .rst-content aside.footnote-list aside.footnote,html.writer-html5 .rst-content div.citation-list>div.citation,html.writer-html5 .rst-content dl.citation,html.writer-html5 .rst-content dl.footnote{color:grey}.rst-content table.docutils.footnote code,.rst-content table.docutils.footnote tt,html.writer-html4 .rst-content table.docutils.citation code,html.writer-html4 .rst-content table.docutils.citation tt,html.writer-html5 .rst-content aside.footnote-list aside.footnote code,html.writer-html5 .rst-content aside.footnote-list aside.footnote tt,html.writer-html5 .rst-content aside.footnote code,html.writer-html5 .rst-content aside.footnote tt,html.writer-html5 .rst-content div.citation-list>div.citation code,html.writer-html5 .rst-content div.citation-list>div.citation tt,html.writer-html5 .rst-content dl.citation code,html.writer-html5 .rst-content dl.citation tt,html.writer-html5 .rst-content dl.footnote code,html.writer-html5 .rst-content dl.footnote tt{color:#555}.rst-content .wy-table-responsive.citation,.rst-content .wy-table-responsive.footnote{margin-bottom:0}.rst-content .wy-table-responsive.citation+:not(.citation),.rst-content .wy-table-responsive.footnote+:not(.footnote){margin-top:24px}.rst-content .wy-table-responsive.citation:last-child,.rst-content .wy-table-responsive.footnote:last-child{margin-bottom:24px}.rst-content table.docutils th{border-color:#e1e4e5}html.writer-html5 .rst-content table.docutils th{border:1px solid #e1e4e5}html.writer-html5 .rst-content table.docutils 
td>p,html.writer-html5 .rst-content table.docutils th>p{line-height:1rem;margin-bottom:0;font-size:.9rem}.rst-content table.docutils td .last,.rst-content table.docutils td .last>:last-child{margin-bottom:0}.rst-content table.field-list,.rst-content table.field-list td{border:none}.rst-content table.field-list td p{line-height:inherit}.rst-content table.field-list td>strong{display:inline-block}.rst-content table.field-list .field-name{padding-right:10px;text-align:left;white-space:nowrap}.rst-content table.field-list .field-body{text-align:left}.rst-content code,.rst-content tt{color:#000;font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;padding:2px 5px}.rst-content code big,.rst-content code em,.rst-content tt big,.rst-content tt em{font-size:100%!important;line-height:normal}.rst-content code.literal,.rst-content tt.literal{color:#e74c3c;white-space:normal}.rst-content code.xref,.rst-content tt.xref,a .rst-content code,a .rst-content tt{font-weight:700;color:#404040;overflow-wrap:normal}.rst-content kbd,.rst-content pre,.rst-content samp{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace}.rst-content a code,.rst-content a tt{color:#2980b9}.rst-content dl{margin-bottom:24px}.rst-content dl dt{font-weight:700;margin-bottom:12px}.rst-content dl ol,.rst-content dl p,.rst-content dl table,.rst-content dl ul{margin-bottom:12px}.rst-content dl dd{margin:0 0 12px 24px;line-height:24px}.rst-content dl dd>ol:last-child,.rst-content dl dd>p:last-child,.rst-content dl dd>table:last-child,.rst-content dl dd>ul:last-child{margin-bottom:0}html.writer-html4 .rst-content dl:not(.docutils),html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple){margin-bottom:24px}html.writer-html4 .rst-content dl:not(.docutils)>dt,html.writer-html5 .rst-content 
dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{display:table;margin:6px 0;font-size:90%;line-height:normal;background:#e7f2fa;color:#2980b9;border-top:3px solid #6ab0de;padding:6px;position:relative}html.writer-html4 .rst-content dl:not(.docutils)>dt:before,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:before{color:#6ab0de}html.writer-html4 .rst-content dl:not(.docutils)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt{margin-bottom:6px;border:none;border-left:3px solid #ccc;background:#f0f0f0;color:#555}html.writer-html4 .rst-content dl:not(.docutils) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) dl:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt .headerlink{color:#404040;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils)>dt:first-child,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple)>dt:first-child{margin-top:0}html.writer-html4 .rst-content dl:not(.docutils) code.descclassname,html.writer-html4 .rst-content dl:not(.docutils) 
code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descclassname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{background-color:transparent;border:none;padding:0;font-size:100%!important}html.writer-html4 .rst-content dl:not(.docutils) code.descname,html.writer-html4 .rst-content dl:not(.docutils) tt.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) code.descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) tt.descname{font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .optional,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .optional{display:inline-block;padding:0 4px;color:#000;font-weight:700}html.writer-html4 .rst-content dl:not(.docutils) .property,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .property{display:inline-block;padding-right:8px;max-width:100%}html.writer-html4 .rst-content dl:not(.docutils) .k,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .k{font-style:italic}html.writer-html4 
.rst-content dl:not(.docutils) .descclassname,html.writer-html4 .rst-content dl:not(.docutils) .descname,html.writer-html4 .rst-content dl:not(.docutils) .sig-name,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descclassname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .descname,html.writer-html5 .rst-content dl[class]:not(.option-list):not(.field-list):not(.footnote):not(.citation):not(.glossary):not(.simple) .sig-name{font-family:SFMono-Regular,Menlo,Monaco,Consolas,Liberation Mono,Courier New,Courier,monospace;color:#000}.rst-content .viewcode-back,.rst-content .viewcode-link{display:inline-block;color:#27ae60;font-size:80%;padding-left:24px}.rst-content .viewcode-back{display:block;float:right}.rst-content p.rubric{margin-bottom:12px;font-weight:700}.rst-content code.download,.rst-content tt.download{background:inherit;padding:inherit;font-weight:400;font-family:inherit;font-size:inherit;color:inherit;border:inherit;white-space:inherit}.rst-content code.download span:first-child,.rst-content tt.download span:first-child{-webkit-font-smoothing:subpixel-antialiased}.rst-content code.download span:first-child:before,.rst-content tt.download span:first-child:before{margin-right:4px}.rst-content .guilabel,.rst-content .menuselection{font-size:80%;font-weight:700;border-radius:4px;padding:2.4px 6px;margin:auto 2px}.rst-content .guilabel,.rst-content .menuselection{border:1px solid #7fbbe3;background:#e7f2fa}.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>.kbd,.rst-content :not(dl.option-list)>:not(dt):not(kbd):not(.kbd)>kbd{color:inherit;font-size:80%;background-color:#fff;border:1px solid #a6a6a6;border-radius:4px;box-shadow:0 2px grey;padding:2.4px 6px;margin:auto 0}.rst-content .versionmodified{font-style:italic}@media screen and (max-width:480px){.rst-content 
.sidebar{width:100%}}span[id*=MathJax-Span]{color:#404040}.math{text-align:center}@font-face{font-family:Lato;src:url(fonts/lato-normal.woff2?bd03a2cc277bbbc338d464e679fe9942) format("woff2"),url(fonts/lato-normal.woff?27bd77b9162d388cb8d4c4217c7c5e2a) format("woff");font-weight:400;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold.woff2?cccb897485813c7c256901dbca54ecf2) format("woff2"),url(fonts/lato-bold.woff?d878b6c29b10beca227e9eef4246111b) format("woff");font-weight:700;font-style:normal;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-bold-italic.woff2?0b6bb6725576b072c5d0b02ecdd1900d) format("woff2"),url(fonts/lato-bold-italic.woff?9c7e4e9eb485b4a121c760e61bc3707c) format("woff");font-weight:700;font-style:italic;font-display:block}@font-face{font-family:Lato;src:url(fonts/lato-normal-italic.woff2?4eb103b4d12be57cb1d040ed5e162e9d) format("woff2"),url(fonts/lato-normal-italic.woff?f28f2d6482446544ef1ea1ccc6dd5892) format("woff");font-weight:400;font-style:italic;font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:400;src:url(fonts/Roboto-Slab-Regular.woff2?7abf5b8d04d26a2cafea937019bca958) format("woff2"),url(fonts/Roboto-Slab-Regular.woff?c1be9284088d487c5e3ff0a10a92e58c) format("woff");font-display:block}@font-face{font-family:Roboto Slab;font-style:normal;font-weight:700;src:url(fonts/Roboto-Slab-Bold.woff2?9984f4a9bda09be08e83f2506954adbe) format("woff2"),url(fonts/Roboto-Slab-Bold.woff?bed5564a116b05148e3b3bea6fb1162a) format("woff");font-display:block} \ No newline at end of file diff --git a/_static/custom.css b/_static/custom.css new file mode 100644 index 000000000..3674c5a50 --- /dev/null +++ b/_static/custom.css @@ -0,0 +1,44 @@ +/* Override nav bar color */ +/*.wy-side-nav-search { + background-color: #fbfbb6; +} +.wy-side-nav-search > a { + color: #b2355c +}*/ + +/* Override text bar color */ +/*.caption-text { + color: #b2355c; +}*/ + +/* Override code 
signature colour */ +/*.rst-content dl:not(.docutils) dt { + background: #fbfbb6; + color: #b2355c; + border-top: solid 3px #b2355c; +}*/ + +/* Override hyperlink colour */ +/* a { + color: #b2355c; +}*/ + +/* Make content width wider*/ +.wy-nav-content { + max-width: 60% !important; +} + + +.wy-side-nav-search { + display: block; + width: 300px; + padding: 0.809em; + margin-bottom: 0.809em; + z-index: 200; + background-color: #fcfcfc; + text-align: center; + padding: 0.809em; + display: block; + color: #fcfcfc; + margin-bottom: 0.809em; +} diff --git a/_static/doctools.js b/_static/doctools.js new file mode 100644 index 000000000..d06a71d75 --- /dev/null +++ b/_static/doctools.js @@ -0,0 +1,156 @@ +/* + * doctools.js + * ~~~~~~~~~~~ + * + * Base JavaScript utilities for all Sphinx HTML documentation. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 
0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only 
install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/_static/documentation_options.js b/_static/documentation_options.js new file mode 100644 index 000000000..2f6d4d374 --- /dev/null +++ b/_static/documentation_options.js @@ -0,0 +1,14 @@ +var DOCUMENTATION_OPTIONS = { + URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), + VERSION: '0.8.3', + LANGUAGE: 'en', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/_static/file.png b/_static/file.png new 
file mode 100644 index 000000000..a858a410e Binary files /dev/null and b/_static/file.png differ diff --git a/_static/jquery.js b/_static/jquery.js new file mode 100644 index 000000000..c4c6022f2 --- /dev/null +++ b/_static/jquery.js @@ -0,0 +1,2 @@ +/*! jQuery v3.6.0 | (c) OpenJS Foundation and other contributors | jquery.org/license */ +!function(e,t){"use strict";"object"==typeof module&&"object"==typeof module.exports?module.exports=e.document?t(e,!0):function(e){if(!e.document)throw new Error("jQuery requires a window with a document");return t(e)}:t(e)}("undefined"!=typeof window?window:this,function(C,e){"use strict";var t=[],r=Object.getPrototypeOf,s=t.slice,g=t.flat?function(e){return t.flat.call(e)}:function(e){return t.concat.apply([],e)},u=t.push,i=t.indexOf,n={},o=n.toString,v=n.hasOwnProperty,a=v.toString,l=a.call(Object),y={},m=function(e){return"function"==typeof e&&"number"!=typeof e.nodeType&&"function"!=typeof e.item},x=function(e){return null!=e&&e===e.window},E=C.document,c={type:!0,src:!0,nonce:!0,noModule:!0};function b(e,t,n){var r,i,o=(n=n||E).createElement("script");if(o.text=e,t)for(r in c)(i=t[r]||t.getAttribute&&t.getAttribute(r))&&o.setAttribute(r,i);n.head.appendChild(o).parentNode.removeChild(o)}function w(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?n[o.call(e)]||"object":typeof e}var f="3.6.0",S=function(e,t){return new S.fn.init(e,t)};function p(e){var t=!!e&&"length"in e&&e.length,n=w(e);return!m(e)&&!x(e)&&("array"===n||0===t||"number"==typeof t&&0+~]|"+M+")"+M+"*"),U=new RegExp(M+"|>"),X=new RegExp(F),V=new RegExp("^"+I+"$"),G={ID:new RegExp("^#("+I+")"),CLASS:new RegExp("^\\.("+I+")"),TAG:new RegExp("^("+I+"|[*])"),ATTR:new RegExp("^"+W),PSEUDO:new RegExp("^"+F),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+R+")$","i"),needsContext:new 
RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/HTML$/i,Q=/^(?:input|select|textarea|button)$/i,J=/^h\d$/i,K=/^[^{]+\{\s*\[native \w/,Z=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ee=/[+~]/,te=new RegExp("\\\\[\\da-fA-F]{1,6}"+M+"?|\\\\([^\\r\\n\\f])","g"),ne=function(e,t){var n="0x"+e.slice(1)-65536;return t||(n<0?String.fromCharCode(n+65536):String.fromCharCode(n>>10|55296,1023&n|56320))},re=/([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,ie=function(e,t){return t?"\0"===e?"\ufffd":e.slice(0,-1)+"\\"+e.charCodeAt(e.length-1).toString(16)+" ":"\\"+e},oe=function(){T()},ae=be(function(e){return!0===e.disabled&&"fieldset"===e.nodeName.toLowerCase()},{dir:"parentNode",next:"legend"});try{H.apply(t=O.call(p.childNodes),p.childNodes),t[p.childNodes.length].nodeType}catch(e){H={apply:t.length?function(e,t){L.apply(e,O.call(t))}:function(e,t){var n=e.length,r=0;while(e[n++]=t[r++]);e.length=n-1}}}function se(t,e,n,r){var i,o,a,s,u,l,c,f=e&&e.ownerDocument,p=e?e.nodeType:9;if(n=n||[],"string"!=typeof t||!t||1!==p&&9!==p&&11!==p)return n;if(!r&&(T(e),e=e||C,E)){if(11!==p&&(u=Z.exec(t)))if(i=u[1]){if(9===p){if(!(a=e.getElementById(i)))return n;if(a.id===i)return n.push(a),n}else if(f&&(a=f.getElementById(i))&&y(e,a)&&a.id===i)return n.push(a),n}else{if(u[2])return H.apply(n,e.getElementsByTagName(t)),n;if((i=u[3])&&d.getElementsByClassName&&e.getElementsByClassName)return H.apply(n,e.getElementsByClassName(i)),n}if(d.qsa&&!N[t+" "]&&(!v||!v.test(t))&&(1!==p||"object"!==e.nodeName.toLowerCase())){if(c=t,f=e,1===p&&(U.test(t)||z.test(t))){(f=ee.test(t)&&ye(e.parentNode)||e)===e&&d.scope||((s=e.getAttribute("id"))?s=s.replace(re,ie):e.setAttribute("id",s=S)),o=(l=h(t)).length;while(o--)l[o]=(s?"#"+s:":scope")+" "+xe(l[o]);c=l.join(",")}try{return H.apply(n,f.querySelectorAll(c)),n}catch(e){N(t,!0)}finally{s===S&&e.removeAttribute("id")}}}return g(t.replace($,"$1"),e,n,r)}function ue(){var r=[];return function 
e(t,n){return r.push(t+" ")>b.cacheLength&&delete e[r.shift()],e[t+" "]=n}}function le(e){return e[S]=!0,e}function ce(e){var t=C.createElement("fieldset");try{return!!e(t)}catch(e){return!1}finally{t.parentNode&&t.parentNode.removeChild(t),t=null}}function fe(e,t){var n=e.split("|"),r=n.length;while(r--)b.attrHandle[n[r]]=t}function pe(e,t){var n=t&&e,r=n&&1===e.nodeType&&1===t.nodeType&&e.sourceIndex-t.sourceIndex;if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function de(t){return function(e){return"input"===e.nodeName.toLowerCase()&&e.type===t}}function he(n){return function(e){var t=e.nodeName.toLowerCase();return("input"===t||"button"===t)&&e.type===n}}function ge(t){return function(e){return"form"in e?e.parentNode&&!1===e.disabled?"label"in e?"label"in e.parentNode?e.parentNode.disabled===t:e.disabled===t:e.isDisabled===t||e.isDisabled!==!t&&ae(e)===t:e.disabled===t:"label"in e&&e.disabled===t}}function ve(a){return le(function(o){return o=+o,le(function(e,t){var n,r=a([],e.length,o),i=r.length;while(i--)e[n=r[i]]&&(e[n]=!(t[n]=e[n]))})})}function ye(e){return e&&"undefined"!=typeof e.getElementsByTagName&&e}for(e in d=se.support={},i=se.isXML=function(e){var t=e&&e.namespaceURI,n=e&&(e.ownerDocument||e).documentElement;return!Y.test(t||n&&n.nodeName||"HTML")},T=se.setDocument=function(e){var t,n,r=e?e.ownerDocument||e:p;return r!=C&&9===r.nodeType&&r.documentElement&&(a=(C=r).documentElement,E=!i(C),p!=C&&(n=C.defaultView)&&n.top!==n&&(n.addEventListener?n.addEventListener("unload",oe,!1):n.attachEvent&&n.attachEvent("onunload",oe)),d.scope=ce(function(e){return a.appendChild(e).appendChild(C.createElement("div")),"undefined"!=typeof e.querySelectorAll&&!e.querySelectorAll(":scope fieldset div").length}),d.attributes=ce(function(e){return e.className="i",!e.getAttribute("className")}),d.getElementsByTagName=ce(function(e){return 
e.appendChild(C.createComment("")),!e.getElementsByTagName("*").length}),d.getElementsByClassName=K.test(C.getElementsByClassName),d.getById=ce(function(e){return a.appendChild(e).id=S,!C.getElementsByName||!C.getElementsByName(S).length}),d.getById?(b.filter.ID=function(e){var t=e.replace(te,ne);return function(e){return e.getAttribute("id")===t}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n=t.getElementById(e);return n?[n]:[]}}):(b.filter.ID=function(e){var n=e.replace(te,ne);return function(e){var t="undefined"!=typeof e.getAttributeNode&&e.getAttributeNode("id");return t&&t.value===n}},b.find.ID=function(e,t){if("undefined"!=typeof t.getElementById&&E){var n,r,i,o=t.getElementById(e);if(o){if((n=o.getAttributeNode("id"))&&n.value===e)return[o];i=t.getElementsByName(e),r=0;while(o=i[r++])if((n=o.getAttributeNode("id"))&&n.value===e)return[o]}return[]}}),b.find.TAG=d.getElementsByTagName?function(e,t){return"undefined"!=typeof t.getElementsByTagName?t.getElementsByTagName(e):d.qsa?t.querySelectorAll(e):void 0}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},b.find.CLASS=d.getElementsByClassName&&function(e,t){if("undefined"!=typeof t.getElementsByClassName&&E)return t.getElementsByClassName(e)},s=[],v=[],(d.qsa=K.test(C.querySelectorAll))&&(ce(function(e){var 
t;a.appendChild(e).innerHTML="",e.querySelectorAll("[msallowcapture^='']").length&&v.push("[*^$]="+M+"*(?:''|\"\")"),e.querySelectorAll("[selected]").length||v.push("\\["+M+"*(?:value|"+R+")"),e.querySelectorAll("[id~="+S+"-]").length||v.push("~="),(t=C.createElement("input")).setAttribute("name",""),e.appendChild(t),e.querySelectorAll("[name='']").length||v.push("\\["+M+"*name"+M+"*="+M+"*(?:''|\"\")"),e.querySelectorAll(":checked").length||v.push(":checked"),e.querySelectorAll("a#"+S+"+*").length||v.push(".#.+[+~]"),e.querySelectorAll("\\\f"),v.push("[\\r\\n\\f]")}),ce(function(e){e.innerHTML="";var t=C.createElement("input");t.setAttribute("type","hidden"),e.appendChild(t).setAttribute("name","D"),e.querySelectorAll("[name=d]").length&&v.push("name"+M+"*[*^$|!~]?="),2!==e.querySelectorAll(":enabled").length&&v.push(":enabled",":disabled"),a.appendChild(e).disabled=!0,2!==e.querySelectorAll(":disabled").length&&v.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),v.push(",.*:")})),(d.matchesSelector=K.test(c=a.matches||a.webkitMatchesSelector||a.mozMatchesSelector||a.oMatchesSelector||a.msMatchesSelector))&&ce(function(e){d.disconnectedMatch=c.call(e,"*"),c.call(e,"[s!='']:x"),s.push("!=",F)}),v=v.length&&new RegExp(v.join("|")),s=s.length&&new RegExp(s.join("|")),t=K.test(a.compareDocumentPosition),y=t||K.test(a.contains)?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},j=t?function(e,t){if(e===t)return l=!0,0;var n=!e.compareDocumentPosition-!t.compareDocumentPosition;return n||(1&(n=(e.ownerDocument||e)==(t.ownerDocument||t)?e.compareDocumentPosition(t):1)||!d.sortDetached&&t.compareDocumentPosition(e)===n?e==C||e.ownerDocument==p&&y(p,e)?-1:t==C||t.ownerDocument==p&&y(p,t)?1:u?P(u,e)-P(u,t):0:4&n?-1:1)}:function(e,t){if(e===t)return 
l=!0,0;var n,r=0,i=e.parentNode,o=t.parentNode,a=[e],s=[t];if(!i||!o)return e==C?-1:t==C?1:i?-1:o?1:u?P(u,e)-P(u,t):0;if(i===o)return pe(e,t);n=e;while(n=n.parentNode)a.unshift(n);n=t;while(n=n.parentNode)s.unshift(n);while(a[r]===s[r])r++;return r?pe(a[r],s[r]):a[r]==p?-1:s[r]==p?1:0}),C},se.matches=function(e,t){return se(e,null,null,t)},se.matchesSelector=function(e,t){if(T(e),d.matchesSelector&&E&&!N[t+" "]&&(!s||!s.test(t))&&(!v||!v.test(t)))try{var n=c.call(e,t);if(n||d.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(e){N(t,!0)}return 0":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(te,ne),e[3]=(e[3]||e[4]||e[5]||"").replace(te,ne),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||se.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&se.error(e[0]),e},PSEUDO:function(e){var t,n=!e[6]&&e[2];return G.CHILD.test(e[0])?null:(e[3]?e[2]=e[4]||e[5]||"":n&&X.test(n)&&(t=h(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){var t=e.replace(te,ne).toLowerCase();return"*"===e?function(){return!0}:function(e){return e.nodeName&&e.nodeName.toLowerCase()===t}},CLASS:function(e){var t=m[e+" "];return t||(t=new RegExp("(^|"+M+")"+e+"("+M+"|$)"))&&m(e,function(e){return t.test("string"==typeof e.className&&e.className||"undefined"!=typeof e.getAttribute&&e.getAttribute("class")||"")})},ATTR:function(n,r,i){return function(e){var t=se.attr(e,n);return null==t?"!="===r:!r||(t+="","="===r?t===i:"!="===r?t!==i:"^="===r?i&&0===t.indexOf(i):"*="===r?i&&-1:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i;function j(e,n,r){return m(n)?S.grep(e,function(e,t){return!!n.call(e,t,e)!==r}):n.nodeType?S.grep(e,function(e){return 
e===n!==r}):"string"!=typeof n?S.grep(e,function(e){return-1)[^>]*|#([\w-]+))$/;(S.fn.init=function(e,t,n){var r,i;if(!e)return this;if(n=n||D,"string"==typeof e){if(!(r="<"===e[0]&&">"===e[e.length-1]&&3<=e.length?[null,e,null]:q.exec(e))||!r[1]&&t)return!t||t.jquery?(t||n).find(e):this.constructor(t).find(e);if(r[1]){if(t=t instanceof S?t[0]:t,S.merge(this,S.parseHTML(r[1],t&&t.nodeType?t.ownerDocument||t:E,!0)),N.test(r[1])&&S.isPlainObject(t))for(r in t)m(this[r])?this[r](t[r]):this.attr(r,t[r]);return this}return(i=E.getElementById(r[2]))&&(this[0]=i,this.length=1),this}return e.nodeType?(this[0]=e,this.length=1,this):m(e)?void 0!==n.ready?n.ready(e):e(S):S.makeArray(e,this)}).prototype=S.fn,D=S(E);var L=/^(?:parents|prev(?:Until|All))/,H={children:!0,contents:!0,next:!0,prev:!0};function O(e,t){while((e=e[t])&&1!==e.nodeType);return e}S.fn.extend({has:function(e){var t=S(e,this),n=t.length;return this.filter(function(){for(var e=0;e\x20\t\r\n\f]*)/i,he=/^$|^module$|\/(?:java|ecma)script/i;ce=E.createDocumentFragment().appendChild(E.createElement("div")),(fe=E.createElement("input")).setAttribute("type","radio"),fe.setAttribute("checked","checked"),fe.setAttribute("name","t"),ce.appendChild(fe),y.checkClone=ce.cloneNode(!0).cloneNode(!0).lastChild.checked,ce.innerHTML="",y.noCloneChecked=!!ce.cloneNode(!0).lastChild.defaultValue,ce.innerHTML="",y.option=!!ce.lastChild;var ge={thead:[1,"","
"],col:[2,"","
"],tr:[2,"","
"],td:[3,"","
"],_default:[0,"",""]};function ve(e,t){var n;return n="undefined"!=typeof e.getElementsByTagName?e.getElementsByTagName(t||"*"):"undefined"!=typeof e.querySelectorAll?e.querySelectorAll(t||"*"):[],void 0===t||t&&A(e,t)?S.merge([e],n):n}function ye(e,t){for(var n=0,r=e.length;n",""]);var me=/<|&#?\w+;/;function xe(e,t,n,r,i){for(var o,a,s,u,l,c,f=t.createDocumentFragment(),p=[],d=0,h=e.length;d\s*$/g;function je(e,t){return A(e,"table")&&A(11!==t.nodeType?t:t.firstChild,"tr")&&S(e).children("tbody")[0]||e}function De(e){return e.type=(null!==e.getAttribute("type"))+"/"+e.type,e}function qe(e){return"true/"===(e.type||"").slice(0,5)?e.type=e.type.slice(5):e.removeAttribute("type"),e}function Le(e,t){var n,r,i,o,a,s;if(1===t.nodeType){if(Y.hasData(e)&&(s=Y.get(e).events))for(i in Y.remove(t,"handle events"),s)for(n=0,r=s[i].length;n").attr(n.scriptAttrs||{}).prop({charset:n.scriptCharset,src:n.url}).on("load error",i=function(e){r.remove(),i=null,e&&t("error"===e.type?404:200,e.type)}),E.head.appendChild(r[0])},abort:function(){i&&i()}}});var _t,zt=[],Ut=/(=)\?(?=&|$)|\?\?/;S.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=zt.pop()||S.expando+"_"+wt.guid++;return this[e]=!0,e}}),S.ajaxPrefilter("json jsonp",function(e,t,n){var r,i,o,a=!1!==e.jsonp&&(Ut.test(e.url)?"url":"string"==typeof e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Ut.test(e.data)&&"data");if(a||"jsonp"===e.dataTypes[0])return r=e.jsonpCallback=m(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,a?e[a]=e[a].replace(Ut,"$1"+r):!1!==e.jsonp&&(e.url+=(Tt.test(e.url)?"&":"?")+e.jsonp+"="+r),e.converters["script json"]=function(){return o||S.error(r+" was not called"),o[0]},e.dataTypes[0]="json",i=C[r],C[r]=function(){o=arguments},n.always(function(){void 0===i?S(C).removeProp(r):C[r]=i,e[r]&&(e.jsonpCallback=t.jsonpCallback,zt.push(r)),o&&m(i)&&i(o[0]),o=i=void 0}),"script"}),y.createHTMLDocument=((_t=E.implementation.createHTMLDocument("").body).innerHTML="
",2===_t.childNodes.length),S.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(y.createHTMLDocument?((r=(t=E.implementation.createHTMLDocument("")).createElement("base")).href=E.location.href,t.head.appendChild(r)):t=E),o=!n&&[],(i=N.exec(e))?[t.createElement(i[1])]:(i=xe([e],t,o),o&&o.length&&S(o).remove(),S.merge([],i.childNodes)));var r,i,o},S.fn.load=function(e,t,n){var r,i,o,a=this,s=e.indexOf(" ");return-1").append(S.parseHTML(e)).find(r):e)}).always(n&&function(e,t){a.each(function(){n.apply(this,o||[e.responseText,t,e])})}),this},S.expr.pseudos.animated=function(t){return S.grep(S.timers,function(e){return t===e.elem}).length},S.offset={setOffset:function(e,t,n){var r,i,o,a,s,u,l=S.css(e,"position"),c=S(e),f={};"static"===l&&(e.style.position="relative"),s=c.offset(),o=S.css(e,"top"),u=S.css(e,"left"),("absolute"===l||"fixed"===l)&&-1<(o+u).indexOf("auto")?(a=(r=c.position()).top,i=r.left):(a=parseFloat(o)||0,i=parseFloat(u)||0),m(t)&&(t=t.call(e,n,S.extend({},s))),null!=t.top&&(f.top=t.top-s.top+a),null!=t.left&&(f.left=t.left-s.left+i),"using"in t?t.using.call(e,f):c.css(f)}},S.fn.extend({offset:function(t){if(arguments.length)return void 0===t?this:this.each(function(e){S.offset.setOffset(this,t,e)});var e,n,r=this[0];return r?r.getClientRects().length?(e=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:e.top+n.pageYOffset,left:e.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var 
e,t,n,r=this[0],i={top:0,left:0};if("fixed"===S.css(r,"position"))t=r.getBoundingClientRect();else{t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;while(e&&(e===n.body||e===n.documentElement)&&"static"===S.css(e,"position"))e=e.parentNode;e&&e!==r&&1===e.nodeType&&((i=S(e).offset()).top+=S.css(e,"borderTopWidth",!0),i.left+=S.css(e,"borderLeftWidth",!0))}return{top:t.top-i.top-S.css(r,"marginTop",!0),left:t.left-i.left-S.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var e=this.offsetParent;while(e&&"static"===S.css(e,"position"))e=e.offsetParent;return e||re})}}),S.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(t,i){var o="pageYOffset"===i;S.fn[t]=function(e){return $(this,function(e,t,n){var r;if(x(e)?r=e:9===e.nodeType&&(r=e.defaultView),void 0===n)return r?r[i]:e[t];r?r.scrollTo(o?r.pageXOffset:n,o?n:r.pageYOffset):e[t]=n},t,e,arguments.length)}}),S.each(["top","left"],function(e,n){S.cssHooks[n]=Fe(y.pixelPosition,function(e,t){if(t)return t=We(e,n),Pe.test(t)?S(e).position()[n]+"px":t})}),S.each({Height:"height",Width:"width"},function(a,s){S.each({padding:"inner"+a,content:s,"":"outer"+a},function(r,o){S.fn[o]=function(e,t){var n=arguments.length&&(r||"boolean"!=typeof e),i=r||(!0===e||!0===t?"margin":"border");return $(this,function(e,t,n){var r;return x(e)?0===o.indexOf("outer")?e["inner"+a]:e.document.documentElement["client"+a]:9===e.nodeType?(r=e.documentElement,Math.max(e.body["scroll"+a],r["scroll"+a],e.body["offset"+a],r["offset"+a],r["client"+a])):void 0===n?S.css(e,t,i):S.style(e,t,n,i)},s,n?e:void 0,n)}})}),S.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(e,t){S.fn[t]=function(e){return this.on(t,e)}}),S.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 
1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),S.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),function(e,n){S.fn[n]=function(e,t){return 0",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=y.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=y.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),y.elements=c+" "+a,j(b)}function f(a){var b=x[a[v]];return b||(b={},w++,a[v]=w,x[w]=b),b}function g(a,c,d){if(c||(c=b),q)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():u.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||t.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),q)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return y.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(y,b.frag)}function j(a){a||(a=b);var d=f(a);return!y.shivCSS||p||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),q||i(a,d),a}function k(a){for(var b,c=a.getElementsByTagName("*"),e=c.length,f=RegExp("^(?:"+d().join("|")+")$","i"),g=[];e--;)b=c[e],f.test(b.nodeName)&&g.push(b.applyElement(l(b)));return g}function l(a){for(var 
b,c=a.attributes,d=c.length,e=a.ownerDocument.createElement(A+":"+a.nodeName);d--;)b=c[d],b.specified&&e.setAttribute(b.nodeName,b.nodeValue);return e.style.cssText=a.style.cssText,e}function m(a){for(var b,c=a.split("{"),e=c.length,f=RegExp("(^|[\\s,>+~])("+d().join("|")+")(?=[[\\s,>+~#.:]|$)","gi"),g="$1"+A+"\\:$2";e--;)b=c[e]=c[e].split("}"),b[b.length-1]=b[b.length-1].replace(f,g),c[e]=b.join("}");return c.join("{")}function n(a){for(var b=a.length;b--;)a[b].removeNode()}function o(a){function b(){clearTimeout(g._removeSheetTimer),d&&d.removeNode(!0),d=null}var d,e,g=f(a),h=a.namespaces,i=a.parentWindow;return!B||a.printShived?a:("undefined"==typeof h[A]&&h.add(A),i.attachEvent("onbeforeprint",function(){b();for(var f,g,h,i=a.styleSheets,j=[],l=i.length,n=Array(l);l--;)n[l]=i[l];for(;h=n.pop();)if(!h.disabled&&z.test(h.media)){try{f=h.imports,g=f.length}catch(o){g=0}for(l=0;g>l;l++)n.push(f[l]);try{j.push(h.cssText)}catch(o){}}j=m(j.reverse().join("")),e=k(a),d=c(a,j)}),i.attachEvent("onafterprint",function(){n(e),clearTimeout(g._removeSheetTimer),g._removeSheetTimer=setTimeout(b,500)}),a.printShived=!0,a)}var p,q,r="3.7.3",s=a.html5||{},t=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,u=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,v="_html5shiv",w=0,x={};!function(){try{var a=b.createElement("a");a.innerHTML="",p="hidden"in a,q=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){p=!0,q=!0}}();var y={elements:s.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time 
video",version:r,shivCSS:s.shivCSS!==!1,supportsUnknownElements:q,shivMethods:s.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=y,j(b);var z=/^$|\b(?:all|print)\b/,A="html5shiv",B=!q&&function(){var c=b.documentElement;return!("undefined"==typeof b.namespaces||"undefined"==typeof b.parentWindow||"undefined"==typeof c.applyElement||"undefined"==typeof c.removeNode||"undefined"==typeof a.attachEvent)}();y.type+=" print",y.shivPrint=o,o(b),"object"==typeof module&&module.exports&&(module.exports=y)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/_static/js/html5shiv.min.js b/_static/js/html5shiv.min.js new file mode 100644 index 000000000..cd1c674f5 --- /dev/null +++ b/_static/js/html5shiv.min.js @@ -0,0 +1,4 @@ +/** +* @preserve HTML5 Shiv 3.7.3 | @afarkas @jdalton @jon_neal @rem | MIT/GPL2 Licensed +*/ +!function(a,b){function c(a,b){var c=a.createElement("p"),d=a.getElementsByTagName("head")[0]||a.documentElement;return c.innerHTML="x",d.insertBefore(c.lastChild,d.firstChild)}function d(){var a=t.elements;return"string"==typeof a?a.split(" "):a}function e(a,b){var c=t.elements;"string"!=typeof c&&(c=c.join(" ")),"string"!=typeof a&&(a=a.join(" ")),t.elements=c+" "+a,j(b)}function f(a){var b=s[a[q]];return b||(b={},r++,a[q]=r,s[r]=b),b}function g(a,c,d){if(c||(c=b),l)return c.createElement(a);d||(d=f(c));var e;return e=d.cache[a]?d.cache[a].cloneNode():p.test(a)?(d.cache[a]=d.createElem(a)).cloneNode():d.createElem(a),!e.canHaveChildren||o.test(a)||e.tagUrn?e:d.frag.appendChild(e)}function h(a,c){if(a||(a=b),l)return a.createDocumentFragment();c=c||f(a);for(var e=c.frag.cloneNode(),g=0,h=d(),i=h.length;i>g;g++)e.createElement(h[g]);return e}function i(a,b){b.cache||(b.cache={},b.createElem=a.createElement,b.createFrag=a.createDocumentFragment,b.frag=b.createFrag()),a.createElement=function(c){return 
t.shivMethods?g(c,a,b):b.createElem(c)},a.createDocumentFragment=Function("h,f","return function(){var n=f.cloneNode(),c=n.createElement;h.shivMethods&&("+d().join().replace(/[\w\-:]+/g,function(a){return b.createElem(a),b.frag.createElement(a),'c("'+a+'")'})+");return n}")(t,b.frag)}function j(a){a||(a=b);var d=f(a);return!t.shivCSS||k||d.hasCSS||(d.hasCSS=!!c(a,"article,aside,dialog,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}mark{background:#FF0;color:#000}template{display:none}")),l||i(a,d),a}var k,l,m="3.7.3-pre",n=a.html5||{},o=/^<|^(?:button|map|select|textarea|object|iframe|option|optgroup)$/i,p=/^(?:a|b|code|div|fieldset|h1|h2|h3|h4|h5|h6|i|label|li|ol|p|q|span|strong|style|table|tbody|td|th|tr|ul)$/i,q="_html5shiv",r=0,s={};!function(){try{var a=b.createElement("a");a.innerHTML="",k="hidden"in a,l=1==a.childNodes.length||function(){b.createElement("a");var a=b.createDocumentFragment();return"undefined"==typeof a.cloneNode||"undefined"==typeof a.createDocumentFragment||"undefined"==typeof a.createElement}()}catch(c){k=!0,l=!0}}();var t={elements:n.elements||"abbr article aside audio bdi canvas data datalist details dialog figcaption figure footer header hgroup main mark meter nav output picture progress section summary template time video",version:m,shivCSS:n.shivCSS!==!1,supportsUnknownElements:l,shivMethods:n.shivMethods!==!1,type:"default",shivDocument:j,createElement:g,createDocumentFragment:h,addElements:e};a.html5=t,j(b),"object"==typeof module&&module.exports&&(module.exports=t)}("undefined"!=typeof window?window:this,document); \ No newline at end of file diff --git a/_static/js/theme.js b/_static/js/theme.js new file mode 100644 index 000000000..1fddb6ee4 --- /dev/null +++ b/_static/js/theme.js @@ -0,0 +1 @@ +!function(n){var e={};function t(i){if(e[i])return e[i].exports;var o=e[i]={i:i,l:!1,exports:{}};return 
n[i].call(o.exports,o,o.exports,t),o.l=!0,o.exports}t.m=n,t.c=e,t.d=function(n,e,i){t.o(n,e)||Object.defineProperty(n,e,{enumerable:!0,get:i})},t.r=function(n){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(n,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(n,"__esModule",{value:!0})},t.t=function(n,e){if(1&e&&(n=t(n)),8&e)return n;if(4&e&&"object"==typeof n&&n&&n.__esModule)return n;var i=Object.create(null);if(t.r(i),Object.defineProperty(i,"default",{enumerable:!0,value:n}),2&e&&"string"!=typeof n)for(var o in n)t.d(i,o,function(e){return n[e]}.bind(null,o));return i},t.n=function(n){var e=n&&n.__esModule?function(){return n.default}:function(){return n};return t.d(e,"a",e),e},t.o=function(n,e){return Object.prototype.hasOwnProperty.call(n,e)},t.p="",t(t.s=0)}([function(n,e,t){t(1),n.exports=t(3)},function(n,e,t){(function(){var e="undefined"!=typeof window?window.jQuery:t(2);n.exports.ThemeNav={navBar:null,win:null,winScroll:!1,winResize:!1,linkScroll:!1,winPosition:0,winHeight:null,docHeight:null,isRunning:!1,enable:function(n){var t=this;void 0===n&&(n=!0),t.isRunning||(t.isRunning=!0,e((function(e){t.init(e),t.reset(),t.win.on("hashchange",t.reset),n&&t.win.on("scroll",(function(){t.linkScroll||t.winScroll||(t.winScroll=!0,requestAnimationFrame((function(){t.onScroll()})))})),t.win.on("resize",(function(){t.winResize||(t.winResize=!0,requestAnimationFrame((function(){t.onResize()})))})),t.onResize()})))},enableSticky:function(){this.enable(!0)},init:function(n){n(document);var e=this;this.navBar=n("div.wy-side-scroll:first"),this.win=n(window),n(document).on("click","[data-toggle='wy-nav-top']",(function(){n("[data-toggle='wy-nav-shift']").toggleClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift")})).on("click",".wy-menu-vertical .current ul li a",(function(){var 
t=n(this);n("[data-toggle='wy-nav-shift']").removeClass("shift"),n("[data-toggle='rst-versions']").toggleClass("shift"),e.toggleCurrent(t),e.hashChange()})).on("click","[data-toggle='rst-current-version']",(function(){n("[data-toggle='rst-versions']").toggleClass("shift-up")})),n("table.docutils:not(.field-list,.footnote,.citation)").wrap("
"),n("table.docutils.footnote").wrap("
"),n("table.docutils.citation").wrap("
"),n(".wy-menu-vertical ul").not(".simple").siblings("a").each((function(){var t=n(this);expand=n(''),expand.on("click",(function(n){return e.toggleCurrent(t),n.stopPropagation(),!1})),t.prepend(expand)}))},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),t=e.find('[href="'+n+'"]');if(0===t.length){var i=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(t=e.find('[href="#'+i.attr("id")+'"]')).length&&(t=e.find('[href="#"]'))}if(t.length>0){$(".wy-menu-vertical .current").removeClass("current").attr("aria-expanded","false"),t.addClass("current").attr("aria-expanded","true"),t.closest("li.toctree-l1").parent().addClass("current").attr("aria-expanded","true");for(let n=1;n<=10;n++)t.closest("li.toctree-l"+n).addClass("current").attr("aria-expanded","true");t[0].scrollIntoView()}}catch(n){console.log("Error expanding nav for anchor",n)}},onScroll:function(){this.winScroll=!1;var n=this.win.scrollTop(),e=n+this.winHeight,t=this.navBar.scrollTop()+(n-this.winPosition);n<0||e>this.docHeight||(this.navBar.scrollTop(t),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",(function(){this.linkScroll=!1}))},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current").attr("aria-expanded","false"),e.siblings().find("li.current").removeClass("current").attr("aria-expanded","false");var t=e.find("> ul li");t.length&&(t.removeClass("current").attr("aria-expanded","false"),e.toggleClass("current").attr("aria-expanded",(function(n,e){return"true"==e?"false":"true"})))}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:n.exports.ThemeNav,StickyNav:n.exports.ThemeNav}),function(){for(var n=0,e=["ms","moz","webkit","o"],t=0;t0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" 
+ V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" + v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = 
re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/_static/logo.png b/_static/logo.png new file mode 100644 index 000000000..934ba5c22 Binary files /dev/null and b/_static/logo.png differ diff --git a/_static/minus.png b/_static/minus.png new file mode 100644 index 000000000..d96755fda Binary files /dev/null and b/_static/minus.png differ diff --git a/_static/plus.png b/_static/plus.png new file mode 100644 index 000000000..7107cec93 Binary files /dev/null and b/_static/plus.png differ diff --git a/_static/pygments.css b/_static/pygments.css new file mode 100644 index 000000000..0d49244ed --- /dev/null +++ b/_static/pygments.css @@ -0,0 +1,75 @@ +pre { line-height: 125%; } +td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #ffffcc } +.highlight { background: #eeffcc; } +.highlight .c { color: #408090; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid 
#FF0000 } /* Error */ +.highlight .k { color: #007020; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .ch { color: #408090; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #408090; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #007020 } /* Comment.Preproc */ +.highlight .cpf { color: #408090; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #408090; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #408090; background-color: #fff0f0 } /* Comment.Special */ +.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +.highlight .gr { color: #FF0000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000 } /* Generic.Inserted */ +.highlight .go { color: #333333 } /* Generic.Output */ +.highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0044DD } /* Generic.Traceback */ +.highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #007020 } /* Keyword.Pseudo */ +.highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #902000 } /* Keyword.Type */ +.highlight .m { color: #208050 } /* Literal.Number */ +.highlight .s { color: #4070a0 } /* Literal.String */ +.highlight .na { color: #4070a0 } /* Name.Attribute */ +.highlight .nb { color: #007020 } /* Name.Builtin */ +.highlight .nc { color: #0e84b5; font-weight: 
bold } /* Name.Class */ +.highlight .no { color: #60add5 } /* Name.Constant */ +.highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ +.highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #007020 } /* Name.Exception */ +.highlight .nf { color: #06287e } /* Name.Function */ +.highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ +.highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #bb60d5 } /* Name.Variable */ +.highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mb { color: #208050 } /* Literal.Number.Bin */ +.highlight .mf { color: #208050 } /* Literal.Number.Float */ +.highlight .mh { color: #208050 } /* Literal.Number.Hex */ +.highlight .mi { color: #208050 } /* Literal.Number.Integer */ +.highlight .mo { color: #208050 } /* Literal.Number.Oct */ +.highlight .sa { color: #4070a0 } /* Literal.String.Affix */ +.highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ +.highlight .sc { color: #4070a0 } /* Literal.String.Char */ +.highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */ +.highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #4070a0 } /* Literal.String.Double */ +.highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ +.highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ +.highlight .sx { color: #c65d09 } /* Literal.String.Other */ +.highlight .sr { color: #235388 } /* Literal.String.Regex */ +.highlight .s1 { color: #4070a0 } /* Literal.String.Single */ +.highlight .ss { color: #517918 } /* Literal.String.Symbol */ +.highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ +.highlight .fm { 
color: #06287e } /* Name.Function.Magic */ +.highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ +.highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ +.highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ +.highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */ +.highlight .il { color: #208050 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/_static/rev_flow_chart.png b/_static/rev_flow_chart.png new file mode 100644 index 000000000..d6ff65585 Binary files /dev/null and b/_static/rev_flow_chart.png differ diff --git a/_static/searchtools.js b/_static/searchtools.js new file mode 100644 index 000000000..97d56a74d --- /dev/null +++ b/_static/searchtools.js @@ -0,0 +1,566 @@ +/* + * searchtools.js + * ~~~~~~~~~~~~~~~~ + * + * Sphinx JavaScript utilities for the full-text search. + * + * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ +"use strict"; + +/** + * Simple result scoring code. + */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. 
+ objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docUrlRoot = DOCUMENTATION_OPTIONS.URL_ROOT; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + + const [docName, title, anchor, descr, score, _filename] = item; + + let listItem = document.createElement("li"); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = docUrlRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = docUrlRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms) + ); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + 
Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." + ); + else + Search.status.innerText = _( + `Search finished, found ${resultCount} page(s) matching the search query.` + ); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. + * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. + * This is the same as ``\W+`` in Python, preserving the surrogate pair area. + */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() }); + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent !== undefined) return docContent.textContent; + console.warn( + "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template." 
+ ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + Search.startPulse(); + + // index already loaded, the 
browser was quick! + if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + /** + * execute search (requires search index to be loaded) + */ + query: (query) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + // array of [docname, title, anchor, descr, score, filename] + let results = []; + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + let score = Math.round(100 * queryLower.length / title.length) + results.push([ + docNames[file], + 
titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id] of foundEntries) { + let score = Math.round(100 * queryLower.length / entry.length) + results.push([ + docNames[file], + titles[file], + id ? "#" + id : "", + null, + score, + filenames[file], + ]); + } + } + } + + // lookup as object + objectTerms.forEach((term) => + results.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + results.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item))); + + // now sort the results by score (in opposite order of appearance, since the + // display function below uses pop() to retrieve items) and then + // alphabetically + results.sort((a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 
1 : -1; + }); + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + results = results.reverse(); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. 
last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + const arr = [ + { files: terms[word], score: Scorer.term }, + { files: titleTerms[word], score: Scorer.title }, + ]; + // add support for partial matches + 
if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord) && !terms[word]) + arr.push({ files: terms[term], score: Scorer.partialTerm }); + }); + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord) && !titleTerms[word]) + arr.push({ files: titleTerms[word], score: Scorer.partialTitle }); + }); + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, {}); + scoreMap.get(file)[word] = record.score; + }); + }); + + // create the mapping + files.forEach((file) => { + if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1) + fileMap.get(file).push(word); + else fileMap.set(file, [word]); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + wordList.length !== filteredTermCount + ) + continue; + + // ensure that none of the excluded terms is in the search result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. 
+ const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w])); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords) => { + const text = Search.htmlToText(htmlText); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/_static/sphinx_highlight.js b/_static/sphinx_highlight.js new file mode 100644 index 000000000..aae669d7e --- /dev/null +++ b/_static/sphinx_highlight.js @@ -0,0 +1,144 @@ +/* Highlighting utilities for Sphinx HTML documentation. */ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. 
+ */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + parent.insertBefore( + span, + parent.insertBefore( + document.createTextNode(val.substr(pos + text.length)), + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. 
+ */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + localStorage.removeItem("sphinx_highlight_terms") + }, + + initEscapeListener: () => { + // only install a listener if it is really needed + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; + 
if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { + SphinxHighlight.hideSearchWords(); + event.preventDefault(); + } + }); + }, +}; + +_ready(SphinxHighlight.highlightSearchWords); +_ready(SphinxHighlight.initEscapeListener); diff --git a/_static/tabs.css b/_static/tabs.css new file mode 100644 index 000000000..957ba60d6 --- /dev/null +++ b/_static/tabs.css @@ -0,0 +1,89 @@ +.sphinx-tabs { + margin-bottom: 1rem; +} + +[role="tablist"] { + border-bottom: 1px solid #a0b3bf; +} + +.sphinx-tabs-tab { + position: relative; + font-family: Lato,'Helvetica Neue',Arial,Helvetica,sans-serif; + color: #1D5C87; + line-height: 24px; + margin: 0; + font-size: 16px; + font-weight: 400; + background-color: rgba(255, 255, 255, 0); + border-radius: 5px 5px 0 0; + border: 0; + padding: 1rem 1.5rem; + margin-bottom: 0; +} + +.sphinx-tabs-tab[aria-selected="true"] { + font-weight: 700; + border: 1px solid #a0b3bf; + border-bottom: 1px solid white; + margin: -1px; + background-color: white; +} + +.sphinx-tabs-tab:focus { + z-index: 1; + outline-offset: 1px; +} + +.sphinx-tabs-panel { + position: relative; + padding: 1rem; + border: 1px solid #a0b3bf; + margin: 0px -1px -1px -1px; + border-radius: 0 0 5px 5px; + border-top: 0; + background: white; +} + +.sphinx-tabs-panel.code-tab { + padding: 0.4rem; +} + +.sphinx-tab img { + margin-bottom: 24 px; +} + +/* Dark theme preference styling */ + +@media (prefers-color-scheme: dark) { + body[data-theme="auto"] .sphinx-tabs-panel { + color: white; + background-color: rgb(50, 50, 50); + } + + body[data-theme="auto"] .sphinx-tabs-tab { + color: white; + background-color: rgba(255, 255, 255, 0.05); + } + + body[data-theme="auto"] .sphinx-tabs-tab[aria-selected="true"] { + border-bottom: 1px solid rgb(50, 50, 50); + background-color: rgb(50, 50, 50); + } +} + +/* Explicit dark theme styling */ + +body[data-theme="dark"] .sphinx-tabs-panel { + color: white; + background-color: rgb(50, 50, 50); +} + 
+body[data-theme="dark"] .sphinx-tabs-tab { + color: white; + background-color: rgba(255, 255, 255, 0.05); +} + +body[data-theme="dark"] .sphinx-tabs-tab[aria-selected="true"] { + border-bottom: 2px solid rgb(50, 50, 50); + background-color: rgb(50, 50, 50); +} diff --git a/_static/tabs.js b/_static/tabs.js new file mode 100644 index 000000000..48dc303c8 --- /dev/null +++ b/_static/tabs.js @@ -0,0 +1,145 @@ +try { + var session = window.sessionStorage || {}; +} catch (e) { + var session = {}; +} + +window.addEventListener("DOMContentLoaded", () => { + const allTabs = document.querySelectorAll('.sphinx-tabs-tab'); + const tabLists = document.querySelectorAll('[role="tablist"]'); + + allTabs.forEach(tab => { + tab.addEventListener("click", changeTabs); + }); + + tabLists.forEach(tabList => { + tabList.addEventListener("keydown", keyTabs); + }); + + // Restore group tab selection from session + const lastSelected = session.getItem('sphinx-tabs-last-selected'); + if (lastSelected != null) selectNamedTabs(lastSelected); +}); + +/** + * Key focus left and right between sibling elements using arrows + * @param {Node} e the element in focus when key was pressed + */ +function keyTabs(e) { + const tab = e.target; + let nextTab = null; + if (e.keyCode === 39 || e.keyCode === 37) { + tab.setAttribute("tabindex", -1); + // Move right + if (e.keyCode === 39) { + nextTab = tab.nextElementSibling; + if (nextTab === null) { + nextTab = tab.parentNode.firstElementChild; + } + // Move left + } else if (e.keyCode === 37) { + nextTab = tab.previousElementSibling; + if (nextTab === null) { + nextTab = tab.parentNode.lastElementChild; + } + } + } + + if (nextTab !== null) { + nextTab.setAttribute("tabindex", 0); + nextTab.focus(); + } +} + +/** + * Select or deselect clicked tab. If a group tab + * is selected, also select tab in other tabLists. 
+ * @param {Node} e the element that was clicked + */ +function changeTabs(e) { + // Use this instead of the element that was clicked, in case it's a child + const notSelected = this.getAttribute("aria-selected") === "false"; + const positionBefore = this.parentNode.getBoundingClientRect().top; + const notClosable = !this.parentNode.classList.contains("closeable"); + + deselectTabList(this); + + if (notSelected || notClosable) { + selectTab(this); + const name = this.getAttribute("name"); + selectNamedTabs(name, this.id); + + if (this.classList.contains("group-tab")) { + // Persist during session + session.setItem('sphinx-tabs-last-selected', name); + } + } + + const positionAfter = this.parentNode.getBoundingClientRect().top; + const positionDelta = positionAfter - positionBefore; + // Scroll to offset content resizing + window.scrollTo(0, window.scrollY + positionDelta); +} + +/** + * Select tab and show associated panel. + * @param {Node} tab tab to select + */ +function selectTab(tab) { + tab.setAttribute("aria-selected", true); + + // Show the associated panel + document + .getElementById(tab.getAttribute("aria-controls")) + .removeAttribute("hidden"); +} + +/** + * Hide the panels associated with all tabs within the + * tablist containing this tab. + * @param {Node} tab a tab within the tablist to deselect + */ +function deselectTabList(tab) { + const parent = tab.parentNode; + const grandparent = parent.parentNode; + + Array.from(parent.children) + .forEach(t => t.setAttribute("aria-selected", false)); + + Array.from(grandparent.children) + .slice(1) // Skip tablist + .forEach(panel => panel.setAttribute("hidden", true)); +} + +/** + * Select grouped tabs with the same name, but no the tab + * with the given id. 
+ * @param {Node} name name of grouped tab to be selected + * @param {Node} clickedId id of clicked tab + */ +function selectNamedTabs(name, clickedId=null) { + const groupedTabs = document.querySelectorAll(`.sphinx-tabs-tab[name="${name}"]`); + const tabLists = Array.from(groupedTabs).map(tab => tab.parentNode); + + tabLists + .forEach(tabList => { + // Don't want to change the tabList containing the clicked tab + const clickedTab = tabList.querySelector(`[id="${clickedId}"]`); + if (clickedTab === null ) { + // Select first tab with matching name + const tab = tabList.querySelector(`.sphinx-tabs-tab[name="${name}"]`); + deselectTabList(tab); + selectTab(tab); + } + }) +} + +if (typeof exports === 'undefined') { + exports = {}; +} + +exports.keyTabs = keyTabs; +exports.changeTabs = changeTabs; +exports.selectTab = selectTab; +exports.deselectTabList = deselectTabList; +exports.selectNamedTabs = selectNamedTabs; diff --git a/api.html b/api.html new file mode 100644 index 000000000..4a14f140d --- /dev/null +++ b/api.html @@ -0,0 +1,627 @@ + + + + + + + <no title> — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ + + + + + + +

reV

The Renewable Energy Potential Model

+ + +
+
+
+ +
+ +
+

© Copyright 2018, Alliance for Sustainable Energy, LLC.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/genindex.html b/genindex.html new file mode 100644 index 000000000..dae1ee0a8 --- /dev/null +++ b/genindex.html @@ -0,0 +1,6483 @@ + + + + + + Index — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ + +

Index

+ +
+ Symbols + | _ + | A + | B + | C + | D + | E + | F + | G + | H + | I + | J + | K + | L + | M + | N + | O + | P + | Q + | R + | S + | T + | U + | V + | W + | Y + +
+

Symbols

+ + + +
+ +

_

+ + + +
+ +

A

+ + + +
+ +

B

+ + + +
+ +

C

+ + + +
+ +

D

+ + + +
+ +

E

+ + + +
+ +

F

+ + + +
+ +

G

+ + + +
+ +

H

+ + + +
+ +

I

+ + + +
+ +

J

+ + + +
+ +

K

+ + +
+ +

L

+ + + +
+ +

M

+ + + +
+ +

N

+ + + +
+ +

O

+ + + +
+ +

P

+ + + +
+ +

Q

+ + + +
+ +

R

+ + + +
+ +

S

+ + + +
+ +

T

+ + + +
+ +

U

+ + + +
+ +

V

+ + + +
+ +

W

+ + + +
+ +

Y

+ + + +
+ + + +
+
+
+ +
+ +
+

© Copyright 2018, Alliance for Sustainable Energy, LLC.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/index.html b/index.html new file mode 100644 index 000000000..69e0bc8eb --- /dev/null +++ b/index.html @@ -0,0 +1,810 @@ + + + + + + + reV documentation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+
+
+

reV documentation

+
+

What is reV?

+

reV (the Renewable Energy Potential model) +is an open-source geospatial techno-economic tool that +estimates renewable energy technical potential (capacity and generation), +system cost, and supply curves for solar photovoltaics (PV), +concentrating solar power (CSP), geothermal, and wind energy. +reV allows researchers to include exhaustive spatial representation +of the built and natural environment into the generation and cost estimates +that it computes.

+

reV is highly dynamic, allowing analysts to assess potential at varying levels +of detail — from a single site up to an entire continent at temporal resolutions +ranging from five minutes to hourly, spanning a single year or multiple decades. +The reV model can (and has been used to) provide broad coverage across large spatial +extents, including North America, South and Central Asia, the Middle East, South America, +and South Africa to inform national and international-scale analyses. Still, reV is +equally well-suited for regional infrastructure and deployment planning and analysis.

+

For a detailed description of reV capabilities and functionality, see the +NREL reV technical report.

+
+
+

How does reV work?

+

reV is a set of Python classes and functions +that can be executed on HPC systems using CLI commands. +A full reV execution consists of one or more compute modules +(each consisting of their own Python class/CLI command) +strung together using a pipeline framework, +or configured using batch.

+

A typical reV workflow begins with input wind/solar/geothermal resource data +(following the rex data format) +that is passed through the generation module. This output is then collected across space and time +(if executed on the HPC), before being sent off to be aggregated under user-specified land exclusion scenarios. +Exclusion data is typically provided via a collection of high-resolution spatial data layers stored in an HDF5 file. +This file must be readable by reV’s +ExclusionLayers +class. See the reVX Setbacks utility +for instructions on generating setback exclusions for use in reV. +Next, transmission costs are computed for each aggregated +“supply-curve point” using user-provided transmission cost tables. +See the reVX transmission cost calculator utility +for instructions on generating transmission cost tables. +Finally, the supply curves and initial generation data can be used to +extract representative generation profiles for each supply curve point.

+

A visual summary of this process is given below:

+Typical reV workflow +
+

+
+

To get up and running with reV, first head over to the installation page, +then check out some of the Examples or +go straight to the CLI Documentation! +You can also check out the guide on running GAPs models.

+
+
+

Installing reV

+

NOTE: The installation instruction below assume that you have python installed +on your machine and are using conda +as your package/environment manager.

+ + +
+
+

reV command line tools

+ +
+

Launching a run

+

Tips

+ +
reV -c "/scratch/user/rev/config_pipeline.json" pipeline
+
+
+
    +
  • Running only the generation or econ module can be done directly from the console:

  • +
+
reV -c "/scratch/user/rev/config_gen.json" generation
+
+
+
+
+

General Run times and Node configuration on Eagle

+
    +
  • WTK Conus: 10-20 nodes per year walltime 1-4 hours

  • +
  • NSRDB Conus: 5 nodes walltime 2 hours

  • +
+

Eagle node requests

+
+
+ +
+ + +
+
+
+ +
+ +
+

© Copyright 2018, Alliance for Sustainable Energy, LLC.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/examples.advanced_econ_modeling.html b/misc/examples.advanced_econ_modeling.html new file mode 100644 index 000000000..4f392737a --- /dev/null +++ b/misc/examples.advanced_econ_modeling.html @@ -0,0 +1,655 @@ + + + + + + + SAM Single Owner Modeling — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

SAM Single Owner Modeling

+

This example set shows how several of the reV features (batching, pipeline, +site-data) can be used in concert to create complex spatially-variant economic +analyses.

+

This example modifies the tax rate and PPA price inputs for each state. +More complex input sets on a site-by-site basis can be easily generated using a +similar site_data input method.

+
+

Workflow Description

+

The batching config in this example represents the high-level executed module. +The user executes the following command:

+
reV batch -c "../config_batch.json"
+
+
+

This creates and executes three batch job pipelines. You should be able to see +in config_batch.json how the actual input generation files are +parameterized. This is the power of the batch module - it’s sufficiently +generic to modify ANY key-value pairs in any .json file, including other +config files.

+

The first module executed in each job pipeline is the econ module. This example +shows how the site-specific input .csv can be used (see the “site_data” key +in the config_econ.json file).

+

The site_data.csv file sets site-specific input data corresponding to the +gids in the project points file. Data inputs keyed by each column header in the +site_data.csv file will be added to or replace an input in the +“tech_configs” .json files (sam_files).

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/examples.aws_pcluster.html b/misc/examples.aws_pcluster.html new file mode 100644 index 000000000..bd74c88f1 --- /dev/null +++ b/misc/examples.aws_pcluster.html @@ -0,0 +1,910 @@ + + + + + + + Running reV on an AWS Parallel Cluster — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Running reV on an AWS Parallel Cluster

+

reV was originally designed to run on the NREL high performance computer (HPC), but you can now run reV on AWS using the NREL renewable energy resource data (the NSRDB and WTK) that lives on S3. This example will guide you through how to set up reV on an AWS HPC environment with dynamically scaled EC2 compute resources and input resource data sourced from S3 via HSDS.

+

If you plan on only running reV for a handful of sites (less than 100), first check out our running with HSDS example, which will be a lot easier to get started with. Larger reV jobs require you stand up your own AWS parallel cluster and HSDS server. Very small jobs can be run locally using the NREL HSDS developer API.

+

Note that everything should be done in AWS region us-west-2 (Oregon) since this is where the NSRDB and WTK data live on S3.

+
+

Setting up an AWS Parallel Cluster

+
    +
  1. Get started with the AWS HPC Overview.

  2. +
  3. Set up a Cloud9 IDE.

  4. +
  5. Set up an AWS Parallel Cluster.

    +
    +
      +
    1. Use the rev-pcluster-config.ini file as an example.

    2. +
    3. Choose a basic instance for head node (master_instance_type), e.g. t2.micro, t2.large, c5.large, or c5.xlarge. Note that a t2 instance is free-tier eligible and is probably sufficient for the pcluster login node which will not be doing any of the compute or storage.

    4. +
    5. Choose a shared EBS storage volume (this is the /shared/ volume) with a “gp2” (volume_type) which can have SSD storage ranging from 1GB-16TB (volume_size).

    6. +
    +
    +
  6. +
  7. Optional, set up an HPC parallel filesystem.

    +
    +
      +
    • Seems like EBS is probably fine and FSx is unnecessary for non-IO-intensive reV modules. Generation will retrieve resource data from HSDS and so is probably fine with EBS. SC-aggregation is probably fine with EBS if you set pre_extract_inclusions=True.

    • +
    +
    +
  8. +
  9. Login to your cluster from your cloud9 IDE: pcluster ssh pcluster_name -i ~/.ssh/lab-3-key

  10. +
  11. Get Miniconda, install, and activate your conda environment.

    +
    +
      +
    1. wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh

    2. +
    3. sh Miniconda3-latest-Linux-x86_64.sh

    4. +
    5. source ~/.bashrc

    6. +
    +
    +
  12. +
  13. Set up an HSDS service. At this time, it is recommended that you use HSDS Local Servers on your compute cluster. See instructions below for details.

  14. +
  15. Install reV

    +
    +
      +
    1. You need to clone the reV repo to get the aws_pcluster example files. reV example files do not ship with the pypi package.

    2. +
    3. You will have to first add the pcluster public ssh key (cat ~/.ssh/id_rsa.pub) to your github ssh keys.

    4. +
    5. Put the reV repo in the /shared/ volume so that the aws_pcluster project directory is in the large EBS storage volume shared between compute nodes.

    6. +
    7. cd /shared/

    8. +
    9. git clone git@github.com:NREL/reV.git

    10. +
    11. cd /shared/reV/

    12. +
    13. pip install -e .

    14. +
    +
    +
  16. +
  17. Try running the reV aws_pcluster example:

    +
    +
      +
    1. cd /shared/reV/examples/aws_pcluster

    2. +
    3. reV pipeline -c config_pipeline.json

    4. +
    5. Check the slurm queue with squeue and the compute cluster status in the ec2 console or with sinfo

    6. +
    7. Jobs will be in state CF (configuring) while the nodes spin up (this can take several minutes) and then R (running)

    8. +
    +
    +
  18. +
+
+
+

Notes on Running reV in the AWS Parallel Cluster

+
    +
  1. If you don’t configure a custom HSDS Service you will almost certainly see 503 errors from too many requests being processed. See the instructions below to configure an HSDS Service.

  2. +
  3. AWS EC2 instances usually have twice as many vCPUs as physical CPUs due to a default of two threads per physical CPU (at least for the c5 instances) (see disable_hyperthreading = false). The pcluster framework treats each thread as a “node” that can accept one reV job. For this reason, it is recommended that you scale the "nodes" entry in the reV generation config file but keep "max_workers": 1. For example, if you use two c5.2xlarge instances in your compute fleet, this is a total of 16 vCPUs, each of which can be thought of as a HPC “node” that can run one process at a time.

  4. +
  5. If you setup an HSDS local server but the parallel cluster ends up sending too many requests (some nodes but not all will see 503 errors), you can try upping the max_task_count in the ~/hsds/admin/config/override.yml file.

  6. +
  7. If your HSDS local server nodes run out of memory (monitor with docker stats), you can try upping the dn_ram or sn_ram options in the ~/hsds/admin/config/override.yml file.

  8. +
  9. The best way to stop your pcluster is using pcluster stop pcluster_name from the cloud9 IDE (not ssh’d into the pcluster) and then stop the login node in the AWS Console EC2 interface (find the “master” node and stop the instance). This will keep the EBS data intact and not charge you for EC2 costs. When you’re done with the pcluster you can call pcluster delete pcluster_name but this will also delete all of the EBS data.

  10. +
+
+
+

Setting up HSDS Local Servers on your Compute Cluster

+

The current recommended approach for setting up an HSDS service for reV is to start local HSDS servers on your AWS parallel cluster compute nodes. These instructions set up a shell script that each reV compute job will run on its respective compute node. The shell script checks that an HSDS local server is running, and will start one if not. These instructions are generally copied from the HSDS AWS README with a few modifications.

+
    +
  1. Make sure you have installed Miniconda but have not yet installed reV/rex.

  2. +
  3. Clone the HSDS Repository. into your home directory in the pcluster login node: git clone git@github.com:HDFGroup/hsds.git (you may have to set up your ssh keys first).

  4. +
  5. Install HSDS by running python setup.py install from the hsds repository folder (running python setup.py install is currently required as the setup script does some extra magic over a pip installation).

  6. +
  7. Copy the password file: cp ~/hsds/admin/config/passwd.default ~/hsds/admin/config/passwd.txt and (optionally) modify any username/passwords you wish.

  8. +
  9. Create an HSDS config file at ~/.hscfg with the following entries:

    +
    +
    # Local HSDS server
    +hs_endpoint = http://localhost:5101
    +hs_username = admin
    +hs_password = admin
    +hs_api_key = None
    +hs_bucket = nrel-pds-hsds
    +
    +
    +
    +
  10. +
  11. Copy the start_hsds.sh script from this example (source file) to your home directory in the pcluster login node (e.g. cp /shared/reV/examples/aws_pcluster/start_hsds.sh ~/).

  12. +
  13. Replace the following environment variables in start_hsds.sh with your values: AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, and BUCKET_NAME (note that you should use AWS keys from an IAM user with admin privileges and not your AWS console root user).

  14. +
  15. Optional, to test your HSDS local server config, do the following:

    +
    +
      +
    1. Run the start script: sh ~/start_hsds.sh

    2. +
    3. Run docker ps and verify that there are 4 or more HSDS services active (hsds_rangeget_1, hsds_sn_1, hsds_head_1, and an hsds_dn_* node for every available core)

    4. +
    5. Run hsinfo and verify that this doesn’t throw an error

    6. +
    7. Try running pip install h5pyd and then run the h5pyd test (either the .py in this example or the h5pyd test snippet below).

    8. +
    +
    +
  16. +
  17. Make sure this key-value pair is set in the execution_control block of the config_gen.json file: "sh_script": "sh ~/start_hsds.sh"

  18. +
  19. Optional, copy the config override file: cp ~/hsds/admin/config/config.yml ~/hsds/admin/config/override.yml, update any config lines in the override.yml file that you wish to change, and remove all other lines (see notes on max_task_count and dn_ram).

  20. +
  21. You should be good to go! The line in the generation config file makes reV run the start_hsds.sh script before running the reV job. The script will install docker and make sure one HSDS server is running per EC2 instance.

  22. +
+
+
+

Setting up an HSDS Kubernetes Service

+

Setting up your own HSDS Kubernetes service is one way to run a large reV job with full parallelization. This has not been trialed by the NREL team in full, but we have tested on the HSDS group’s Kubernetes cluster. If you want to pursue this route, you can follow the HSDS repository instructions for HSDS Kubernetes on AWS.

+
+
+

Setting up an HSDS Lambda Service

+

We’ve tested AWS Lambda functions as the HSDS service for reV workflows and we’ve found that Lambda functions require too much overhead to work well with the reV workflow. These instructions are included here for posterity, but HSDS-Lambda is _not_ recommended for the reV workflow.

+

These instructions are generally copied from the HSDS Lambda README with a few modifications.

+

It seems you cannot currently use the public ECR container image from the HSDS ECR repo so the first few bullets are instructions on how to set up your own HSDS image and push to a private ECR repo.

+

H5pyd cannot currently call a lambda function directly, so the instructions at the end show you how to set up an API gateway that interfaces between h5pyd and the lambda function.

+

Follow these instructions from your Cloud9 environment. None of this is directly related to the pcluster environment, except for the requirement to add the .hscfg file in the pcluster home directory.

+
    +
  1. Clone the HSDS repository into your Cloud9 environment.

  2. +
  3. You may need to resize your EBS volume.

  4. +
  5. In the AWS Management Console, create a new ECR repository called “hslambda”. Keep the default private repo settings.

  6. +
  7. Create an HSDS image and push to your hslambda ECR repo. This sublist is a combination of commands from the ECR push commands and the HSDS build instructions (make sure you use the actual push commands from your ECR repo with the actual region, repository name, and aws account id):

    +
    +
      +
    1. cd hsds

    2. +
    3. aws ecr get-login-password --region region | docker login --username AWS --password-stdin aws_account_id.dkr.ecr.region.amazonaws.com

    4. +
    5. sh lambda_build.sh

    6. +
    7. docker tag hslambda:latest aws_account_id.dkr.ecr.region.amazonaws.com/my-repository:tag

    8. +
    9. docker push aws_account_id.dkr.ecr.region.amazonaws.com/my-repository:tag

    10. +
    +
    +
  8. +
  9. You should now see your new image appear in your hslambda ECR repo in the AWS Console. Get the URI from this image.

  10. +
  11. In the AWS Management Console, go to the Lambda service interface in your desired region (us-west-2, Oregon).

  12. +
  13. Click “Create Function” -> Choose “Container Image” option, function name is hslambda, use the Container Image URI from the image you just uploaded to your ECR repo, select “Create Function” and wait for the image to load.

  14. +
  15. You should see a banner saying you’ve successfully created the hslambda function. Yay!

  16. +
  17. Set the following in the configuration tab:

    +
    +
      +
    1. Use at least 1024MB of memory (feel free to tune this later for your workload)

    2. +
    3. Timeout of at least 30 seconds (feel free to tune this later for your workload)

    4. +
    5. Use an execution role that includes S3 read only access

    6. +
    7. Add an environment variable AWS_S3_GATEWAY: http://s3.us-west-2.amazonaws.com

    8. +
    +
    +
  18. +
  19. Select the “Test” tab and click on the “Test” button. You should see a successful run with a status_code of 200 and an output like this:

    +
    +
    {
    +  "isBase64Encoded": false,
    +  "statusCode": 200,
    +  "headers": "{\"Content-Type\": \"application/json; charset=utf-8\", \"Content-Length\": \"323\", \"Date\": \"Tue, 23 Nov 2021 22:27:08 GMT\", \"Server\": \"Python/3.8 aiohttp/3.8.1\"}",
    +  "body": "{\"start_time\": 1637706428, \"state\": \"READY\", \"hsds_version\": \"0.7.0beta\", \"name\": \"HSDS on AWS Lambda\", \"greeting\": \"Welcome to HSDS!\", \"about\": \"HSDS is a webservice for HDF data\", \"node_count\": 1, \"dn_urls\": [\"http+unix://%2Ftmp%2Fhs1a1c917f%2Fdn_1.sock\"], \"dn_ids\": [\"dn-001\"], \"username\": \"anonymous\", \"isadmin\": false}"
    +}
    +
    +
    +
    +
  20. +
  21. Now we need to create an API Gateway so that reV and h5pyd can interface with the lambda function. Go to the API Gateway page in the AWS console and do these things:

    +
    +
      +
    1. Create API -> choose HTTP API (build)

    2. +
    3. Add integration -> Lambda -> use us-west-2, select your lambda function, use some generic name like hslambda-api

    4. +
    5. Configure routes -> Method is ANY, the Resource path is $default, the integration target is your lambda function

    6. +
    7. Configure stages -> Stage name is $default and auto-deploy must be enabled

    8. +
    9. Create and get the API’s Invoke URL, something like https://XXXXXXX.execute-api.us-west-2.amazonaws.com

    10. +
    +
    +
  22. +
  23. Make a .hscfg file in the home dir (/home/ec2-user/) in your Cloud9 env. Make sure you also have this config in your pcluster filesystem. The config file should have these entries:

    +
    +
    # HDFCloud configuration file
    +hs_endpoint = https://XXXXXXX.execute-api.us-west-2.amazonaws.com
    +hs_username = hslambda
    +hs_password = lambda
    +hs_api_key = None
    +hs_bucket = nrel-pds-hsds
    +
    +
    +
    +
  24. +
  25. All done! You should now be able to run the aws_pcluster test sourcing data from /nrel/nsrdb/v3/nsrdb_{}.h5 or the simple h5pyd test below.

  26. +
  27. Here are some summary notes for posterity:

    +
    +
      +
    1. We now have a lambda function hslambda that will retrieve data from the NSRDB or WTK using the HSDS service.

    2. +
    3. We have an API Gateway that we can use as an endpoint for API requests

    4. +
    5. We have configured h5pyd with the .hscfg file to hit that API endpoint with the proper username, password, and bucket target

    6. +
    7. reV will now retrieve data from the NSRDB or WTK in parallel requests to the hslambda function via h5pyd.

    8. +
    9. Woohoo! We did it!

    10. +
    +
    +
  28. +
+
+
+

Simple H5PYD Test

+

Here’s a simple h5pyd test to make sure you can retrieve data from the NSRDB/WTK via HSDS. This python example should return a numpy.ndarray object with shape (17520,). Obviously you will need to install python and h5pyd before running this test.

+
from rex import init_logger
+import h5pyd
+import logging
+
+if __name__ == '__main__':
+    logger = logging.getLogger(__name__)
+    init_logger(__name__, log_level='DEBUG')
+    fp = '/nrel/nsrdb/v3/nsrdb_2019.h5'
+    with h5pyd.File(fp, logger=__name__) as f:
+        data = f['ghi'][:, 0]
+    print(data)
+    print(type(data))
+    print(data.shape)
+
+
+
+
+

Compute Cost Estimates

+

Here are some initial compute cost results and estimates for running reV generation (the largest compute module in reV). All estimates are only for EC2 compute costs based on c5.2xlarge instances at the on-demand price of $0.34 per hour. These numbers are rough estimates! Consider making your own estimates before developing a budget. The EC2 costs could be reduced significantly if running in the EC2 spot market (see how to configure pcluster spot pricing here. The sites_per_worker input in the config_gen.json file will also influence the computational efficiency.

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
reV PCluster Compute Costs (Empirical)

Compute Module

Timesteps

Sites

Total Datum

Total Compute Time (hr)

Total EC2 Cost

Cost per Datum

PVWattsv7

35088

1850

6.49e7

3.4

$1.15

1.77e-8

Windpower

17544

6268

1.10e8

1.2

$0.42

3.79e-09

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
CONUS Compute Costs (Estimated)

Compute Module

Source Data

Timesteps (one year)

Sites

Total Datum

Total Compute Time (hr)

Total EC2 Cost

PVWattsv7

NSRDB (4km, 30min)

17520

~5e05

8.76e9

457.12

$155.42

Windpower

WTK (2km, 1hr)

8760

~2e6

1.75e10

195.21

$66.37

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/examples.batched_execution.html b/misc/examples.batched_execution.html new file mode 100644 index 000000000..ada891dc8 --- /dev/null +++ b/misc/examples.batched_execution.html @@ -0,0 +1,689 @@ + + + + + + + Batched Execution — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Batched Execution

+

This example set shows how reV inputs can be parameterized and the execution +can be batched.

+
+

Batching Config Description

+
+
    +
  • “sets” in the batch config is a list of batches.

  • +
  • Each “batch” is a dictionary containing “args” and “files”.

  • +
  • “args” are the key/value pairs from which the batching combinations will be +made. Each unique combination of args represents a job. If two args are +specified with three possible values each, nine jobs will be run.

  • +
  • The unique combinations of “args” will be replaced in all files in the +“files” list. The arg must already exist in the file for the new values to +be inserted. The replacement is done recursively.

  • +
  • Batch jobs will be assigned names based on the args. Accordingly, the name +field specification should be omitted in all configs.

  • +
+
+
+
+

How to Run

+

Before submitting batch jobs, it is sometimes useful to perform a “dry-run” +which will create all of the batch sub directories without submitting jobs to +SLURM:

+
reV batch -c "../config_batch.json" --dry-run
+
+
+

Once you are happy with the dry-run, or if you are confident in your job setup, +you can submit all batch jobs using the following CLI call:

+
reV batch -c "../config_batch.json"
+
+
+

If anything goes wrong, you can cancel all batch jobs using the command:

+
reV batch -c "../config_batch.json" --cancel
+
+
+

New sub directories will be created in the folder with the batch config file +for each sub job. All job files in the same directory (and sub directories) as +the batch config file will be copied into the job folders. The reV pipeline +manager will be executed in each sub directory. The above batch cli command +can be issued repeatedly to clean up the sub directory status .jsons, +kick off the next step in the pipeline, or to rerun failed jobs. See the reV +pipeline execution example for more details on how the pipeline works.

+

You can also have the batch module submit pipeline monitoring background +processes using the --monitor-background flag as shown below.

+

Please note that the stdout/stderr of the background processes will not be +captured, so it’s important to set the log_file argument in the pipeline +config.

+
reV batch -c "../config_batch.json" --monitor-background
+
+
+

All of the batch jobs can be collected into a single file using the multi-year +collection utility. This utility is not part of the batch pipeline and needs to +be executed and configured separately. See the config_multi-year.json file +for details on how to setup this collection step. To execute, use the following +command:

+
reV multi-year -c "../config_multi-year.json"
+
+
+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/examples.eagle_node_requests.html b/misc/examples.eagle_node_requests.html new file mode 100644 index 000000000..3e415de09 --- /dev/null +++ b/misc/examples.eagle_node_requests.html @@ -0,0 +1,668 @@ + + + + + + + Eagle Node Requests — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Eagle Node Requests

+

When running reV on Eagle, it’s only necessary to specify the allocation and +the walltime. The partition will be chosen automatically and you will be given +access to the node’s full memory. So a default execution control block in the +config .json for the standard partition should look like the following:

+
"execution_control": {
+    "allocation": "rev",
+    "nodes": 5,
+    "option": "eagle",
+    "walltime": 10.0
+    },
+
+
+

A node request with high priority in the bigmem partition should look like the +following:

+
"execution_control": {
+    "allocation": "rev",
+    "feature": "-p bigmem",
+    "qos": "high",
+    "nodes": 5,
+    "option": "eagle",
+    "walltime": 10.0
+    },
+
+
+

A node request with high priority in the short partition with a 192 GB node +should look like the following:

+
"execution_control": {
+    "allocation": "rev",
+    "qos": "high",
+    "memory": 192,
+    "nodes": 5,
+    "option": "eagle",
+    "walltime": 4.0
+    },
+
+
+

Note that because of the way SLURM does memory allocations, if the memory is requested +explicitly in the config .json and a larger node is received, the user can +only use memory up to the requested memory value.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/examples.full_pipeline_execution.html b/misc/examples.full_pipeline_execution.html new file mode 100644 index 000000000..b32b52e33 --- /dev/null +++ b/misc/examples.full_pipeline_execution.html @@ -0,0 +1,691 @@ + + + + + + + Full Pipeline Execution — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Full Pipeline Execution

+

This set of example files demonstrates how to run the full reV pipeline using +the pipeline manager.

+

The full pipeline can be executed using the following CLI call:

+
reV pipeline -c ./config_pipeline.json
+
+
+

You can also use the --monitor flag to continuously monitor the pipeline +and submit jobs for the next pipeline step when the current pipeline step is +complete:

+
reV pipeline -c ./config_pipeline.json --monitor
+
+
+

The continuous monitoring will stop when the full pipeline completes +successfully or if any part of a pipeline step fails. The continuous monitoring +can also be run in a nohup background process by +adding the --background flag:

+
reV pipeline -c ./config_pipeline.json --monitor --background
+
+
+

It’s important to note that background monitoring will not capture the +stdout/stderr, so you should set the log_file argument in the pipeline +config json file to log any important messages from the pipeline module.

+

Finally, if anything goes wrong you can cancel all the pipeline jobs using +the --cancel flag:

+
reV pipeline -c ./config_pipeline.json --cancel
+
+
+
+

Pipeline Input Requirements

+

The reV pipeline manager will perform several checks to ensure the following +input requirements are satisfied. These requirements are necessary to track the +pipeline status and to pipe i/o through the modules.

+
    +
  1. All pipeline modules must have the same output directory.

  2. +
  3. Only one pipeline can be run per output directory.

  4. +
  5. Each module run by the pipeline must have a unique job name (not specifying +a name in the configs is preferred, and will use the directory name plus a +suffix for each module).

  6. +
+
+
+

Failed Jobs

+

The pipeline manager will keep a status of jobs that are submitted, running, +successful, or failed. If any jobs fail in a pipeline step, the pipeline will +wait until all jobs in that step have completed, then raise a failed message. +Error messages can be found in the stdout/stderr files belonging to the +respective failed job(s). The user can re-submit the full pipeline job and +only the jobs that failed will be re-run. If full modules had previously +finished successfully, those modules will be skipped.

+
+
+

File Inputs

+

There are several files beyond the NSRDB resource data used in this example +that are too big to be stored on github:

+
    +
  1. conus_trans_lines_cache_064_sj_infsink.csv in +config_supply-curve.json is a transmission feature table from the reV +database.

  2. +
  3. rev_conus_exclusions.h5 in config_aggregation.json is an h5 +exclusions file containing exclusion layers for CONUS.

  4. +
+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/examples.html b/misc/examples.html new file mode 100644 index 000000000..0be0f670f --- /dev/null +++ b/misc/examples.html @@ -0,0 +1,699 @@ + + + + + + + Examples — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/misc/examples.marine_energy.html b/misc/examples.marine_energy.html new file mode 100644 index 000000000..1ab870cd2 --- /dev/null +++ b/misc/examples.marine_energy.html @@ -0,0 +1,645 @@ + + + + + + + reV Marine Energy — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV Marine Energy

+

This example leverages the new SAM marine hydrokinetic (MHK) energy models, the +NREL hindcast wave data, the MHK cost models in NRWAL, and the integration of +everything into reV for large-scale spatiotemporal analysis. The configs in +this example run a batched project that estimates the spatiotemporal capacity +factors and costs associated with three common wave energy reference models in +the Atlantic and Pacific Oceans.

+

National Renewable Energy Laboratory. (2020). High Resolution Ocean Surface +Wave Hindcast (US Wave) Data [data set]. Retrieved from +https://dx.doi.org/10.15473/1647329.

+

NRWAL Marine Hydrokinetic Energy Cost Models

+

PySAM MHK Wave Energy Model

+
+

Plots of the Example Marine Energy Output

+../_images/lcoe_fcr_atlantic_rm5.png +../_images/lcoe_fcr_pacific_rm5.png +
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/examples.offshore_wind.html b/misc/examples.offshore_wind.html new file mode 100644 index 000000000..4805ddb2b --- /dev/null +++ b/misc/examples.offshore_wind.html @@ -0,0 +1,666 @@ + + + + + + + Offshore Wind Modeling — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Offshore Wind Modeling

+

This example includes configs to run a reV wind analysis for a small test +extent off the east coast. This example is only meant to demonstrate how to set +up an offshore wind LCOE analysis using reV + NRWAL. Note that some inputs and +configurations are purely fictitious and should not be used in a real analysis. +For example, this test case models the same turbine onshore and offshore. The +substructure for the offshore turbines are also assumed to always be a floating +semi-submersible which is not realistic, especially for shallow waters.

+
+

reV Offshore Module Description

+

The pipeline includes the reV-NRWAL module (replaced the historical +reV-offshore module), which is run after the generation module. The offshore +module takes the gross generation (gross capacity factor, set offshore turbine +losses to zero!) and uses NRWAL to calculate generation losses and LCOE.

+

Example NRWAL configs slightly modified for use with reV can be seen in this +example. The primary modification for usage in reV is that NRWAL typically +calculates the grid connection cost with the “grid” equations. Currently, reV +uses NRWAL to calculate the array and export (to shore) transmission costs and +then uses the supply curve transmission cost tables to calculate the grid +connection costs.

+
+
+

Treatment of Offshore Points in Supply Curve

+

Offshore points are treated identically to onshore points in the supply curve +(not run here). All resource pixels maintain their source resolution (usually +the 2km WTK resolution) until the reV aggregation step, where exclusions are +applied and the data is aggregated up to the supply curve grid. Supply curve +transmission cost tables must include transmission costs for offshore supply +curve points. There is no separate or special handling of offshore supply curve +transmission connection.

+
+
+

Plots of the Example Offshore Output

+../_images/mean_cf.png +../_images/mean_ws_mean-means.png +../_images/mean_lcoe.png +../_images/mean_depth.png +
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/examples.project_points.html b/misc/examples.project_points.html new file mode 100644 index 000000000..7bb863b2e --- /dev/null +++ b/misc/examples.project_points.html @@ -0,0 +1,802 @@ + + + + + + + reV Project Points — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV Project Points

+

reV Gen +and reV Econ +use Project Points to define which resource sites (gids) to run through +PySAM and how.

+

At its most basic Project Points consists of the resource gid``s and the +``SAM configuration file associated with it. This can be defined in a variety +of ways:

+
    +
  1. From a project points .csv and a single or dictionary of SAM +configuration files:

  2. +
+
import os
+from reV import TESTDATADIR
+from reV.config.project_points import ProjectPoints
+
+fpp = os.path.join(TESTDATADIR, 'project_points/pp_offshore.csv')
+sam_files = {'onshore': os.path.join(
+             TESTDATADIR, 'SAM/wind_gen_standard_losses_0.json'),
+             'offshore': os.path.join(
+             TESTDATADIR, 'SAM/wind_gen_standard_losses_1.json')}
+
+pp = ProjectPoints(fpp, sam_files)
+display(pp.df)
+
+            gid   config
+0       2114919  onshore
+1       2114920  onshore
+2       2114921  onshore
+3       2114922  onshore
+4       2114923  onshore
+...         ...      ...
+124402  2239321  onshore
+124403  2239322  onshore
+124404  2239323  onshore
+124405  2239324  onshore
+124406  2239325  onshore
+
+[124407 rows x 2 columns]
+
+
+
    +
  1. From a list or slice of gids and a single SAM configuration file:

  2. +
+
import os
+from reV import TESTDATADIR
+from reV.config.project_points import ProjectPoints
+
+sites = slice(0, 100)  # or
+sites = [0, 5, 6, 9, 12]
+
+sam_file = os.path.join(TESTDATADIR, 'SAM/wind_gen_standard_losses_0.json')
+
+pp = ProjectPoints(sites, sam_file)
+display(pp.df)
+
+   gid                                             config
+0    0  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+1    5  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+2    6  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+3    9  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+4   12  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+
+
+
    +
  1. From a pair or pairs of latitude and longitude coordinates and a single +SAM configuration file (NOTE: access to the resource file to be used +for reV Gen or reV Econ is needed to find the associated resource +gids):

  2. +
+
import os
+from reV import TESTDATADIR
+from reV.config.project_points import ProjectPoints
+
+lat_lons = [41.77, -71.74]
+lat_lons = array([[ 41.77, -71.74],
+                  [ 41.73, -71.7 ],
+                  [ 42.01, -71.7 ],
+                  [ 40.97, -71.74],
+                  [ 41.49, -71.78]])
+
+res_file = os.path.join(TESTDATADIR, 'nsrdb/', 'ri_100_nsrdb_2012.h5')
+sam_file = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13.json')
+
+pp = ProjectPoints.lat_lon_coords(lat_lons, res_file, sam_file)
+display(pp.df)
+
+   gid                                             config
+0   49  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+1   67  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+2   79  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+3   41  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+4   31  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+
+
+
    +
  1. A geographic region or regions and a single SAM configuration file +(NOTE: access to the resource file to be used for reV Gen or +reV Econ is needed to find the associated resource gids):

  2. +
+
import os
+from reV import TESTDATADIR
+from reV.config.project_points import ProjectPoints
+
+# Of form {region : region_column}
+regions = {'Rhode Island': 'state'}  # or
+regions = {'Providence': 'county', 'Kent': 'county'}
+
+res_file = os.path.join(TESTDATADIR, 'nsrdb/', 'ri_100_nsrdb_2012.h5')
+sam_file = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13.json')
+
+pp = ProjectPoints.regions(regions, res_file, sam_file)
+display(pp.df)
+
+    gid                                             config
+0    13  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+1    14  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+2    18  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+3    19  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+4    29  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+5    32  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+6    33  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+7    38  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+8    40  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+9    48  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+10   49  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+11   52  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+12   53  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+13   55  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+14   67  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+15   69  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+16   71  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+17   77  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+18   78  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+19   82  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+20   83  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+21   94  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+22   96  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+23   17  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+24   25  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+25   26  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+26   36  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+27   44  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+28   59  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+29   68  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+30   87  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+31   90  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+32   98  /Users/mrossol/Git_Repos/reV/tests/data/SAM/wi...
+
+
+
+

Command Line Interface (CLI)

+

Options 3 and 4 above can be run from the Command Line using the +reV-project-points +CLI

+
out_file='./project_points.csv'
+
+TESTDATADIR=.../tests/data
+res_file=${TESTDATADIR}/nsrdb/ri_100_nsrdb_2012.h5
+sam_file=${TESTDATADIR}/SAM/naris_pv_1axis_inv13.json
+
+reV-project-points --fpath=${out_file} --res_file=${res_file} --sam_file=${sam_file} from-lat-lons --lat_lon_coords 41.77 -71.74
+
+
+
out_file='./project_points.csv'
+
+TESTDATADIR=.../tests/data
+res_file=${TESTDATADIR}/nsrdb/ri_100_nsrdb_2012.h5
+sam_file=${TESTDATADIR}/SAM/naris_pv_1axis_inv13.json
+
+reV-project-points --fpath=${out_file} --res_file=${res_file} --sam_file=${sam_file} from-regions --region="Rhode Island" --region_col=state
+
+
+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/examples.rev_losses.html b/misc/examples.rev_losses.html new file mode 100644 index 000000000..8885af0e2 --- /dev/null +++ b/misc/examples.rev_losses.html @@ -0,0 +1,794 @@ + + + + + + + reV Losses — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

reV Losses

+

reV Generation +can include power curve losses and stochastically scheduled outages.

+
+

Power Curve Losses (Wind only)

+

Instead of simple haircut losses, we can add power curve losses. +The example transformation we will use is a horizontal power curve translation. +To do so, we must specify the target_losses_percent as well as the name +of the transformation. We specify both of these options with the +'reV_power_curve_losses' key in the SAM config.

+
import os
+import json
+import tempfile
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from reV import TESTDATADIR
+from reV.config.project_points import ProjectPoints
+from reV.generation.generation import Gen
+
+lat_lons = np.array([[ 41.97, -71.78],
+                     [ 41.05, -71.74],
+                     [ 41.25, -71.66]])
+
+res_file = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5')
+sam_file = os.path.join(TESTDATADIR,
+                            'SAM/wind_gen_standard_losses_0.json')
+
+with open(sam_file, 'r', encoding='utf-8') as fh:
+    sam_config = json.load(fh)
+
+
+power_curve_loss_info = {
+    'target_losses_percent': 40,
+    'transformation': 'horizontal_translation'
+
+}
+with tempfile.TemporaryDirectory() as td:
+    sam_fp = os.path.join(td, 'gen.json')
+    sam_config.pop('turb_generic_loss', None)
+    sam_config['reV_power_curve_losses'] = power_curve_loss_info
+    with open(sam_fp, 'w+') as fh:
+        fh.write(json.dumps(sam_config))
+
+    output_request=('cf_mean', 'cf_profile', 'gen_profile', 'windspeed')
+    pp = ProjectPoints.lat_lon_coords(lat_lons, res_file, sam_fp)
+    gen = Gen('windpower', pp, sam_fp, res_file, output_request=output_request)
+    gen.run(max_workers=1)
+print(gen.out['cf_profile'])
+
+[[0.133, 0.202, 0.15 ],
+ [0.184, 0.045, 0.242],
+ [0.508, 0.119, 0.319],
+ ...,
+ [0.99 , 1.   , 1.   ],
+ [0.688, 1.   , 1.   ],
+ [0.628, 1.   , 1.   ]]
+
+
+
+
+

Power Curve Losses for a single site

+

The reV losses module can be used to compute the power curve shift required to meet +a target loss value for a single input site. To do this, the user must specify the +resource at the site as well as the input power curve and target loss info. An +example of this process is given below

+
import os
+import matplotlib.pyplot as plt
+from rex import Resource
+
+from reV.losses.power_curve import (
+    PowerCurve,
+    PowerCurveLossesInput,
+    PowerCurveWindResource,
+    adjust_power_curve,
+)
+
+site_ind = 100
+res_file = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5')
+with Resource(res_file) as res:
+    temperatures = res["temperature_100m"][:, site_ind]
+    pressures = res["pressure_100m"][:, site_ind]
+    wind_speeds = res["windspeed_100m"][:, site_ind]
+
+sam_file = os.path.join(TESTDATADIR, 'SAM/wind_gen_standard_losses_0.json')
+with open(sam_file, 'r', encoding='utf-8') as fh:
+    sam_config = json.load(fh)
+pc_wind_speed = sam_config['wind_turbine_powercurve_windspeeds']
+pc_generation = sam_config['wind_turbine_powercurve_powerout']
+
+power_curve_loss_info = {
+    'target_losses_percent': 5,
+    'transformation': 'exponential_stretching'
+}
+
+power_curve = PowerCurve(pc_wind_speed, pc_generation)
+resource_data = PowerCurveWindResource(temperatures, pressures, wind_speeds)
+target_losses = PowerCurveLossesInput(power_curve_loss_info)
+
+new_curve = adjust_power_curve(
+    power_curve, resource_data, target_losses
+)
+
+_ = plt.plot(power_curve.wind_speed, power_curve, label='Original')
+_ = plt.plot(new_curve.wind_speed, new_curve, label='5% Losses')
+_ = plt.legend(loc='upper left')
+_ = plt.xlabel("Wind Speed (m/s)")
+_ = plt.ylabel("Generated Power (kW)")
+_ = plt.show()
+
+
+
+
+

Outage Losses (Wind and Solar)

+

We can also tell reV to stochastically schedule outages based on some +outage information that we pass in. Specifically, we need to provide the +outage duration, the number of outages (count), the allowed_months, +as well as the percentage_of_capacity_lost for each outage.

+
import os
+import json
+import tempfile
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+from reV import TESTDATADIR
+from reV.config.project_points import ProjectPoints
+from reV.generation.generation import Gen
+
+lat_lons = np.array([[ 41.05, -71.74],
+                     [ 41.25, -71.66]])
+
+res_file = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5')
+sam_file = os.path.join(TESTDATADIR,
+                            'SAM/wind_gen_standard_losses_0.json')
+
+with open(sam_file, 'r', encoding='utf-8') as fh:
+    sam_config = json.load(fh)
+sam_config.pop('wind_farm_losses_percent', None)
+sam_config.pop('turb_generic_loss', None)
+
+outage_info = [
+    {
+        'count': 5,
+        'duration': 24,
+        'percentage_of_capacity_lost': 100,
+        'allowed_months': ['January'],
+    }
+]
+with tempfile.TemporaryDirectory() as td:
+    sam_fp = os.path.join(td, 'gen.json')
+    sam_config['reV_outages'] = outage_info
+    with open(sam_fp, 'w+') as fh:
+        fh.write(json.dumps(sam_config))
+
+    output_request=('cf_mean', 'cf_profile', 'gen_profile')
+    pp = ProjectPoints.lat_lon_coords(lat_lons, res_file, sam_fp)
+    gen = Gen('windpower', pp, sam_fp, res_file, output_request=output_request)
+    gen.run(max_workers=1)
+print(gen.out['cf_profile'][:744].mean(axis=0))
+
+[0.67402536, 0.6644584]
+
+
+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/examples.running_locally.html b/misc/examples.running_locally.html new file mode 100644 index 000000000..90c20e09e --- /dev/null +++ b/misc/examples.running_locally.html @@ -0,0 +1,704 @@ + + + + + + + Run reV locally — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Run reV locally

+

reV Gen +and reV Econ +can be run locally using resource .h5 files stored locally.

+
+

reV Gen

+

reV Generation uses PySAM to +compute technology-specific capacity factor means and profiles. reV Gen +uses SAM technology terms and input configuration files

+
+

windpower

+

Compute wind capacity factors for a given set of latitude and longitude +coordinates:

+
import os
+from reV import TESTDATADIR
+from reV.config.project_points import ProjectPoints
+from reV.generation.generation import Gen
+
+lat_lons = np.array([[ 41.25, -71.66],
+                        [ 41.05, -71.74],
+                        [ 41.97, -71.78],
+                        [ 41.65, -71.74],
+                        [ 41.25, -71.7 ],
+                        [ 41.05, -71.78]])
+
+res_file = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_2012.h5')
+sam_file = os.path.join(TESTDATADIR,
+                         'SAM/wind_gen_standard_losses_0.json')
+
+pp = ProjectPoints.lat_lon_coords(lat_lons, res_file, sam_file)
+gen = Gen('windpower', pp, sam_file, res_file,
+          output_request=('cf_mean', 'cf_profile'))
+gen.run(max_workers=1)
+print(gen.out['cf_profile'])
+
+[[0.319 0.538 0.287 ... 0.496 0.579 0.486]
+ [0.382 0.75  0.474 ... 0.595 0.339 0.601]
+ [0.696 0.814 0.724 ... 0.66  0.466 0.677]
+ ...
+ [0.833 0.833 0.823 ... 0.833 0.833 0.833]
+ [0.782 0.833 0.833 ... 0.833 0.833 0.833]
+ [0.756 0.801 0.833 ... 0.833 0.833 0.833]]
+
+
+
+
+

pvwatts

+

NOTE: pvwattsv5 and pvwattsv7 are both available from reV.

+

Compute PV capacity factors for all resource gids in Rhode Island:

+
import os
+from reV import TESTDATADIR
+from reV.config.project_points import ProjectPoints
+from reV.generation.generation import Gen
+
+regions = {'Rhode Island': 'state'}
+
+res_file = os.path.join(TESTDATADIR, 'nsrdb/', 'ri_100_nsrdb_2012.h5')
+sam_file = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13.json')
+
+pp = ProjectPoints.regions(regions, res_file, sam_file)
+gen = Gen('pvwattsv5', pp, sam_file, res_file,
+          output_request=('cf_mean', 'cf_profile'))
+gen.run(max_workers=1)
+print(gen.out['cf_mean'])
+
+[0.183 0.166 0.177 0.175 0.167 0.183 0.176 0.175 0.176 0.177]
+
+
+
+
+
+

Command Line Interface (CLI)

+

reV-gen +can also be run from the command line and will output the results to an .h5 +file that can be read with rex.resource.Resource.

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/examples.running_with_hsds.html b/misc/examples.running_with_hsds.html new file mode 100644 index 000000000..f8519628f --- /dev/null +++ b/misc/examples.running_with_hsds.html @@ -0,0 +1,762 @@ + + + + + + + Running with HSDS — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Running with HSDS

+

The Highly Scalable Distributed Service (HSDS) is a cloud optimized API to +enable access to .h5 files hosted on AWS. The HSDS software was developed by +the HDF Group and is hosted on Amazon Web +Services (AWS) using a combination of EC2 (Elastic Compute) and S3 (Scalable +Storage Service). You can read more about the HSDS service +in this slide deck. +You can use the NREL developer API as the HSDS endpoint for small workloads +or stand up your own HSDS local server (instructions further below) for an +enhanced parallelized data experience.

+

You might also be interested in these examples of how to set up your own local HSDS server and how to run reV on an AWS parallel cluster.

+
+

Setting up HSDS

+

To get started install the h5pyd library:

+
pip install h5pyd
+
+
+

Next, configure h5pyd by running hsconfigure from the command line, or by +creating a configuration file at ~/.hscfg:

+
hsconfigure
+hs_endpoint = https://developer.nrel.gov/api/hsds
+hs_username =
+hs_password =
+hs_api_key = {YOUR_API_KEY_HERE}
+
+
+

To get your own API key, visit https://developer.nrel.gov/signup/

+

Please note that our HSDS service is for demonstration purposes only. The API in the example above is hosted on an NREL server and will have limits on the amount of data you can access via HSDS. It is common to get an error: OSError: Error retrieving data: None errors if you attempt to access too much data or if the server is busy. Here are two references for scaling reV using HSDS and AWS:

+
    +
  1. Setup your own HSDS server on your personal computer

  2. +
  3. Run reV on the AWS Parallel Cluster Infrastructure

  4. +
+
+
+

Using HSDS with reV

+

Once h5pyd has been installed and configured, rex +can pull data directly from AWS using HSDS +To access the resource data used by reV (NSRDB or WTK) you have to turn on the +hsds flag in the resource handlers:

+
nsrdb_file = '/nrel/nsrdb/v3/nsrdb_2013.h5'
+with rex.Resource(nsrdb_file, hsds=True) as f:
+    meta_data = f.meta
+    time_index = f.time_index
+
+
+
+
+

reV Gen

+

reV generation (reV.Gen) +will automatically infer if a file path is locally on disk or from HSDS.

+

Note that for all of these examples, the sam_file input points to files in +the +reV test directory +that may not be copied in your install. You may want to download the relevant +SAM system configs from that directory and point the sam_file variable to +the correct filepath on your computer.

+
+

windpower

+

Compute wind capacity factors for a given set of latitude and longitude +coordinates:

+
import os
+import numpy as np
+from reV import TESTDATADIR
+from reV.config.project_points import ProjectPoints
+from reV.generation.generation import Gen
+from rex import init_logger
+
+init_logger('reV', log_level='DEBUG')
+
+lat_lons = np.array([[ 41.25, -71.66],
+                     [ 41.05, -71.74],
+                     [ 41.45, -71.66],
+                     [ 41.97, -71.78],
+                     [ 41.65, -71.74],
+                     [ 41.53, -71.7 ],
+                     [ 41.25, -71.7 ],
+                     [ 41.05, -71.78],
+                     [ 42.01, -71.74],
+                     [ 41.45, -71.78]])
+
+res_file = '/nrel/wtk/conus/wtk_conus_2012.h5'  # HSDS 'file' path
+sam_file = os.path.join(TESTDATADIR,
+                         'SAM/wind_gen_standard_losses_0.json')
+
+pp = ProjectPoints.lat_lon_coords(lat_lons, res_file, sam_file)
+gen = Gen('windpower', pp, sam_file, res_file,
+          output_request=('cf_mean', 'cf_profile'))
+gen.run(max_workers=1)
+print(gen.out['cf_profile'])
+
+[[0.319 0.538 0.287 ... 0.496 0.579 0.486]
+ [0.382 0.75  0.474 ... 0.595 0.339 0.601]
+ [0.696 0.814 0.724 ... 0.66  0.466 0.677]
+ ...
+ [0.833 0.833 0.823 ... 0.833 0.833 0.833]
+ [0.782 0.833 0.833 ... 0.833 0.833 0.833]
+ [0.756 0.801 0.833 ... 0.833 0.833 0.833]]
+
+
+
+
+

pvwatts

+

NOTE: pvwattsv5 and pvwattsv7 are both available from reV.

+

Compute PV capacity factors for all resource gids in Rhode Island:

+
import os
+from reV import TESTDATADIR
+from reV.config.project_points import ProjectPoints
+from reV.generation.generation import Gen
+from rex import init_logger
+
+init_logger('reV', log_level='DEBUG')
+
+regions = {'Rhode Island': 'state'}
+
+res_file = '/nrel/nsrdb/v3/nsrdb_2012.h5'  # HSDS 'file' path
+sam_file = os.path.join(TESTDATADIR, 'SAM/naris_pv_1axis_inv13.json')
+
+pp = ProjectPoints.regions(regions, res_file, sam_file)
+gen = Gen('pvwattsv5', pp, sam_file, res_file,
+          output_request=('cf_mean', 'cf_profile'))
+gen.run(max_workers=1)
+print(gen.out['cf_mean'])
+
+[0.183 0.166 0.177 0.175 0.167 0.183 0.176 0.175 0.176 0.177]
+
+
+
+
+
+

Command Line Interface (CLI)

+

reV-gen +can also be run from the command line and will output the results to an .h5 +file that can be read with rex.resource.Resource.

+
+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/examples.single_module_execution.html b/misc/examples.single_module_execution.html new file mode 100644 index 000000000..03ad5879d --- /dev/null +++ b/misc/examples.single_module_execution.html @@ -0,0 +1,638 @@ + + + + + + + Single Module Execution — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Single Module Execution

+

This set of example files demonstrates how to run a single reV analysis module.

+

Any module can be executed using the following CLI call:

+
reV {module} -c "/scratch/gbuster/rev/module_config.json"
+
+
+

By default, a rev_status.json file will be created in the output directory. +Each node utilized in a job will additionally generate their own status jsons +upon completion. Each node makes its own status .json to avoid a parallel +write conflict to the single rev_status.json file.

+
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/installation.html b/misc/installation.html new file mode 100644 index 000000000..a5332a9e4 --- /dev/null +++ b/misc/installation.html @@ -0,0 +1,781 @@ + + + + + + + Installation — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+
+
+ +
+

Installation

+
+

NOTE: The installation instruction below assume that you have python installed +on your machine and are using conda +as your package/environment manager.

+ + +
+
+

reV command line tools

+ +
+

Launching a run

+

Tips

+ +
reV -c "/scratch/user/rev/config_pipeline.json" pipeline
+
+
+
    +
  • Running simply generation or econ can just be done from the console:

  • +
+
reV -c "/scratch/user/rev/config_gen.json" generation
+
+
+
+
+

General Run times and Node configuration on Eagle

+
    +
  • WTK Conus: 10-20 nodes per year walltime 1-4 hours

  • +
  • NSRDB Conus: 5 nodes walltime 2 hours

  • +
+

Eagle node requests

+
+
+ +
+

Command Line Tools

+
+ +
+ + +
+
+ +
+
+
+
+ + + + \ No newline at end of file diff --git a/misc/installation_usage.html b/misc/installation_usage.html new file mode 100644 index 000000000..5eed224e8 --- /dev/null +++ b/misc/installation_usage.html @@ -0,0 +1,644 @@ + + + + + + + Installation and Usage — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+ +
+
+ + +
+
+
+
+ + + + \ No newline at end of file diff --git a/objects.inv b/objects.inv new file mode 100644 index 000000000..daa8aae6a Binary files /dev/null and b/objects.inv differ diff --git a/py-modindex.html b/py-modindex.html new file mode 100644 index 000000000..dcb4765ec --- /dev/null +++ b/py-modindex.html @@ -0,0 +1,1020 @@ + + + + + + Python Module Index — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + +
  • +
  • +
+
+
+
+
+ + +

Python Module Index

+ +
+ r +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
 
+ r
+ reV +
    + reV.bespoke +
    + reV.bespoke.bespoke +
    + reV.bespoke.cli_bespoke +
    + reV.bespoke.gradient_free +
    + reV.bespoke.pack_turbs +
    + reV.bespoke.place_turbines +
    + reV.bespoke.plotting_functions +
    + reV.cli +
    + reV.config +
    + reV.config.base_analysis_config +
    + reV.config.base_config +
    + reV.config.cli_project_points +
    + reV.config.curtailment +
    + reV.config.execution +
    + reV.config.output_request +
    + reV.config.project_points +
    + reV.config.sam_config +
    + reV.econ +
    + reV.econ.cli_econ +
    + reV.econ.econ +
    + reV.econ.economies_of_scale +
    + reV.econ.utilities +
    + reV.generation +
    + reV.generation.base +
    + reV.generation.cli_gen +
    + reV.generation.generation +
    + reV.handlers +
    + reV.handlers.cli_collect +
    + reV.handlers.cli_multi_year +
    + reV.handlers.exclusions +
    + reV.handlers.multi_year +
    + reV.handlers.outputs +
    + reV.handlers.transmission +
    + reV.hybrids +
    + reV.hybrids.cli_hybrids +
    + reV.hybrids.hybrid_methods +
    + reV.hybrids.hybrids +
    + reV.losses +
    + reV.losses.power_curve +
    + reV.losses.scheduled +
    + reV.losses.utils +
    + reV.nrwal +
    + reV.nrwal.cli_nrwal +
    + reV.nrwal.nrwal +
    + reV.qa_qc +
    + reV.qa_qc.cli_qa_qc +
    + reV.qa_qc.qa_qc +
    + reV.qa_qc.summary +
    + reV.rep_profiles +
    + reV.rep_profiles.cli_rep_profiles +
    + reV.rep_profiles.rep_profiles +
    + reV.SAM +
    + reV.SAM.defaults +
    + reV.SAM.econ +
    + reV.SAM.generation +
    + reV.SAM.SAM +
    + reV.SAM.version_checker +
    + reV.SAM.windbos +
    + reV.supply_curve +
    + reV.supply_curve.aggregation +
    + reV.supply_curve.cli_sc_aggregation +
    + reV.supply_curve.cli_supply_curve +
    + reV.supply_curve.competitive_wind_farms +
    + reV.supply_curve.exclusions +
    + reV.supply_curve.extent +
    + reV.supply_curve.points +
    + reV.supply_curve.sc_aggregation +
    + reV.supply_curve.supply_curve +
    + reV.supply_curve.tech_mapping +
    + reV.utilities +
    + reV.utilities.cli_functions +
    + reV.utilities.curtailment +
    + reV.utilities.exceptions +
    + reV.utilities.pytest_utils +
    + reV.utilities.slots +
    + reV.version +
+ + +
+
+
+ +
+ +
+

© Copyright 2018, Alliance for Sustainable Energy, LLC.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/search.html b/search.html new file mode 100644 index 000000000..c20a9ffae --- /dev/null +++ b/search.html @@ -0,0 +1,640 @@ + + + + + + Search — reV 0.8.3 documentation + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
    +
  • + +
  • +
  • +
+
+
+
+
+ + + + +
+ +
+ +
+
+
+ +
+ +
+

© Copyright 2018, Alliance for Sustainable Energy, LLC.

+
+ + Built with Sphinx using a + theme + provided by Read the Docs. + + +
+
+
+
+
+ + + + + + + + + \ No newline at end of file diff --git a/searchindex.js b/searchindex.js new file mode 100644 index 000000000..623bcf2b5 --- /dev/null +++ b/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({"docnames": ["_autosummary/reV", "_autosummary/reV.SAM", "_autosummary/reV.SAM.SAM", "_autosummary/reV.SAM.SAM.RevPySam", "_autosummary/reV.SAM.SAM.Sam", "_autosummary/reV.SAM.SAM.SamResourceRetriever", "_autosummary/reV.SAM.defaults", "_autosummary/reV.SAM.defaults.AbstractDefaultFromConfigFile", "_autosummary/reV.SAM.defaults.DefaultGeothermal", "_autosummary/reV.SAM.defaults.DefaultLCOE", "_autosummary/reV.SAM.defaults.DefaultLinearFresnelDsgIph", "_autosummary/reV.SAM.defaults.DefaultMhkWave", "_autosummary/reV.SAM.defaults.DefaultPvSamv1", "_autosummary/reV.SAM.defaults.DefaultPvWattsv5", "_autosummary/reV.SAM.defaults.DefaultPvWattsv8", "_autosummary/reV.SAM.defaults.DefaultSingleOwner", "_autosummary/reV.SAM.defaults.DefaultSwh", "_autosummary/reV.SAM.defaults.DefaultTcsMoltenSalt", "_autosummary/reV.SAM.defaults.DefaultTroughPhysicalProcessHeat", "_autosummary/reV.SAM.defaults.DefaultWindPower", "_autosummary/reV.SAM.econ", "_autosummary/reV.SAM.econ.Economic", "_autosummary/reV.SAM.econ.LCOE", "_autosummary/reV.SAM.econ.SingleOwner", "_autosummary/reV.SAM.generation", "_autosummary/reV.SAM.generation.AbstractSamGeneration", "_autosummary/reV.SAM.generation.AbstractSamGenerationFromWeatherFile", "_autosummary/reV.SAM.generation.AbstractSamPv", "_autosummary/reV.SAM.generation.AbstractSamSolar", "_autosummary/reV.SAM.generation.AbstractSamWind", "_autosummary/reV.SAM.generation.Geothermal", "_autosummary/reV.SAM.generation.LinearDirectSteam", "_autosummary/reV.SAM.generation.MhkWave", "_autosummary/reV.SAM.generation.PvSamv1", "_autosummary/reV.SAM.generation.PvWattsv5", "_autosummary/reV.SAM.generation.PvWattsv7", "_autosummary/reV.SAM.generation.PvWattsv8", "_autosummary/reV.SAM.generation.SolarWaterHeat", "_autosummary/reV.SAM.generation.TcsMoltenSalt", 
"_autosummary/reV.SAM.generation.TroughPhysicalHeat", "_autosummary/reV.SAM.generation.WindPower", "_autosummary/reV.SAM.generation.WindPowerPD", "_autosummary/reV.SAM.version_checker", "_autosummary/reV.SAM.version_checker.PySamVersionChecker", "_autosummary/reV.SAM.windbos", "_autosummary/reV.SAM.windbos.WindBos", "_autosummary/reV.bespoke", "_autosummary/reV.bespoke.bespoke", "_autosummary/reV.bespoke.bespoke.BespokeMultiPlantData", "_autosummary/reV.bespoke.bespoke.BespokeSinglePlant", "_autosummary/reV.bespoke.bespoke.BespokeSinglePlantData", "_autosummary/reV.bespoke.bespoke.BespokeWindPlants", "_autosummary/reV.bespoke.cli_bespoke", "_autosummary/reV.bespoke.gradient_free", "_autosummary/reV.bespoke.gradient_free.GeneticAlgorithm", "_autosummary/reV.bespoke.pack_turbs", "_autosummary/reV.bespoke.pack_turbs.PackTurbines", "_autosummary/reV.bespoke.pack_turbs.smallest_area_with_tiebreakers", "_autosummary/reV.bespoke.place_turbines", "_autosummary/reV.bespoke.place_turbines.PlaceTurbines", "_autosummary/reV.bespoke.place_turbines.none_until_optimized", "_autosummary/reV.bespoke.plotting_functions", "_autosummary/reV.bespoke.plotting_functions.get_xy", "_autosummary/reV.bespoke.plotting_functions.plot_poly", "_autosummary/reV.bespoke.plotting_functions.plot_turbines", "_autosummary/reV.bespoke.plotting_functions.plot_windrose", "_autosummary/reV.cli", "_autosummary/reV.config", "_autosummary/reV.config.base_analysis_config", "_autosummary/reV.config.base_analysis_config.AnalysisConfig", "_autosummary/reV.config.base_config", "_autosummary/reV.config.base_config.BaseConfig", "_autosummary/reV.config.cli_project_points", "_autosummary/reV.config.curtailment", "_autosummary/reV.config.curtailment.Curtailment", "_autosummary/reV.config.execution", "_autosummary/reV.config.execution.BaseExecutionConfig", "_autosummary/reV.config.execution.HPCConfig", "_autosummary/reV.config.execution.SlurmConfig", "_autosummary/reV.config.output_request", 
"_autosummary/reV.config.output_request.OutputRequest", "_autosummary/reV.config.output_request.SAMOutputRequest", "_autosummary/reV.config.project_points", "_autosummary/reV.config.project_points.PointsControl", "_autosummary/reV.config.project_points.ProjectPoints", "_autosummary/reV.config.sam_config", "_autosummary/reV.config.sam_config.SAMConfig", "_autosummary/reV.config.sam_config.SAMInputsChecker", "_autosummary/reV.econ", "_autosummary/reV.econ.cli_econ", "_autosummary/reV.econ.econ", "_autosummary/reV.econ.econ.Econ", "_autosummary/reV.econ.economies_of_scale", "_autosummary/reV.econ.economies_of_scale.EconomiesOfScale", "_autosummary/reV.econ.utilities", "_autosummary/reV.econ.utilities.lcoe_fcr", "_autosummary/reV.generation", "_autosummary/reV.generation.base", "_autosummary/reV.generation.base.BaseGen", "_autosummary/reV.generation.cli_gen", "_autosummary/reV.generation.generation", "_autosummary/reV.generation.generation.Gen", "_autosummary/reV.handlers", "_autosummary/reV.handlers.cli_collect", "_autosummary/reV.handlers.cli_multi_year", "_autosummary/reV.handlers.exclusions", "_autosummary/reV.handlers.exclusions.ExclusionLayers", "_autosummary/reV.handlers.multi_year", "_autosummary/reV.handlers.multi_year.MultiYear", "_autosummary/reV.handlers.multi_year.MultiYearGroup", "_autosummary/reV.handlers.multi_year.my_collect_groups", "_autosummary/reV.handlers.outputs", "_autosummary/reV.handlers.outputs.Outputs", "_autosummary/reV.handlers.transmission", "_autosummary/reV.handlers.transmission.TransmissionCosts", "_autosummary/reV.handlers.transmission.TransmissionFeatures", "_autosummary/reV.hybrids", "_autosummary/reV.hybrids.cli_hybrids", "_autosummary/reV.hybrids.hybrid_methods", "_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity", "_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity_factor", "_autosummary/reV.hybrids.hybrid_methods.aggregate_solar_capacity", "_autosummary/reV.hybrids.hybrid_methods.aggregate_wind_capacity", 
"_autosummary/reV.hybrids.hybrids", "_autosummary/reV.hybrids.hybrids.ColNameFormatter", "_autosummary/reV.hybrids.hybrids.Hybridization", "_autosummary/reV.hybrids.hybrids.HybridsData", "_autosummary/reV.hybrids.hybrids.MetaHybridizer", "_autosummary/reV.hybrids.hybrids.RatioColumns", "_autosummary/reV.losses", "_autosummary/reV.losses.power_curve", "_autosummary/reV.losses.power_curve.AbstractPowerCurveTransformation", "_autosummary/reV.losses.power_curve.ExponentialStretching", "_autosummary/reV.losses.power_curve.HorizontalTranslation", "_autosummary/reV.losses.power_curve.LinearStretching", "_autosummary/reV.losses.power_curve.PowerCurve", "_autosummary/reV.losses.power_curve.PowerCurveLosses", "_autosummary/reV.losses.power_curve.PowerCurveLossesInput", "_autosummary/reV.losses.power_curve.PowerCurveLossesMixin", "_autosummary/reV.losses.power_curve.PowerCurveWindResource", "_autosummary/reV.losses.power_curve.TRANSFORMATIONS", "_autosummary/reV.losses.power_curve.adjust_power_curve", "_autosummary/reV.losses.scheduled", "_autosummary/reV.losses.scheduled.Outage", "_autosummary/reV.losses.scheduled.OutageScheduler", "_autosummary/reV.losses.scheduled.ScheduledLossesMixin", "_autosummary/reV.losses.scheduled.SingleOutageScheduler", "_autosummary/reV.losses.utils", "_autosummary/reV.losses.utils.convert_to_full_month_names", "_autosummary/reV.losses.utils.filter_unknown_month_names", "_autosummary/reV.losses.utils.format_month_name", "_autosummary/reV.losses.utils.full_month_name_from_abbr", "_autosummary/reV.losses.utils.hourly_indices_for_months", "_autosummary/reV.losses.utils.month_index", "_autosummary/reV.losses.utils.month_indices", "_autosummary/reV.nrwal", "_autosummary/reV.nrwal.cli_nrwal", "_autosummary/reV.nrwal.nrwal", "_autosummary/reV.nrwal.nrwal.RevNrwal", "_autosummary/reV.qa_qc", "_autosummary/reV.qa_qc.cli_qa_qc", "_autosummary/reV.qa_qc.cli_qa_qc.cli_qa_qc", "_autosummary/reV.qa_qc.qa_qc", "_autosummary/reV.qa_qc.qa_qc.QaQc", 
"_autosummary/reV.qa_qc.qa_qc.QaQcModule", "_autosummary/reV.qa_qc.summary", "_autosummary/reV.qa_qc.summary.ExclusionsMask", "_autosummary/reV.qa_qc.summary.PlotBase", "_autosummary/reV.qa_qc.summary.SummarizeH5", "_autosummary/reV.qa_qc.summary.SummarizeSupplyCurve", "_autosummary/reV.qa_qc.summary.SummaryPlots", "_autosummary/reV.qa_qc.summary.SupplyCurvePlot", "_autosummary/reV.rep_profiles", "_autosummary/reV.rep_profiles.cli_rep_profiles", "_autosummary/reV.rep_profiles.rep_profiles", "_autosummary/reV.rep_profiles.rep_profiles.RegionRepProfile", "_autosummary/reV.rep_profiles.rep_profiles.RepProfiles", "_autosummary/reV.rep_profiles.rep_profiles.RepProfilesBase", "_autosummary/reV.rep_profiles.rep_profiles.RepresentativeMethods", "_autosummary/reV.supply_curve", "_autosummary/reV.supply_curve.aggregation", "_autosummary/reV.supply_curve.aggregation.AbstractAggFileHandler", "_autosummary/reV.supply_curve.aggregation.AggFileHandler", "_autosummary/reV.supply_curve.aggregation.Aggregation", "_autosummary/reV.supply_curve.aggregation.BaseAggregation", "_autosummary/reV.supply_curve.cli_sc_aggregation", "_autosummary/reV.supply_curve.cli_supply_curve", "_autosummary/reV.supply_curve.competitive_wind_farms", "_autosummary/reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms", "_autosummary/reV.supply_curve.exclusions", "_autosummary/reV.supply_curve.exclusions.ExclusionMask", "_autosummary/reV.supply_curve.exclusions.ExclusionMaskFromDict", "_autosummary/reV.supply_curve.exclusions.FrictionMask", "_autosummary/reV.supply_curve.exclusions.LayerMask", "_autosummary/reV.supply_curve.extent", "_autosummary/reV.supply_curve.extent.SupplyCurveExtent", "_autosummary/reV.supply_curve.points", "_autosummary/reV.supply_curve.points.AbstractSupplyCurvePoint", "_autosummary/reV.supply_curve.points.AggregationSupplyCurvePoint", "_autosummary/reV.supply_curve.points.GenerationSupplyCurvePoint", "_autosummary/reV.supply_curve.points.SupplyCurvePoint", 
"_autosummary/reV.supply_curve.sc_aggregation", "_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler", "_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggregation", "_autosummary/reV.supply_curve.supply_curve", "_autosummary/reV.supply_curve.supply_curve.SupplyCurve", "_autosummary/reV.supply_curve.tech_mapping", "_autosummary/reV.supply_curve.tech_mapping.TechMapping", "_autosummary/reV.utilities", "_autosummary/reV.utilities.ModuleName", "_autosummary/reV.utilities.cli_functions", "_autosummary/reV.utilities.cli_functions.format_analysis_years", "_autosummary/reV.utilities.cli_functions.init_cli_logging", "_autosummary/reV.utilities.cli_functions.parse_from_pipeline", "_autosummary/reV.utilities.curtailment", "_autosummary/reV.utilities.curtailment.curtail", "_autosummary/reV.utilities.exceptions", "_autosummary/reV.utilities.exceptions.CollectionRuntimeError", "_autosummary/reV.utilities.exceptions.CollectionValueError", "_autosummary/reV.utilities.exceptions.CollectionWarning", "_autosummary/reV.utilities.exceptions.ConfigError", "_autosummary/reV.utilities.exceptions.ConfigWarning", "_autosummary/reV.utilities.exceptions.DataShapeError", "_autosummary/reV.utilities.exceptions.EmptySupplyCurvePointError", "_autosummary/reV.utilities.exceptions.ExclusionLayerError", "_autosummary/reV.utilities.exceptions.ExecutionError", "_autosummary/reV.utilities.exceptions.ExtrapolationWarning", "_autosummary/reV.utilities.exceptions.FileInputError", "_autosummary/reV.utilities.exceptions.FileInputWarning", "_autosummary/reV.utilities.exceptions.HandlerKeyError", "_autosummary/reV.utilities.exceptions.HandlerRuntimeError", "_autosummary/reV.utilities.exceptions.HandlerValueError", "_autosummary/reV.utilities.exceptions.HandlerWarning", "_autosummary/reV.utilities.exceptions.InputError", "_autosummary/reV.utilities.exceptions.InputWarning", "_autosummary/reV.utilities.exceptions.JSONError", 
"_autosummary/reV.utilities.exceptions.MultiFileExclusionError", "_autosummary/reV.utilities.exceptions.NearestNeighborError", "_autosummary/reV.utilities.exceptions.OffshoreWindInputError", "_autosummary/reV.utilities.exceptions.OffshoreWindInputWarning", "_autosummary/reV.utilities.exceptions.OutputWarning", "_autosummary/reV.utilities.exceptions.ParallelExecutionWarning", "_autosummary/reV.utilities.exceptions.PipelineError", "_autosummary/reV.utilities.exceptions.ProjectPointsValueError", "_autosummary/reV.utilities.exceptions.PySAMVersionError", "_autosummary/reV.utilities.exceptions.PySAMVersionWarning", "_autosummary/reV.utilities.exceptions.ResourceError", "_autosummary/reV.utilities.exceptions.SAMExecutionError", "_autosummary/reV.utilities.exceptions.SAMExecutionWarning", "_autosummary/reV.utilities.exceptions.SAMInputError", "_autosummary/reV.utilities.exceptions.SAMInputWarning", "_autosummary/reV.utilities.exceptions.SlurmWarning", "_autosummary/reV.utilities.exceptions.SupplyCurveError", "_autosummary/reV.utilities.exceptions.SupplyCurveInputError", "_autosummary/reV.utilities.exceptions.WhileLoopPackingError", "_autosummary/reV.utilities.exceptions.reVDeprecationWarning", "_autosummary/reV.utilities.exceptions.reVError", "_autosummary/reV.utilities.exceptions.reVLossesValueError", "_autosummary/reV.utilities.exceptions.reVLossesWarning", "_autosummary/reV.utilities.log_versions", "_autosummary/reV.utilities.pytest_utils", "_autosummary/reV.utilities.pytest_utils.make_fake_h5_chunks", "_autosummary/reV.utilities.pytest_utils.pd_date_range", "_autosummary/reV.utilities.pytest_utils.write_chunk", "_autosummary/reV.utilities.slots", "_autosummary/reV.utilities.slots.SlottedDict", "_autosummary/reV.version", "_cli/cli", "_cli/reV", "_cli/reV batch", "_cli/reV bespoke", "_cli/reV collect", "_cli/reV econ", "_cli/reV generation", "_cli/reV hybrids", "_cli/reV multiyear", "_cli/reV nrwal", "_cli/reV pipeline", "_cli/reV project-points", "_cli/reV qa-qc", 
"_cli/reV rep-profiles", "_cli/reV reset-status", "_cli/reV script", "_cli/reV status", "_cli/reV supply-curve", "_cli/reV supply-curve-aggregation", "_cli/reV template-configs", "api", "index", "misc/examples", "misc/examples.advanced_econ_modeling", "misc/examples.aws_pcluster", "misc/examples.batched_execution", "misc/examples.eagle_node_requests", "misc/examples.full_pipeline_execution", "misc/examples.marine_energy", "misc/examples.offshore_wind", "misc/examples.project_points", "misc/examples.rev_losses", "misc/examples.running_locally", "misc/examples.running_with_hsds", "misc/examples.single_module_execution", "misc/installation", "misc/installation_usage"], "filenames": ["_autosummary/reV.rst", "_autosummary/reV.SAM.rst", "_autosummary/reV.SAM.SAM.rst", "_autosummary/reV.SAM.SAM.RevPySam.rst", "_autosummary/reV.SAM.SAM.Sam.rst", "_autosummary/reV.SAM.SAM.SamResourceRetriever.rst", "_autosummary/reV.SAM.defaults.rst", "_autosummary/reV.SAM.defaults.AbstractDefaultFromConfigFile.rst", "_autosummary/reV.SAM.defaults.DefaultGeothermal.rst", "_autosummary/reV.SAM.defaults.DefaultLCOE.rst", "_autosummary/reV.SAM.defaults.DefaultLinearFresnelDsgIph.rst", "_autosummary/reV.SAM.defaults.DefaultMhkWave.rst", "_autosummary/reV.SAM.defaults.DefaultPvSamv1.rst", "_autosummary/reV.SAM.defaults.DefaultPvWattsv5.rst", "_autosummary/reV.SAM.defaults.DefaultPvWattsv8.rst", "_autosummary/reV.SAM.defaults.DefaultSingleOwner.rst", "_autosummary/reV.SAM.defaults.DefaultSwh.rst", "_autosummary/reV.SAM.defaults.DefaultTcsMoltenSalt.rst", "_autosummary/reV.SAM.defaults.DefaultTroughPhysicalProcessHeat.rst", "_autosummary/reV.SAM.defaults.DefaultWindPower.rst", "_autosummary/reV.SAM.econ.rst", "_autosummary/reV.SAM.econ.Economic.rst", "_autosummary/reV.SAM.econ.LCOE.rst", "_autosummary/reV.SAM.econ.SingleOwner.rst", "_autosummary/reV.SAM.generation.rst", "_autosummary/reV.SAM.generation.AbstractSamGeneration.rst", 
"_autosummary/reV.SAM.generation.AbstractSamGenerationFromWeatherFile.rst", "_autosummary/reV.SAM.generation.AbstractSamPv.rst", "_autosummary/reV.SAM.generation.AbstractSamSolar.rst", "_autosummary/reV.SAM.generation.AbstractSamWind.rst", "_autosummary/reV.SAM.generation.Geothermal.rst", "_autosummary/reV.SAM.generation.LinearDirectSteam.rst", "_autosummary/reV.SAM.generation.MhkWave.rst", "_autosummary/reV.SAM.generation.PvSamv1.rst", "_autosummary/reV.SAM.generation.PvWattsv5.rst", "_autosummary/reV.SAM.generation.PvWattsv7.rst", "_autosummary/reV.SAM.generation.PvWattsv8.rst", "_autosummary/reV.SAM.generation.SolarWaterHeat.rst", "_autosummary/reV.SAM.generation.TcsMoltenSalt.rst", "_autosummary/reV.SAM.generation.TroughPhysicalHeat.rst", "_autosummary/reV.SAM.generation.WindPower.rst", "_autosummary/reV.SAM.generation.WindPowerPD.rst", "_autosummary/reV.SAM.version_checker.rst", "_autosummary/reV.SAM.version_checker.PySamVersionChecker.rst", "_autosummary/reV.SAM.windbos.rst", "_autosummary/reV.SAM.windbos.WindBos.rst", "_autosummary/reV.bespoke.rst", "_autosummary/reV.bespoke.bespoke.rst", "_autosummary/reV.bespoke.bespoke.BespokeMultiPlantData.rst", "_autosummary/reV.bespoke.bespoke.BespokeSinglePlant.rst", "_autosummary/reV.bespoke.bespoke.BespokeSinglePlantData.rst", "_autosummary/reV.bespoke.bespoke.BespokeWindPlants.rst", "_autosummary/reV.bespoke.cli_bespoke.rst", "_autosummary/reV.bespoke.gradient_free.rst", "_autosummary/reV.bespoke.gradient_free.GeneticAlgorithm.rst", "_autosummary/reV.bespoke.pack_turbs.rst", "_autosummary/reV.bespoke.pack_turbs.PackTurbines.rst", "_autosummary/reV.bespoke.pack_turbs.smallest_area_with_tiebreakers.rst", "_autosummary/reV.bespoke.place_turbines.rst", "_autosummary/reV.bespoke.place_turbines.PlaceTurbines.rst", "_autosummary/reV.bespoke.place_turbines.none_until_optimized.rst", "_autosummary/reV.bespoke.plotting_functions.rst", "_autosummary/reV.bespoke.plotting_functions.get_xy.rst", 
"_autosummary/reV.bespoke.plotting_functions.plot_poly.rst", "_autosummary/reV.bespoke.plotting_functions.plot_turbines.rst", "_autosummary/reV.bespoke.plotting_functions.plot_windrose.rst", "_autosummary/reV.cli.rst", "_autosummary/reV.config.rst", "_autosummary/reV.config.base_analysis_config.rst", "_autosummary/reV.config.base_analysis_config.AnalysisConfig.rst", "_autosummary/reV.config.base_config.rst", "_autosummary/reV.config.base_config.BaseConfig.rst", "_autosummary/reV.config.cli_project_points.rst", "_autosummary/reV.config.curtailment.rst", "_autosummary/reV.config.curtailment.Curtailment.rst", "_autosummary/reV.config.execution.rst", "_autosummary/reV.config.execution.BaseExecutionConfig.rst", "_autosummary/reV.config.execution.HPCConfig.rst", "_autosummary/reV.config.execution.SlurmConfig.rst", "_autosummary/reV.config.output_request.rst", "_autosummary/reV.config.output_request.OutputRequest.rst", "_autosummary/reV.config.output_request.SAMOutputRequest.rst", "_autosummary/reV.config.project_points.rst", "_autosummary/reV.config.project_points.PointsControl.rst", "_autosummary/reV.config.project_points.ProjectPoints.rst", "_autosummary/reV.config.sam_config.rst", "_autosummary/reV.config.sam_config.SAMConfig.rst", "_autosummary/reV.config.sam_config.SAMInputsChecker.rst", "_autosummary/reV.econ.rst", "_autosummary/reV.econ.cli_econ.rst", "_autosummary/reV.econ.econ.rst", "_autosummary/reV.econ.econ.Econ.rst", "_autosummary/reV.econ.economies_of_scale.rst", "_autosummary/reV.econ.economies_of_scale.EconomiesOfScale.rst", "_autosummary/reV.econ.utilities.rst", "_autosummary/reV.econ.utilities.lcoe_fcr.rst", "_autosummary/reV.generation.rst", "_autosummary/reV.generation.base.rst", "_autosummary/reV.generation.base.BaseGen.rst", "_autosummary/reV.generation.cli_gen.rst", "_autosummary/reV.generation.generation.rst", "_autosummary/reV.generation.generation.Gen.rst", "_autosummary/reV.handlers.rst", "_autosummary/reV.handlers.cli_collect.rst", 
"_autosummary/reV.handlers.cli_multi_year.rst", "_autosummary/reV.handlers.exclusions.rst", "_autosummary/reV.handlers.exclusions.ExclusionLayers.rst", "_autosummary/reV.handlers.multi_year.rst", "_autosummary/reV.handlers.multi_year.MultiYear.rst", "_autosummary/reV.handlers.multi_year.MultiYearGroup.rst", "_autosummary/reV.handlers.multi_year.my_collect_groups.rst", "_autosummary/reV.handlers.outputs.rst", "_autosummary/reV.handlers.outputs.Outputs.rst", "_autosummary/reV.handlers.transmission.rst", "_autosummary/reV.handlers.transmission.TransmissionCosts.rst", "_autosummary/reV.handlers.transmission.TransmissionFeatures.rst", "_autosummary/reV.hybrids.rst", "_autosummary/reV.hybrids.cli_hybrids.rst", "_autosummary/reV.hybrids.hybrid_methods.rst", "_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity.rst", "_autosummary/reV.hybrids.hybrid_methods.aggregate_capacity_factor.rst", "_autosummary/reV.hybrids.hybrid_methods.aggregate_solar_capacity.rst", "_autosummary/reV.hybrids.hybrid_methods.aggregate_wind_capacity.rst", "_autosummary/reV.hybrids.hybrids.rst", "_autosummary/reV.hybrids.hybrids.ColNameFormatter.rst", "_autosummary/reV.hybrids.hybrids.Hybridization.rst", "_autosummary/reV.hybrids.hybrids.HybridsData.rst", "_autosummary/reV.hybrids.hybrids.MetaHybridizer.rst", "_autosummary/reV.hybrids.hybrids.RatioColumns.rst", "_autosummary/reV.losses.rst", "_autosummary/reV.losses.power_curve.rst", "_autosummary/reV.losses.power_curve.AbstractPowerCurveTransformation.rst", "_autosummary/reV.losses.power_curve.ExponentialStretching.rst", "_autosummary/reV.losses.power_curve.HorizontalTranslation.rst", "_autosummary/reV.losses.power_curve.LinearStretching.rst", "_autosummary/reV.losses.power_curve.PowerCurve.rst", "_autosummary/reV.losses.power_curve.PowerCurveLosses.rst", "_autosummary/reV.losses.power_curve.PowerCurveLossesInput.rst", "_autosummary/reV.losses.power_curve.PowerCurveLossesMixin.rst", 
"_autosummary/reV.losses.power_curve.PowerCurveWindResource.rst", "_autosummary/reV.losses.power_curve.TRANSFORMATIONS.rst", "_autosummary/reV.losses.power_curve.adjust_power_curve.rst", "_autosummary/reV.losses.scheduled.rst", "_autosummary/reV.losses.scheduled.Outage.rst", "_autosummary/reV.losses.scheduled.OutageScheduler.rst", "_autosummary/reV.losses.scheduled.ScheduledLossesMixin.rst", "_autosummary/reV.losses.scheduled.SingleOutageScheduler.rst", "_autosummary/reV.losses.utils.rst", "_autosummary/reV.losses.utils.convert_to_full_month_names.rst", "_autosummary/reV.losses.utils.filter_unknown_month_names.rst", "_autosummary/reV.losses.utils.format_month_name.rst", "_autosummary/reV.losses.utils.full_month_name_from_abbr.rst", "_autosummary/reV.losses.utils.hourly_indices_for_months.rst", "_autosummary/reV.losses.utils.month_index.rst", "_autosummary/reV.losses.utils.month_indices.rst", "_autosummary/reV.nrwal.rst", "_autosummary/reV.nrwal.cli_nrwal.rst", "_autosummary/reV.nrwal.nrwal.rst", "_autosummary/reV.nrwal.nrwal.RevNrwal.rst", "_autosummary/reV.qa_qc.rst", "_autosummary/reV.qa_qc.cli_qa_qc.rst", "_autosummary/reV.qa_qc.cli_qa_qc.cli_qa_qc.rst", "_autosummary/reV.qa_qc.qa_qc.rst", "_autosummary/reV.qa_qc.qa_qc.QaQc.rst", "_autosummary/reV.qa_qc.qa_qc.QaQcModule.rst", "_autosummary/reV.qa_qc.summary.rst", "_autosummary/reV.qa_qc.summary.ExclusionsMask.rst", "_autosummary/reV.qa_qc.summary.PlotBase.rst", "_autosummary/reV.qa_qc.summary.SummarizeH5.rst", "_autosummary/reV.qa_qc.summary.SummarizeSupplyCurve.rst", "_autosummary/reV.qa_qc.summary.SummaryPlots.rst", "_autosummary/reV.qa_qc.summary.SupplyCurvePlot.rst", "_autosummary/reV.rep_profiles.rst", "_autosummary/reV.rep_profiles.cli_rep_profiles.rst", "_autosummary/reV.rep_profiles.rep_profiles.rst", "_autosummary/reV.rep_profiles.rep_profiles.RegionRepProfile.rst", "_autosummary/reV.rep_profiles.rep_profiles.RepProfiles.rst", "_autosummary/reV.rep_profiles.rep_profiles.RepProfilesBase.rst", 
"_autosummary/reV.rep_profiles.rep_profiles.RepresentativeMethods.rst", "_autosummary/reV.supply_curve.rst", "_autosummary/reV.supply_curve.aggregation.rst", "_autosummary/reV.supply_curve.aggregation.AbstractAggFileHandler.rst", "_autosummary/reV.supply_curve.aggregation.AggFileHandler.rst", "_autosummary/reV.supply_curve.aggregation.Aggregation.rst", "_autosummary/reV.supply_curve.aggregation.BaseAggregation.rst", "_autosummary/reV.supply_curve.cli_sc_aggregation.rst", "_autosummary/reV.supply_curve.cli_supply_curve.rst", "_autosummary/reV.supply_curve.competitive_wind_farms.rst", "_autosummary/reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.rst", "_autosummary/reV.supply_curve.exclusions.rst", "_autosummary/reV.supply_curve.exclusions.ExclusionMask.rst", "_autosummary/reV.supply_curve.exclusions.ExclusionMaskFromDict.rst", "_autosummary/reV.supply_curve.exclusions.FrictionMask.rst", "_autosummary/reV.supply_curve.exclusions.LayerMask.rst", "_autosummary/reV.supply_curve.extent.rst", "_autosummary/reV.supply_curve.extent.SupplyCurveExtent.rst", "_autosummary/reV.supply_curve.points.rst", "_autosummary/reV.supply_curve.points.AbstractSupplyCurvePoint.rst", "_autosummary/reV.supply_curve.points.AggregationSupplyCurvePoint.rst", "_autosummary/reV.supply_curve.points.GenerationSupplyCurvePoint.rst", "_autosummary/reV.supply_curve.points.SupplyCurvePoint.rst", "_autosummary/reV.supply_curve.sc_aggregation.rst", "_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.rst", "_autosummary/reV.supply_curve.sc_aggregation.SupplyCurveAggregation.rst", "_autosummary/reV.supply_curve.supply_curve.rst", "_autosummary/reV.supply_curve.supply_curve.SupplyCurve.rst", "_autosummary/reV.supply_curve.tech_mapping.rst", "_autosummary/reV.supply_curve.tech_mapping.TechMapping.rst", "_autosummary/reV.utilities.rst", "_autosummary/reV.utilities.ModuleName.rst", "_autosummary/reV.utilities.cli_functions.rst", 
"_autosummary/reV.utilities.cli_functions.format_analysis_years.rst", "_autosummary/reV.utilities.cli_functions.init_cli_logging.rst", "_autosummary/reV.utilities.cli_functions.parse_from_pipeline.rst", "_autosummary/reV.utilities.curtailment.rst", "_autosummary/reV.utilities.curtailment.curtail.rst", "_autosummary/reV.utilities.exceptions.rst", "_autosummary/reV.utilities.exceptions.CollectionRuntimeError.rst", "_autosummary/reV.utilities.exceptions.CollectionValueError.rst", "_autosummary/reV.utilities.exceptions.CollectionWarning.rst", "_autosummary/reV.utilities.exceptions.ConfigError.rst", "_autosummary/reV.utilities.exceptions.ConfigWarning.rst", "_autosummary/reV.utilities.exceptions.DataShapeError.rst", "_autosummary/reV.utilities.exceptions.EmptySupplyCurvePointError.rst", "_autosummary/reV.utilities.exceptions.ExclusionLayerError.rst", "_autosummary/reV.utilities.exceptions.ExecutionError.rst", "_autosummary/reV.utilities.exceptions.ExtrapolationWarning.rst", "_autosummary/reV.utilities.exceptions.FileInputError.rst", "_autosummary/reV.utilities.exceptions.FileInputWarning.rst", "_autosummary/reV.utilities.exceptions.HandlerKeyError.rst", "_autosummary/reV.utilities.exceptions.HandlerRuntimeError.rst", "_autosummary/reV.utilities.exceptions.HandlerValueError.rst", "_autosummary/reV.utilities.exceptions.HandlerWarning.rst", "_autosummary/reV.utilities.exceptions.InputError.rst", "_autosummary/reV.utilities.exceptions.InputWarning.rst", "_autosummary/reV.utilities.exceptions.JSONError.rst", "_autosummary/reV.utilities.exceptions.MultiFileExclusionError.rst", "_autosummary/reV.utilities.exceptions.NearestNeighborError.rst", "_autosummary/reV.utilities.exceptions.OffshoreWindInputError.rst", "_autosummary/reV.utilities.exceptions.OffshoreWindInputWarning.rst", "_autosummary/reV.utilities.exceptions.OutputWarning.rst", "_autosummary/reV.utilities.exceptions.ParallelExecutionWarning.rst", "_autosummary/reV.utilities.exceptions.PipelineError.rst", 
"_autosummary/reV.utilities.exceptions.ProjectPointsValueError.rst", "_autosummary/reV.utilities.exceptions.PySAMVersionError.rst", "_autosummary/reV.utilities.exceptions.PySAMVersionWarning.rst", "_autosummary/reV.utilities.exceptions.ResourceError.rst", "_autosummary/reV.utilities.exceptions.SAMExecutionError.rst", "_autosummary/reV.utilities.exceptions.SAMExecutionWarning.rst", "_autosummary/reV.utilities.exceptions.SAMInputError.rst", "_autosummary/reV.utilities.exceptions.SAMInputWarning.rst", "_autosummary/reV.utilities.exceptions.SlurmWarning.rst", "_autosummary/reV.utilities.exceptions.SupplyCurveError.rst", "_autosummary/reV.utilities.exceptions.SupplyCurveInputError.rst", "_autosummary/reV.utilities.exceptions.WhileLoopPackingError.rst", "_autosummary/reV.utilities.exceptions.reVDeprecationWarning.rst", "_autosummary/reV.utilities.exceptions.reVError.rst", "_autosummary/reV.utilities.exceptions.reVLossesValueError.rst", "_autosummary/reV.utilities.exceptions.reVLossesWarning.rst", "_autosummary/reV.utilities.log_versions.rst", "_autosummary/reV.utilities.pytest_utils.rst", "_autosummary/reV.utilities.pytest_utils.make_fake_h5_chunks.rst", "_autosummary/reV.utilities.pytest_utils.pd_date_range.rst", "_autosummary/reV.utilities.pytest_utils.write_chunk.rst", "_autosummary/reV.utilities.slots.rst", "_autosummary/reV.utilities.slots.SlottedDict.rst", "_autosummary/reV.version.rst", "_cli/cli.rst", "_cli/reV.rst", "_cli/reV batch.rst", "_cli/reV bespoke.rst", "_cli/reV collect.rst", "_cli/reV econ.rst", "_cli/reV generation.rst", "_cli/reV hybrids.rst", "_cli/reV multiyear.rst", "_cli/reV nrwal.rst", "_cli/reV pipeline.rst", "_cli/reV project-points.rst", "_cli/reV qa-qc.rst", "_cli/reV rep-profiles.rst", "_cli/reV reset-status.rst", "_cli/reV script.rst", "_cli/reV status.rst", "_cli/reV supply-curve.rst", "_cli/reV supply-curve-aggregation.rst", "_cli/reV template-configs.rst", "api.rst", "index.rst", "misc/examples.rst", 
"misc/examples.advanced_econ_modeling.rst", "misc/examples.aws_pcluster.rst", "misc/examples.batched_execution.rst", "misc/examples.eagle_node_requests.rst", "misc/examples.full_pipeline_execution.rst", "misc/examples.marine_energy.rst", "misc/examples.offshore_wind.rst", "misc/examples.project_points.rst", "misc/examples.rev_losses.rst", "misc/examples.running_locally.rst", "misc/examples.running_with_hsds.rst", "misc/examples.single_module_execution.rst", "misc/installation.rst", "misc/installation_usage.rst"], "titles": ["reV", "reV.SAM", "reV.SAM.SAM", "reV.SAM.SAM.RevPySam", "reV.SAM.SAM.Sam", "reV.SAM.SAM.SamResourceRetriever", "reV.SAM.defaults", "reV.SAM.defaults.AbstractDefaultFromConfigFile", "reV.SAM.defaults.DefaultGeothermal", "reV.SAM.defaults.DefaultLCOE", "reV.SAM.defaults.DefaultLinearFresnelDsgIph", "reV.SAM.defaults.DefaultMhkWave", "reV.SAM.defaults.DefaultPvSamv1", "reV.SAM.defaults.DefaultPvWattsv5", "reV.SAM.defaults.DefaultPvWattsv8", "reV.SAM.defaults.DefaultSingleOwner", "reV.SAM.defaults.DefaultSwh", "reV.SAM.defaults.DefaultTcsMoltenSalt", "reV.SAM.defaults.DefaultTroughPhysicalProcessHeat", "reV.SAM.defaults.DefaultWindPower", "reV.SAM.econ", "reV.SAM.econ.Economic", "reV.SAM.econ.LCOE", "reV.SAM.econ.SingleOwner", "reV.SAM.generation", "reV.SAM.generation.AbstractSamGeneration", "reV.SAM.generation.AbstractSamGenerationFromWeatherFile", "reV.SAM.generation.AbstractSamPv", "reV.SAM.generation.AbstractSamSolar", "reV.SAM.generation.AbstractSamWind", "reV.SAM.generation.Geothermal", "reV.SAM.generation.LinearDirectSteam", "reV.SAM.generation.MhkWave", "reV.SAM.generation.PvSamv1", "reV.SAM.generation.PvWattsv5", "reV.SAM.generation.PvWattsv7", "reV.SAM.generation.PvWattsv8", "reV.SAM.generation.SolarWaterHeat", "reV.SAM.generation.TcsMoltenSalt", "reV.SAM.generation.TroughPhysicalHeat", "reV.SAM.generation.WindPower", "reV.SAM.generation.WindPowerPD", "reV.SAM.version_checker", "reV.SAM.version_checker.PySamVersionChecker", 
"reV.SAM.windbos", "reV.SAM.windbos.WindBos", "reV.bespoke", "reV.bespoke.bespoke", "reV.bespoke.bespoke.BespokeMultiPlantData", "reV.bespoke.bespoke.BespokeSinglePlant", "reV.bespoke.bespoke.BespokeSinglePlantData", "reV.bespoke.bespoke.BespokeWindPlants", "reV.bespoke.cli_bespoke", "reV.bespoke.gradient_free", "reV.bespoke.gradient_free.GeneticAlgorithm", "reV.bespoke.pack_turbs", "reV.bespoke.pack_turbs.PackTurbines", "reV.bespoke.pack_turbs.smallest_area_with_tiebreakers", "reV.bespoke.place_turbines", "reV.bespoke.place_turbines.PlaceTurbines", "reV.bespoke.place_turbines.none_until_optimized", "reV.bespoke.plotting_functions", "reV.bespoke.plotting_functions.get_xy", "reV.bespoke.plotting_functions.plot_poly", "reV.bespoke.plotting_functions.plot_turbines", "reV.bespoke.plotting_functions.plot_windrose", "reV.cli", "reV.config", "reV.config.base_analysis_config", "reV.config.base_analysis_config.AnalysisConfig", "reV.config.base_config", "reV.config.base_config.BaseConfig", "reV.config.cli_project_points", "reV.config.curtailment", "reV.config.curtailment.Curtailment", "reV.config.execution", "reV.config.execution.BaseExecutionConfig", "reV.config.execution.HPCConfig", "reV.config.execution.SlurmConfig", "reV.config.output_request", "reV.config.output_request.OutputRequest", "reV.config.output_request.SAMOutputRequest", "reV.config.project_points", "reV.config.project_points.PointsControl", "reV.config.project_points.ProjectPoints", "reV.config.sam_config", "reV.config.sam_config.SAMConfig", "reV.config.sam_config.SAMInputsChecker", "reV.econ", "reV.econ.cli_econ", "reV.econ.econ", "reV.econ.econ.Econ", "reV.econ.economies_of_scale", "reV.econ.economies_of_scale.EconomiesOfScale", "reV.econ.utilities", "reV.econ.utilities.lcoe_fcr", "reV.generation", "reV.generation.base", "reV.generation.base.BaseGen", "reV.generation.cli_gen", "reV.generation.generation", "reV.generation.generation.Gen", "reV.handlers", "reV.handlers.cli_collect", 
"reV.handlers.cli_multi_year", "reV.handlers.exclusions", "reV.handlers.exclusions.ExclusionLayers", "reV.handlers.multi_year", "reV.handlers.multi_year.MultiYear", "reV.handlers.multi_year.MultiYearGroup", "reV.handlers.multi_year.my_collect_groups", "reV.handlers.outputs", "reV.handlers.outputs.Outputs", "reV.handlers.transmission", "reV.handlers.transmission.TransmissionCosts", "reV.handlers.transmission.TransmissionFeatures", "reV.hybrids", "reV.hybrids.cli_hybrids", "reV.hybrids.hybrid_methods", "reV.hybrids.hybrid_methods.aggregate_capacity", "reV.hybrids.hybrid_methods.aggregate_capacity_factor", "reV.hybrids.hybrid_methods.aggregate_solar_capacity", "reV.hybrids.hybrid_methods.aggregate_wind_capacity", "reV.hybrids.hybrids", "reV.hybrids.hybrids.ColNameFormatter", "reV.hybrids.hybrids.Hybridization", "reV.hybrids.hybrids.HybridsData", "reV.hybrids.hybrids.MetaHybridizer", "reV.hybrids.hybrids.RatioColumns", "reV.losses", "reV.losses.power_curve", "reV.losses.power_curve.AbstractPowerCurveTransformation", "reV.losses.power_curve.ExponentialStretching", "reV.losses.power_curve.HorizontalTranslation", "reV.losses.power_curve.LinearStretching", "reV.losses.power_curve.PowerCurve", "reV.losses.power_curve.PowerCurveLosses", "reV.losses.power_curve.PowerCurveLossesInput", "reV.losses.power_curve.PowerCurveLossesMixin", "reV.losses.power_curve.PowerCurveWindResource", "reV.losses.power_curve.TRANSFORMATIONS", "reV.losses.power_curve.adjust_power_curve", "reV.losses.scheduled", "reV.losses.scheduled.Outage", "reV.losses.scheduled.OutageScheduler", "reV.losses.scheduled.ScheduledLossesMixin", "reV.losses.scheduled.SingleOutageScheduler", "reV.losses.utils", "reV.losses.utils.convert_to_full_month_names", "reV.losses.utils.filter_unknown_month_names", "reV.losses.utils.format_month_name", "reV.losses.utils.full_month_name_from_abbr", "reV.losses.utils.hourly_indices_for_months", "reV.losses.utils.month_index", "reV.losses.utils.month_indices", "reV.nrwal", 
"reV.nrwal.cli_nrwal", "reV.nrwal.nrwal", "reV.nrwal.nrwal.RevNrwal", "reV.qa_qc", "reV.qa_qc.cli_qa_qc", "reV.qa_qc.cli_qa_qc.cli_qa_qc", "reV.qa_qc.qa_qc", "reV.qa_qc.qa_qc.QaQc", "reV.qa_qc.qa_qc.QaQcModule", "reV.qa_qc.summary", "reV.qa_qc.summary.ExclusionsMask", "reV.qa_qc.summary.PlotBase", "reV.qa_qc.summary.SummarizeH5", "reV.qa_qc.summary.SummarizeSupplyCurve", "reV.qa_qc.summary.SummaryPlots", "reV.qa_qc.summary.SupplyCurvePlot", "reV.rep_profiles", "reV.rep_profiles.cli_rep_profiles", "reV.rep_profiles.rep_profiles", "reV.rep_profiles.rep_profiles.RegionRepProfile", "reV.rep_profiles.rep_profiles.RepProfiles", "reV.rep_profiles.rep_profiles.RepProfilesBase", "reV.rep_profiles.rep_profiles.RepresentativeMethods", "reV.supply_curve", "reV.supply_curve.aggregation", "reV.supply_curve.aggregation.AbstractAggFileHandler", "reV.supply_curve.aggregation.AggFileHandler", "reV.supply_curve.aggregation.Aggregation", "reV.supply_curve.aggregation.BaseAggregation", "reV.supply_curve.cli_sc_aggregation", "reV.supply_curve.cli_supply_curve", "reV.supply_curve.competitive_wind_farms", "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms", "reV.supply_curve.exclusions", "reV.supply_curve.exclusions.ExclusionMask", "reV.supply_curve.exclusions.ExclusionMaskFromDict", "reV.supply_curve.exclusions.FrictionMask", "reV.supply_curve.exclusions.LayerMask", "reV.supply_curve.extent", "reV.supply_curve.extent.SupplyCurveExtent", "reV.supply_curve.points", "reV.supply_curve.points.AbstractSupplyCurvePoint", "reV.supply_curve.points.AggregationSupplyCurvePoint", "reV.supply_curve.points.GenerationSupplyCurvePoint", "reV.supply_curve.points.SupplyCurvePoint", "reV.supply_curve.sc_aggregation", "reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler", "reV.supply_curve.sc_aggregation.SupplyCurveAggregation", "reV.supply_curve.supply_curve", "reV.supply_curve.supply_curve.SupplyCurve", "reV.supply_curve.tech_mapping", "reV.supply_curve.tech_mapping.TechMapping", 
"reV.utilities", "reV.utilities.ModuleName", "reV.utilities.cli_functions", "reV.utilities.cli_functions.format_analysis_years", "reV.utilities.cli_functions.init_cli_logging", "reV.utilities.cli_functions.parse_from_pipeline", "reV.utilities.curtailment", "reV.utilities.curtailment.curtail", "reV.utilities.exceptions", "reV.utilities.exceptions.CollectionRuntimeError", "reV.utilities.exceptions.CollectionValueError", "reV.utilities.exceptions.CollectionWarning", "reV.utilities.exceptions.ConfigError", "reV.utilities.exceptions.ConfigWarning", "reV.utilities.exceptions.DataShapeError", "reV.utilities.exceptions.EmptySupplyCurvePointError", "reV.utilities.exceptions.ExclusionLayerError", "reV.utilities.exceptions.ExecutionError", "reV.utilities.exceptions.ExtrapolationWarning", "reV.utilities.exceptions.FileInputError", "reV.utilities.exceptions.FileInputWarning", "reV.utilities.exceptions.HandlerKeyError", "reV.utilities.exceptions.HandlerRuntimeError", "reV.utilities.exceptions.HandlerValueError", "reV.utilities.exceptions.HandlerWarning", "reV.utilities.exceptions.InputError", "reV.utilities.exceptions.InputWarning", "reV.utilities.exceptions.JSONError", "reV.utilities.exceptions.MultiFileExclusionError", "reV.utilities.exceptions.NearestNeighborError", "reV.utilities.exceptions.OffshoreWindInputError", "reV.utilities.exceptions.OffshoreWindInputWarning", "reV.utilities.exceptions.OutputWarning", "reV.utilities.exceptions.ParallelExecutionWarning", "reV.utilities.exceptions.PipelineError", "reV.utilities.exceptions.ProjectPointsValueError", "reV.utilities.exceptions.PySAMVersionError", "reV.utilities.exceptions.PySAMVersionWarning", "reV.utilities.exceptions.ResourceError", "reV.utilities.exceptions.SAMExecutionError", "reV.utilities.exceptions.SAMExecutionWarning", "reV.utilities.exceptions.SAMInputError", "reV.utilities.exceptions.SAMInputWarning", "reV.utilities.exceptions.SlurmWarning", "reV.utilities.exceptions.SupplyCurveError", 
"reV.utilities.exceptions.SupplyCurveInputError", "reV.utilities.exceptions.WhileLoopPackingError", "reV.utilities.exceptions.reVDeprecationWarning", "reV.utilities.exceptions.reVError", "reV.utilities.exceptions.reVLossesValueError", "reV.utilities.exceptions.reVLossesWarning", "reV.utilities.log_versions", "reV.utilities.pytest_utils", "reV.utilities.pytest_utils.make_fake_h5_chunks", "reV.utilities.pytest_utils.pd_date_range", "reV.utilities.pytest_utils.write_chunk", "reV.utilities.slots", "reV.utilities.slots.SlottedDict", "reV.version", "Command Line Interfaces (CLIs)", "reV", "reV batch", "reV bespoke", "reV collect", "reV econ", "reV generation", "reV hybrids", "reV multiyear", "reV nrwal", "reV pipeline", "reV project-points", "reV qa-qc", "reV rep-profiles", "reV reset-status", "reV script", "reV status", "reV supply-curve", "reV supply-curve-aggregation", "reV template-configs", "<no title>", "reV documentation", "Examples", "SAM Single Owner Modeling", "Running reV on an AWS Parallel Cluster", "Batched Execution", "Eagle Node Requests", "Full Pipeline Execution", "reV Marine Energy", "Offshore Wind Modeling", "reV Project Points", "reV Losses", "Run reV locally", "Running with HSDS", "Single Module Execution", "Installation", "Installation and Usage"], "terms": {"The": [0, 5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 50, 51, 54, 56, 59, 63, 64, 65, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 98, 101, 110, 112, 124, 125, 127, 131, 132, 133, 134, 135, 136, 137, 143, 144, 145, 146, 152, 153, 154, 158, 161, 175, 176, 177, 178, 183, 193, 195, 198, 199, 200, 202, 203, 205, 206, 209, 212, 213, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 286, 288, 290, 291, 292, 293, 294, 295, 296, 298, 300, 301, 302], "renew": [0, 51, 101, 270, 273, 288, 291, 295, 302], "energi": [0, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 93, 95, 101, 137, 199, 203, 270, 273, 285, 
288, 289, 291, 302], "potenti": [0, 30, 59, 69, 71, 74, 76, 77, 78, 86, 110, 203, 239, 275, 285, 288, 302], "model": [0, 3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 44, 45, 49, 101, 268, 273, 288, 289, 295, 302], "interfac": [1, 2, 20, 24, 66, 131, 268, 289, 291], "modul": [1, 2, 3, 4, 5, 7, 8, 13, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 49, 51, 55, 69, 71, 74, 76, 77, 78, 79, 86, 88, 90, 91, 92, 97, 98, 100, 101, 113, 123, 129, 130, 142, 150, 155, 156, 157, 158, 159, 161, 164, 203, 204, 206, 209, 213, 257, 258, 268, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 289, 290, 291, 292, 294, 298, 302], "wrap": [2, 20, 24], "nrel": [2, 20, 24, 27, 28, 33, 34, 35, 36, 38, 51, 203, 270, 288, 291, 295, 300, 302], "pysam": [2, 3, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 51, 91, 98, 101, 259, 270, 272, 273, 295, 297, 299], "librari": [2, 157, 158, 276, 282, 300], "addit": [2, 20, 24, 69, 71, 74, 76, 77, 78, 86, 136, 158, 163, 166, 170, 171, 198, 199, 200, 203, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "featur": [2, 20, 24, 77, 78, 113, 114, 115, 205, 255, 261, 263, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 290, 293, 294], "class": [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 47, 48, 49, 50, 51, 53, 54, 55, 56, 58, 59, 64, 65, 68, 69, 70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 90, 91, 92, 93, 97, 98, 100, 101, 105, 106, 107, 108, 109, 111, 112, 113, 114, 115, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 142, 143, 144, 145, 146, 157, 158, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 174, 175, 176, 177, 178, 
180, 181, 182, 183, 184, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 264, 265, 270, 272, 273, 274, 276, 285, 288], "meta": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 91, 98, 101, 108, 112, 118, 119, 120, 121, 122, 125, 126, 127, 158, 168, 176, 177, 195, 198, 199, 200, 202, 203, 205, 207, 263, 270, 272, 273, 274, 276, 278, 284, 285, 300], "sam_sys_input": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 138, 145], "output_request": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 91, 98, 101, 158, 270, 272, 273, 276, 298, 299, 300], "site_sys_input": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "none": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 60, 63, 64, 65, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 98, 101, 106, 108, 109, 110, 112, 114, 115, 119, 120, 121, 122, 125, 127, 136, 141, 145, 146, 151, 158, 161, 163, 166, 168, 169, 170, 171, 175, 176, 177, 178, 181, 182, 183, 184, 188, 190, 191, 193, 198, 199, 200, 202, 203, 205, 207, 211, 215, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291, 298, 300], "sourc": [3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 45, 48, 49, 50, 51, 54, 56, 57, 59, 60, 62, 63, 64, 65, 69, 71, 74, 76, 77, 78, 80, 81, 83, 84, 86, 87, 91, 93, 95, 98, 101, 106, 108, 109, 110, 112, 114, 115, 119, 120, 121, 122, 124, 125, 126, 127, 131, 132, 133, 134, 135, 136, 137, 138, 139, 141, 143, 144, 145, 146, 148, 149, 150, 151, 152, 153, 154, 158, 161, 163, 164, 166, 167, 168, 169, 170, 171, 175, 176, 177, 178, 181, 182, 183, 184, 188, 190, 191, 192, 193, 195, 197, 198, 199, 200, 202, 203, 205, 207, 209, 211, 212, 213, 215, 217, 218, 219, 
220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 261, 262, 263, 265, 272, 276, 288, 291, 296, 302], "base": [3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 45, 48, 49, 50, 51, 54, 56, 59, 68, 69, 70, 71, 74, 76, 77, 78, 80, 81, 83, 84, 86, 87, 91, 93, 101, 106, 108, 109, 112, 114, 115, 124, 125, 126, 127, 128, 131, 132, 133, 134, 135, 136, 137, 138, 139, 143, 144, 145, 146, 158, 163, 164, 166, 167, 168, 169, 170, 171, 175, 176, 177, 178, 181, 182, 183, 184, 188, 190, 191, 192, 193, 195, 197, 198, 199, 200, 202, 203, 205, 207, 209, 215, 265, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291, 292, 298], "simul": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 91, 98, 101, 223, 247, 249, 252, 253, 270, 273], "gener": [3, 5, 22, 23, 49, 51, 54, 69, 74, 84, 86, 91, 110, 125, 131, 132, 133, 134, 135, 136, 138, 139, 144, 145, 155, 157, 158, 175, 176, 177, 183, 189, 198, 199, 200, 202, 203, 205, 210, 215, 256, 267, 268, 269, 270, 271, 272, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 290, 291, 296, 298, 299, 300, 301, 303], "econ": [3, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 97, 98, 101, 155, 199, 202, 203, 267, 268, 273, 277, 285, 288, 290, 297, 299, 302], "initi": [3, 7, 8, 13, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 54, 59, 84, 91, 101, 108, 110, 112, 125, 136, 158, 176, 177, 191, 195, 198, 199, 200, 212, 269, 270, 272, 273, 275, 276, 277, 288, 291], "object": [3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 45, 48, 49, 50, 51, 54, 56, 57, 59, 69, 71, 74, 76, 77, 78, 
80, 81, 83, 84, 86, 87, 91, 93, 98, 101, 106, 109, 112, 115, 124, 125, 126, 127, 131, 135, 136, 137, 138, 139, 143, 144, 145, 146, 158, 163, 164, 167, 168, 169, 175, 178, 181, 182, 188, 190, 193, 195, 198, 199, 200, 202, 205, 207, 215, 259, 265, 270, 291], "paramet": [3, 4, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 45, 48, 49, 50, 51, 54, 56, 57, 59, 60, 62, 63, 64, 65, 69, 71, 74, 76, 77, 78, 80, 81, 83, 84, 86, 87, 91, 93, 95, 98, 101, 106, 108, 109, 110, 112, 114, 115, 119, 120, 121, 122, 124, 125, 126, 127, 131, 132, 133, 134, 135, 136, 137, 139, 141, 143, 144, 145, 146, 148, 149, 150, 151, 152, 153, 154, 158, 161, 163, 164, 166, 167, 168, 169, 170, 171, 175, 176, 177, 178, 181, 182, 183, 184, 188, 190, 191, 192, 193, 195, 197, 198, 199, 200, 202, 203, 205, 207, 211, 212, 213, 215, 259, 261, 263, 265], "pd": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 84, 91, 93, 98, 101, 108, 112, 125, 126, 127, 145, 158, 169, 175, 176, 177, 195, 199, 202, 203, 205, 261, 262, 263, 270, 272, 273, 276, 280, 285, 291], "datafram": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 84, 91, 93, 98, 101, 108, 112, 114, 115, 119, 120, 121, 122, 124, 125, 126, 127, 145, 158, 166, 167, 168, 169, 170, 171, 175, 176, 177, 188, 195, 199, 202, 205, 270, 272, 273, 276, 280, 284], "seri": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 91, 98, 101, 119, 120, 121, 122, 145, 198, 199, 270, 272, 273], "data": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 59, 69, 71, 74, 76, 77, 78, 84, 86, 91, 93, 98, 101, 102, 108, 110, 112, 119, 120, 121, 122, 125, 126, 127, 136, 139, 141, 145, 157, 158, 161, 163, 165, 166, 167, 168, 170, 171, 175, 176, 177, 178, 183, 184, 191, 193, 195, 198, 199, 200, 202, 203, 205, 222, 224, 240, 261, 263, 265, 270, 271, 272, 273, 
275, 276, 279, 280, 284, 285, 288, 290, 291, 294, 295, 296, 297, 300], "correspond": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 48, 49, 50, 51, 69, 71, 74, 76, 77, 78, 83, 84, 86, 91, 98, 101, 108, 110, 112, 125, 127, 131, 135, 136, 143, 146, 152, 153, 158, 175, 176, 177, 183, 195, 198, 199, 203, 207, 270, 271, 272, 273, 275, 276, 280, 290], "resourc": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 51, 59, 74, 84, 86, 91, 98, 101, 106, 108, 112, 131, 136, 138, 139, 141, 145, 157, 158, 175, 176, 181, 182, 183, 184, 191, 195, 198, 199, 200, 202, 203, 206, 207, 215, 246, 270, 272, 273, 276, 278, 280, 282, 285, 288, 291, 294, 296, 297, 298, 299, 300], "input": [3, 4, 5, 7, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 49, 51, 54, 59, 69, 71, 73, 74, 76, 77, 78, 84, 85, 86, 87, 91, 93, 98, 101, 108, 109, 110, 112, 124, 125, 126, 127, 131, 132, 133, 134, 135, 136, 137, 138, 139, 143, 144, 145, 146, 148, 149, 151, 152, 153, 154, 158, 164, 176, 183, 184, 193, 198, 199, 200, 202, 203, 205, 207, 211, 213, 215, 220, 221, 227, 228, 233, 234, 236, 238, 239, 249, 250, 269, 270, 271, 272, 273, 274, 275, 276, 277, 280, 282, 283, 284, 285, 288, 289, 290, 291, 292, 296, 298, 299, 300], "singl": [3, 4, 5, 15, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 48, 49, 50, 51, 54, 69, 78, 84, 90, 91, 98, 101, 106, 110, 125, 127, 131, 135, 136, 139, 143, 144, 145, 146, 158, 176, 183, 193, 198, 199, 200, 203, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 280, 282, 285, 288, 289, 292, 297], "locat": [3, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 50, 51, 54, 56, 59, 64, 84, 91, 101, 108, 112, 144, 145, 178, 195, 203, 213, 270, 272, 273], "should": [3, 5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 54, 59, 60, 77, 78, 84, 91, 93, 98, 101, 108, 109, 110, 
112, 124, 125, 127, 131, 133, 135, 136, 137, 143, 145, 158, 161, 175, 176, 177, 178, 183, 188, 193, 198, 199, 200, 203, 205, 209, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 288, 290, 291, 292, 293, 294, 296, 302], "includ": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 84, 91, 93, 98, 101, 108, 112, 143, 152, 154, 158, 175, 176, 177, 183, 188, 191, 193, 195, 198, 199, 200, 203, 205, 207, 263, 269, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 283, 284, 285, 288, 290, 291, 296, 298], "valu": [3, 4, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 48, 49, 50, 51, 54, 57, 59, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 93, 98, 101, 106, 108, 110, 112, 125, 127, 128, 131, 132, 133, 134, 135, 136, 137, 139, 143, 144, 145, 152, 158, 161, 163, 170, 171, 175, 176, 177, 178, 181, 182, 183, 184, 188, 190, 191, 192, 193, 195, 198, 199, 200, 202, 203, 205, 207, 209, 211, 213, 226, 243, 257, 265, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 283, 284, 285, 290, 291, 292, 293, 298], "latitud": [3, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 59, 84, 101, 106, 108, 112, 170, 190, 191, 192, 195, 198, 199, 200, 203, 207, 273, 278, 297, 299, 300], "longitud": [3, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 59, 84, 101, 106, 108, 112, 170, 190, 191, 192, 195, 198, 199, 200, 203, 207, 273, 278, 297, 299, 300], "elev": [3, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 101, 198, 199, 203, 273], "timezon": [3, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 101, 198, 199, 203, 273], "can": [3, 5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 51, 56, 59, 74, 78, 80, 81, 84, 91, 95, 98, 101, 108, 109, 110, 112, 114, 115, 125, 136, 137, 139, 143, 144, 146, 148, 158, 161, 166, 170, 171, 176, 181, 182, 183, 184, 191, 193, 195, 198, 199, 202, 
203, 205, 211, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 288, 290, 291, 292, 293, 294, 296, 297, 298, 299, 300, 301, 302], "run": [3, 21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 48, 49, 50, 51, 54, 59, 69, 71, 74, 76, 77, 78, 86, 87, 91, 98, 101, 108, 110, 112, 125, 127, 131, 144, 157, 158, 161, 163, 168, 169, 176, 177, 178, 183, 184, 188, 190, 191, 192, 198, 199, 203, 205, 207, 211, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 289, 293, 294, 295, 296, 297, 298, 301, 303], "dict": [3, 4, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 45, 48, 49, 51, 69, 71, 74, 76, 77, 78, 84, 86, 87, 91, 93, 98, 101, 106, 108, 110, 112, 125, 127, 137, 143, 158, 161, 163, 164, 166, 170, 171, 176, 177, 181, 182, 183, 184, 190, 191, 192, 193, 195, 198, 199, 200, 202, 203, 205, 213, 263, 265, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285], "site": [3, 4, 5, 8, 13, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 51, 59, 76, 77, 78, 83, 84, 91, 98, 101, 108, 112, 136, 138, 139, 141, 144, 158, 163, 168, 178, 183, 198, 199, 270, 272, 273, 276, 288, 289, 290, 291, 297], "agnost": [3, 21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "system": [3, 21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 44, 45, 48, 49, 50, 51, 59, 93, 215, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 300], "argument": [3, 5, 21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 59, 110, 181, 182, 183, 184, 191, 193, 198, 199, 200, 202, 203, 205, 215, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 286, 292, 294], "list": [3, 4, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 54, 69, 71, 74, 76, 77, 78, 80, 81, 83, 84, 86, 91, 93, 98, 
101, 106, 108, 109, 112, 135, 143, 144, 145, 148, 149, 152, 158, 163, 164, 168, 169, 170, 171, 175, 176, 177, 178, 181, 182, 183, 184, 188, 190, 191, 192, 193, 195, 198, 199, 200, 202, 203, 205, 207, 211, 213, 261, 263, 265, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 292, 297], "request": [3, 21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 77, 78, 79, 80, 81, 84, 91, 98, 101, 145, 146, 158, 195, 203, 268, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 283, 284, 285, 286, 288, 289, 291, 302], "output": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 59, 62, 69, 74, 79, 80, 81, 86, 91, 93, 98, 101, 107, 108, 109, 110, 125, 126, 152, 154, 157, 158, 161, 163, 164, 166, 170, 171, 175, 176, 177, 182, 183, 188, 198, 199, 200, 202, 203, 205, 207, 212, 240, 261, 263, 265, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 283, 284, 285, 288, 289, 291, 294, 299, 300, 301], "e": [3, 5, 7, 21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 69, 71, 74, 76, 77, 78, 80, 81, 86, 91, 101, 108, 109, 112, 125, 127, 131, 133, 136, 144, 145, 158, 175, 176, 177, 178, 193, 203, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 283, 284, 285, 288, 291, 302], "g": [3, 5, 7, 21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 57, 76, 77, 78, 86, 91, 101, 108, 109, 112, 125, 127, 158, 175, 176, 177, 178, 193, 203, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 283, 284, 285, 291], "cf_mean": [3, 5, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 101, 109, 110, 158, 199, 203, 270, 273, 275, 276, 285, 298, 299, 300], "annual_energi": [3, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "cf_profil": [3, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 101, 110, 175, 176, 177, 275, 280, 298, 299, 300], "gen_profil": [3, 25, 26, 27, 28, 
29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 298], "energy_yield": [3, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "ppa_pric": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 91], "lcoe_fcr": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 91, 101, 110, 199, 203, 272, 275, 285], "option": [3, 5, 21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 54, 56, 59, 63, 64, 65, 69, 71, 75, 76, 77, 78, 84, 86, 91, 98, 101, 108, 109, 110, 112, 114, 115, 125, 127, 136, 137, 141, 143, 144, 145, 146, 158, 161, 163, 166, 168, 169, 170, 171, 176, 181, 182, 183, 184, 188, 191, 193, 198, 199, 200, 202, 203, 205, 207, 211, 212, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 291, 293, 297, 298, 303], "set": [3, 4, 5, 21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 56, 59, 69, 71, 74, 76, 77, 78, 80, 81, 83, 86, 91, 98, 101, 108, 110, 112, 125, 126, 127, 131, 135, 143, 144, 146, 154, 158, 176, 178, 190, 191, 192, 193, 203, 209, 213, 215, 265, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 280, 285, 288, 289, 290, 292, 294, 295, 296, 299, 301], "specif": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 59, 69, 71, 74, 76, 77, 78, 84, 86, 91, 98, 101, 106, 108, 112, 114, 115, 135, 137, 143, 144, 145, 146, 158, 193, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 290, 292, 298, 299], "complement": [3, 21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 101, 270, 273], "method": [3, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 45, 48, 49, 50, 51, 54, 56, 59, 69, 71, 74, 76, 77, 78, 80, 81, 83, 84, 86, 87, 91, 93, 95, 98, 101, 106, 108, 109, 112, 114, 115, 121, 122, 124, 125, 126, 127, 128, 131, 
132, 133, 134, 135, 136, 137, 138, 139, 143, 144, 145, 146, 158, 163, 164, 166, 167, 168, 169, 170, 171, 175, 176, 177, 178, 181, 182, 183, 184, 188, 190, 191, 192, 193, 195, 197, 198, 199, 200, 202, 203, 205, 207, 209, 214, 262, 265, 270, 280, 284, 285, 290, 291], "attribut": [3, 4, 5, 7, 8, 13, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 45, 49, 51, 57, 59, 60, 69, 71, 74, 76, 77, 78, 80, 81, 83, 84, 86, 87, 91, 93, 98, 101, 106, 108, 109, 112, 119, 120, 121, 122, 124, 125, 126, 127, 128, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 143, 145, 146, 158, 161, 163, 164, 166, 167, 168, 169, 170, 171, 175, 176, 177, 178, 181, 182, 183, 184, 188, 190, 191, 192, 193, 195, 197, 198, 199, 200, 202, 203, 207, 209, 265, 270, 272, 273, 279, 285], "properti": [3, 4, 7, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 45, 49, 51, 59, 69, 71, 74, 76, 77, 78, 83, 84, 86, 91, 93, 98, 101, 106, 108, 109, 112, 125, 126, 127, 131, 132, 133, 134, 135, 136, 137, 138, 139, 143, 145, 158, 161, 163, 164, 166, 167, 168, 169, 170, 171, 175, 176, 177, 178, 181, 182, 183, 184, 188, 190, 191, 192, 193, 195, 197, 198, 199, 200, 202, 203, 207, 273, 274, 279], "get": [3, 4, 5, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 45, 48, 49, 51, 69, 71, 74, 76, 77, 78, 83, 84, 86, 91, 93, 98, 101, 106, 108, 112, 114, 115, 125, 126, 135, 139, 148, 158, 164, 175, 176, 177, 178, 181, 182, 183, 184, 188, 190, 191, 192, 195, 197, 198, 199, 200, 202, 203, 207, 209, 211, 265, 268, 280, 285, 288, 291, 300], "number": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 54, 59, 64, 76, 77, 78, 80, 81, 83, 84, 91, 93, 98, 101, 128, 136, 141, 143, 144, 146, 161, 163, 168, 175, 176, 177, 178, 183, 188, 195, 197, 198, 199, 200, 203, 205, 207, 215, 266, 270, 271, 272, 273, 279, 280, 284, 285, 291, 298], "thi": [3, 
5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 54, 57, 59, 69, 71, 74, 76, 77, 78, 79, 83, 84, 86, 91, 93, 98, 101, 108, 109, 110, 112, 121, 122, 124, 125, 126, 127, 131, 132, 133, 134, 135, 136, 137, 138, 139, 141, 143, 144, 145, 146, 148, 151, 152, 153, 157, 158, 164, 175, 176, 177, 178, 183, 193, 195, 197, 198, 199, 200, 202, 203, 205, 206, 209, 211, 213, 215, 263, 265, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 286, 288, 290, 291, 292, 294, 295, 296, 297, 298, 300, 301, 302], "static": [3, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 69, 71, 74, 76, 77, 78, 86, 91, 93, 98, 101, 108, 112, 166, 170, 171, 178, 183, 197, 198, 199, 200, 205, 207], "get_sam_r": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "arg": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 91, 93, 98, 101, 108, 112, 195, 199, 203, 262, 268, 269, 278, 285, 292], "kwarg": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 59, 86, 91, 98, 101, 108, 112, 163, 166, 170, 171, 181, 182, 183, 184, 191, 193, 198, 199, 200, 202, 203, 262, 270, 285], "iter": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 69, 71, 74, 76, 77, 78, 80, 81, 83, 84, 86, 91, 98, 101, 135, 136, 139, 148, 149, 152, 154, 265, 272, 273], "year": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 69, 91, 93, 95, 98, 101, 104, 108, 109, 110, 145, 176, 199, 203, 211, 268, 270, 272, 273, 275, 277, 280, 285, 288, 291, 292, 302], "file": [3, 5, 7, 8, 13, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 69, 71, 74, 76, 77, 78, 84, 86, 91, 98, 101, 103, 106, 107, 108, 109, 110, 111, 112, 114, 115, 125, 126, 
138, 145, 157, 158, 161, 163, 166, 167, 168, 169, 170, 171, 175, 176, 177, 181, 182, 183, 184, 188, 190, 191, 192, 193, 195, 198, 199, 200, 202, 203, 205, 207, 212, 213, 227, 228, 235, 236, 240, 261, 263, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 282, 284, 285, 286, 288, 289, 290, 291, 292, 297, 299, 300, 301], "drop_leap": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 91, 98, 101, 273], "drop": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 91, 98, 101, 135, 198, 200, 273], "feb": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 151], "29th": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "from": [3, 4, 5, 7, 8, 13, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 54, 59, 62, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 93, 98, 101, 106, 107, 108, 109, 110, 112, 125, 126, 131, 132, 133, 134, 135, 136, 137, 138, 141, 144, 145, 146, 158, 163, 168, 171, 175, 176, 177, 178, 183, 184, 189, 190, 191, 192, 193, 198, 199, 200, 202, 203, 205, 207, 213, 265, 267, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 291, 292, 294, 295, 297, 298, 299, 300, 303], "df": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 84, 91, 98, 101, 108, 112, 177, 297], "time": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 50, 51, 54, 78, 91, 98, 101, 108, 112, 125, 126, 143, 144, 145, 146, 158, 163, 168, 175, 176, 177, 178, 198, 199, 200, 203, 207, 215, 261, 263, 270, 272, 273, 282, 285, 291, 303], "index": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 50, 51, 80, 81, 83, 84, 91, 98, 101, 106, 108, 112, 125, 126, 127, 128, 145, 152, 153, 154, 158, 175, 176, 177, 178, 183, 195, 197, 198, 199, 200, 203, 207, 270, 273], "an": [3, 21, 22, 23, 
25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 48, 49, 51, 54, 57, 59, 69, 71, 74, 76, 77, 78, 84, 86, 91, 93, 101, 106, 108, 121, 122, 124, 125, 131, 132, 133, 134, 135, 136, 137, 139, 143, 144, 145, 146, 148, 149, 152, 153, 154, 158, 161, 176, 183, 184, 195, 197, 198, 199, 200, 203, 205, 207, 211, 263, 265, 269, 270, 272, 273, 276, 277, 280, 284, 285, 288, 289, 290, 294, 296, 298, 299, 300], "contain": [3, 7, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 59, 69, 71, 74, 76, 77, 78, 84, 86, 91, 101, 106, 114, 115, 119, 120, 121, 122, 125, 126, 127, 131, 132, 133, 134, 135, 136, 137, 143, 144, 145, 146, 148, 158, 163, 166, 167, 169, 170, 171, 176, 183, 184, 190, 191, 192, 195, 198, 199, 200, 203, 205, 207, 211, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 282, 284, 285, 291, 292, 294], "panda": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 86, 91, 93, 98, 101, 108, 112, 114, 115, 145, 166, 167, 168, 169, 170, 171, 188, 198, 199, 203, 205, 259, 270, 284, 285], "month": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 74, 143, 144, 146, 148, 149, 150, 151, 152, 153, 154], "dai": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 91, 98, 101, 273], "return": [3, 4, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 45, 48, 49, 51, 57, 59, 60, 69, 71, 74, 76, 77, 78, 80, 81, 83, 84, 86, 91, 93, 95, 98, 101, 106, 108, 109, 112, 114, 115, 119, 120, 121, 122, 124, 125, 126, 127, 128, 131, 132, 133, 134, 135, 136, 138, 139, 141, 144, 146, 148, 149, 150, 151, 152, 153, 154, 158, 163, 164, 166, 167, 168, 169, 170, 171, 175, 176, 177, 178, 181, 182, 183, 184, 188, 190, 191, 192, 193, 195, 197, 198, 199, 200, 202, 203, 205, 207, 209, 211, 213, 215, 261, 270, 272, 273, 280, 284, 285, 288, 291, 302], "all": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 
32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 98, 101, 108, 110, 112, 114, 115, 124, 125, 127, 131, 135, 136, 141, 143, 144, 146, 148, 158, 161, 163, 168, 169, 170, 176, 178, 181, 182, 183, 184, 193, 195, 198, 199, 200, 202, 203, 205, 207, 209, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 283, 284, 285, 291, 292, 294, 296, 299, 300], "februari": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "timestep": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 108, 112, 178, 291], "remov": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 98, 101, 124, 154, 188, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291], "ensure_res_len": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "arr": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 178, 198, 199, 200], "time_index": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 50, 51, 86, 91, 98, 101, 108, 112, 158, 176, 177, 270, 273, 300], "ensur": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 91, 98, 101, 110, 135, 144, 190, 191, 192, 273, 275, 294], "ha": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 69, 71, 74, 76, 77, 78, 84, 86, 91, 93, 101, 125, 127, 146, 157, 158, 176, 177, 195, 203, 205, 269, 270, 273, 284, 285, 288, 291, 300], "constant": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 131, 199, 202, 203, 285], "step": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 86, 91, 101, 108, 109, 110, 112, 135, 158, 161, 163, 164, 166, 176, 198, 199, 200, 203, 205, 213, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 
280, 281, 282, 283, 284, 285, 292, 294, 296], "onli": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 51, 59, 84, 91, 98, 101, 108, 112, 125, 127, 133, 136, 144, 146, 158, 176, 184, 197, 198, 199, 200, 202, 203, 205, 211, 213, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 288, 289, 291, 293, 294, 296, 300, 302], "cover": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "365": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 91, 98, 101], "leap": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 91, 98, 101, 273], "If": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 98, 101, 108, 109, 110, 112, 114, 115, 121, 122, 125, 127, 131, 135, 136, 138, 145, 146, 151, 158, 161, 168, 175, 176, 177, 178, 193, 198, 199, 200, 203, 205, 211, 213, 268, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 286, 288, 291, 292, 294, 302], "last": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 69, 71, 74, 76, 77, 78, 80, 81, 86, 91, 98, 101], "ndarrai": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 84, 93, 95, 106, 108, 112, 114, 127, 158, 166, 167, 170, 171, 175, 178, 183, 184, 188, 190, 191, 192, 195, 198, 199, 200, 203, 207, 261, 291], "arrai": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 50, 51, 54, 56, 62, 64, 65, 91, 98, 101, 106, 108, 112, 125, 135, 136, 139, 144, 146, 158, 175, 176, 177, 178, 183, 184, 190, 191, 192, 195, 198, 199, 200, 203, 207, 261, 263, 296, 297, 298, 299, 300], "truncat": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 132, 133, 134], "datatimeindex": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 
38, 39, 40, 41], "associ": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 84, 101, 108, 188, 195, 197, 198, 199, 200, 203, 205, 269, 273, 277, 284, 285, 295, 297], "us": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 48, 49, 50, 51, 54, 57, 59, 60, 69, 71, 74, 76, 77, 78, 84, 86, 91, 93, 95, 98, 101, 106, 108, 109, 110, 112, 118, 125, 127, 131, 133, 136, 137, 138, 139, 143, 144, 145, 146, 157, 158, 161, 163, 166, 168, 170, 171, 175, 176, 177, 181, 182, 183, 184, 188, 190, 191, 192, 193, 195, 198, 199, 202, 203, 205, 207, 209, 212, 215, 260, 261, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 288, 289, 290, 291, 292, 293, 294, 296, 297, 298, 299, 301, 302], "check": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 49, 69, 71, 74, 76, 77, 78, 86, 87, 93, 108, 112, 114, 115, 121, 122, 126, 146, 158, 161, 188, 190, 191, 192, 198, 199, 200, 227, 228, 233, 268, 277, 279, 288, 291, 294, 302], "frequenc": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 65, 86], "ar": [3, 4, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 59, 69, 71, 74, 76, 77, 78, 84, 86, 91, 98, 101, 108, 109, 110, 112, 114, 115, 119, 120, 125, 127, 131, 132, 133, 134, 135, 136, 138, 141, 143, 145, 148, 150, 154, 158, 161, 176, 190, 191, 192, 193, 195, 198, 199, 203, 205, 207, 209, 213, 269, 270, 271, 272, 273, 274, 275, 276, 279, 280, 281, 282, 283, 284, 285, 286, 288, 290, 291, 292, 294, 296, 299, 300, 302], "make_datetim": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "i": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 48, 49, 50, 51, 54, 59, 60, 63, 64, 65, 69, 71, 74, 76, 77, 78, 80, 81, 83, 84, 86, 91, 93, 98, 101, 106, 108, 109, 110, 112, 114, 115, 119, 120, 121, 122, 125, 126, 127, 
128, 131, 132, 133, 134, 135, 136, 137, 138, 141, 143, 144, 145, 146, 150, 151, 153, 157, 158, 161, 168, 175, 176, 177, 178, 181, 182, 183, 184, 188, 191, 193, 195, 197, 198, 199, 200, 202, 203, 205, 206, 213, 215, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 290, 291, 292, 293, 294, 296, 297, 298, 300, 302], "datetim": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 91, 98, 101, 108, 112], "dt": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "accessor": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "classmethod": [3, 4, 5, 7, 8, 13, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 45, 49, 51, 83, 84, 87, 91, 98, 101, 108, 112, 114, 115, 124, 163, 166, 168, 169, 170, 171, 175, 178, 183, 188, 190, 191, 192, 198, 199, 200, 203, 207, 209], "get_time_interv": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "interv": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "must": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 51, 59, 91, 93, 101, 108, 109, 135, 137, 139, 143, 145, 149, 151, 152, 153, 154, 158, 175, 176, 177, 183, 184, 198, 199, 200, 202, 203, 205, 207, 270, 272, 273, 276, 280, 284, 285, 288, 291, 292, 294, 296, 298], "have": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 91, 92, 98, 101, 110, 125, 127, 135, 136, 145, 146, 158, 175, 176, 177, 198, 199, 200, 202, 203, 205, 265, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 288, 291, 292, 294, 300, 302], "access": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 91, 98, 101, 203, 270, 285, 291, 293, 297, 300], "ad": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 
37, 38, 39, 40, 41, 51, 91, 98, 101, 108, 112, 145, 158, 168, 193, 198, 199, 200, 205, 273, 276, 284, 290, 294], "time_interv": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "int": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 51, 54, 69, 71, 74, 76, 77, 78, 83, 84, 86, 91, 98, 101, 106, 108, 112, 114, 115, 135, 136, 137, 141, 143, 144, 145, 146, 153, 161, 163, 166, 168, 175, 176, 177, 178, 183, 184, 188, 193, 195, 197, 198, 199, 200, 203, 205, 207, 211, 215, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "indic": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 83, 91, 98, 101, 108, 114, 115, 127, 143, 144, 146, 152, 154, 195, 205, 207, 270, 272, 273, 284], "over": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 74, 91, 109, 269, 288, 291], "which": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 54, 59, 63, 64, 65, 74, 76, 77, 78, 84, 91, 98, 101, 110, 119, 120, 121, 122, 131, 132, 133, 134, 135, 136, 143, 145, 146, 158, 176, 178, 182, 195, 202, 203, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291, 292, 296, 297], "hour": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 74, 78, 143, 144, 145, 146, 152, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 291, 302], "count": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 80, 81, 128, 143, 146, 198, 298], "so": [3, 4, 8, 13, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 51, 69, 71, 74, 76, 77, 78, 86, 98, 101, 108, 112, 125, 127, 143, 148, 188, 203, 270, 274, 277, 285, 291, 292, 293, 294, 298], "0": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 50, 51, 54, 56, 63, 80, 81, 84, 86, 91, 98, 101, 112, 114, 115, 125, 
127, 128, 135, 137, 143, 144, 153, 154, 176, 178, 183, 191, 193, 198, 199, 200, 203, 205, 215, 262, 270, 272, 273, 274, 280, 284, 285, 288, 291, 293, 297, 298, 299, 300, 302], "5": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 54, 63, 125, 127, 153, 193, 199, 203, 205, 207, 270, 274, 284, 285, 288, 293, 297, 298, 302], "2": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 49, 51, 54, 69, 71, 74, 76, 77, 78, 86, 91, 101, 112, 128, 132, 133, 134, 136, 139, 188, 195, 197, 198, 199, 200, 203, 205, 270, 272, 273, 284, 285, 291, 297, 303], "outputs_to_utc_arr": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "convert": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 54, 86, 136, 148, 150, 151, 152, 153, 154, 158, 193, 211, 278], "like": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 54, 69, 71, 74, 76, 77, 78, 84, 86, 91, 98, 101, 125, 127, 131, 133, 136, 139, 158, 203, 270, 273, 274, 276, 285, 291, 293], "utc": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 91, 101, 112, 203, 270, 272, 273], "np": [3, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 50, 54, 93, 95, 106, 108, 112, 127, 135, 144, 146, 158, 175, 178, 183, 195, 198, 199, 200, 203, 207, 298, 299, 300], "collect_output": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "output_lookup": [3, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "collect": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 103, 107, 108, 109, 110, 118, 138, 176, 203, 209, 217, 218, 219, 261, 267, 268, 275, 277, 280, 285, 288, 292, 302], "timeseri": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 108, 112, 175, 178, 183, 
198, 199, 270], "save": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 69, 71, 74, 76, 77, 78, 86, 125, 158, 163, 166, 168, 169, 170, 171, 176, 177, 178, 183, 188, 207, 270, 274, 276, 278, 280], "self": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 56, 59, 80, 81, 84, 91, 98, 101, 128, 131, 199, 270], "lookup": [3, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 178, 190, 191, 192, 195, 203], "dictionari": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 45, 48, 49, 51, 69, 71, 74, 76, 77, 78, 84, 86, 91, 98, 101, 108, 110, 112, 114, 115, 125, 127, 137, 138, 143, 144, 145, 158, 161, 163, 164, 181, 182, 183, 184, 190, 191, 192, 193, 198, 199, 200, 202, 203, 205, 213, 263, 265, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 292, 297], "map": [3, 5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 69, 71, 74, 76, 77, 78, 84, 86, 91, 98, 101, 110, 114, 115, 125, 127, 158, 166, 183, 184, 188, 191, 193, 195, 198, 199, 200, 203, 205, 206, 207, 237, 270, 272, 273, 274, 275, 276, 284, 285], "kei": [3, 4, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 49, 51, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 93, 98, 101, 108, 110, 112, 137, 138, 143, 144, 145, 158, 161, 176, 177, 188, 190, 191, 192, 198, 199, 200, 202, 203, 205, 213, 265, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 283, 284, 285, 290, 291, 292, 298, 300], "special": [3, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 182, 296], "assign_input": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "assign": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 54, 292], "genericsystem": [3, 4, 21, 25, 26, 28, 29, 41], "opt": [3, 4, 8, 13, 21, 22, 
23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "hostedtoolcach": [3, 4, 8, 13, 21, 22, 23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "python": [3, 4, 8, 13, 21, 22, 23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 69, 71, 74, 76, 77, 78, 86, 93, 176, 199, 203, 270, 280, 282, 285, 288, 291, 302], "3": [3, 4, 8, 13, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 49, 51, 101, 112, 132, 134, 143, 148, 203, 209, 270, 285, 288, 291, 297, 302], "8": [3, 4, 8, 13, 21, 22, 23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 79, 288, 291, 297, 298, 302], "18": [3, 4, 8, 13, 21, 22, 23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 112, 139, 269, 297], "x64": [3, 4, 8, 13, 21, 22, 23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "lib": [3, 4, 8, 13, 21, 22, 23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "python3": [3, 4, 8, 13, 21, 22, 23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "packag": [3, 4, 8, 13, 21, 22, 23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 93, 102, 108, 112, 259, 288, 291, 302], "cpython": [3, 4, 8, 13, 21, 22, 23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "38": [3, 4, 8, 13, 21, 22, 23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 297], "x86_64": [3, 4, 8, 13, 21, 22, 23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 291], "linux": [3, 4, 8, 13, 21, 22, 23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 291], "gnu": [3, 4, 8, 13, 21, 22, 23, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "attr_dict": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "heirarch": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "_attr_dict": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "variabl": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 54, 59, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 93, 95, 98, 101, 108, 112, 125, 127, 131, 132, 133, 134, 199, 202, 203, 265, 270, 272, 273, 274, 285, 291, 300], "group": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 91, 101, 108, 109, 110, 112, 163, 164, 168, 270, 272, 273, 275, 291, 300], "lowest": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 205, 284], "level": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 69, 71, 74, 76, 77, 78, 84, 86, 95, 108, 112, 114, 115, 205, 277, 288, 290], "name": [3, 4, 5, 7, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 69, 71, 74, 76, 77, 78, 84, 86, 91, 93, 98, 101, 108, 109, 110, 112, 124, 125, 126, 127, 143, 148, 149, 150, 151, 152, 153, 154, 158, 161, 163, 164, 166, 168, 170, 175, 176, 177, 183, 184, 190, 191, 192, 193, 195, 198, 199, 200, 202, 203, 205, 207, 209, 212, 261, 263, 265, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 288, 291, 292, 294, 298, 302], "default": [3, 4, 5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 63, 64, 65, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 98, 101, 106, 108, 109, 110, 112, 114, 115, 125, 127, 136, 137, 141, 143, 144, 158, 161, 163, 166, 168, 169, 170, 171, 176, 181, 182, 183, 184, 188, 191, 193, 198, 199, 200, 203, 205, 207, 211, 221, 234, 268, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 291, 293, 301], "execut": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 69, 83, 91, 101, 158, 176, 203, 205, 223, 225, 237, 241, 242, 247, 248, 252, 253, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 
285, 288, 289, 290, 291, 293, 302], "call": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 51, 69, 71, 74, 76, 77, 78, 86, 91, 101, 131, 144, 145, 146, 158, 205, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 286, 291, 292, 294, 301], "rais": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 69, 71, 74, 76, 77, 78, 80, 81, 86, 108, 114, 115, 128, 131, 138, 158, 161, 198, 199, 200, 213, 294], "samexecutionerror": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "error": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 69, 71, 74, 76, 77, 78, 86, 114, 115, 138, 143, 145, 158, 175, 176, 177, 178, 188, 216, 220, 222, 223, 224, 225, 227, 233, 235, 236, 237, 238, 242, 243, 244, 246, 247, 249, 251, 252, 253, 254, 256, 257, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291, 294, 300], "avail": [3, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 76, 77, 78, 91, 106, 108, 112, 114, 115, 143, 146, 158, 168, 170, 171, 176, 183, 188, 190, 191, 192, 198, 199, 200, 203, 205, 207, 209, 268, 270, 272, 276, 280, 284, 285, 291, 299, 300], "input_list": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "_input": [3, 4, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 86], "wrapper": [4, 59, 262], "framework": [4, 49, 56, 59, 68, 70, 71, 80, 81, 85, 125, 127, 158, 175, 177, 180, 181, 182, 183, 184, 195, 196, 199, 201, 202, 203, 206, 207, 264, 288, 291], "raise_warn": 4, "fals": [4, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 64, 74, 80, 81, 86, 91, 98, 101, 106, 108, 110, 112, 114, 115, 125, 127, 158, 163, 176, 177, 183, 184, 188, 190, 191, 192, 193, 199, 203, 205, 261, 270, 271, 272, 273, 274, 275, 276, 280, 284, 285, 291], "flat": 
[4, 175, 199], "bool": [4, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 64, 69, 71, 86, 91, 98, 101, 106, 108, 110, 112, 114, 115, 125, 126, 127, 143, 146, 158, 163, 176, 177, 183, 184, 188, 190, 191, 192, 193, 198, 199, 200, 203, 205, 212, 261, 270, 271, 272, 273, 274, 275, 276, 280, 284, 285], "flag": [4, 27, 30, 33, 34, 35, 36, 49, 51, 69, 71, 77, 78, 80, 81, 86, 91, 98, 101, 106, 108, 110, 112, 114, 115, 125, 143, 158, 163, 176, 177, 183, 184, 188, 190, 191, 192, 193, 198, 199, 200, 203, 205, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 282, 284, 285, 292, 294, 300], "warn": [4, 49, 69, 71, 74, 76, 77, 78, 86, 87, 133, 135, 136, 146, 198, 199, 200, 219, 221, 226, 228, 232, 234, 239, 240, 241, 245, 248, 250, 251, 255, 258, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "becaus": [4, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 145], "thei": [4, 5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 91, 101, 125, 127, 143, 149, 176, 190, 191, 192, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "found": [4, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 59, 69, 71, 74, 76, 77, 78, 84, 86, 93, 106, 126, 136, 158, 207, 213, 277, 291, 294], "factori": 5, "util": [5, 52, 76, 77, 78, 89, 91, 98, 99, 101, 103, 104, 117, 132, 133, 134, 156, 160, 173, 174, 177, 185, 186, 205, 272, 273, 284, 288, 292, 301], "handler": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 101, 181, 182, 187, 195, 198, 199, 200, 202, 205, 215, 217, 218, 229, 230, 231, 273, 300], "res_fil": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 84, 91, 98, 101, 278, 297, 298, 299, 300], "project_point": [5, 51, 91, 98, 101, 108, 112, 270, 271, 272, 273, 297, 298, 299, 300], "gid_map": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 101, 270, 273], "lr_res_fil": [5, 25, 26, 
27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 101], "nn_map": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "bias_correct": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 101, 270, 273], "str": [5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 45, 48, 49, 51, 59, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 93, 98, 101, 106, 108, 109, 110, 112, 114, 115, 124, 125, 126, 127, 136, 141, 143, 150, 151, 153, 158, 161, 163, 164, 166, 167, 168, 169, 170, 171, 175, 176, 177, 178, 181, 182, 183, 184, 188, 190, 191, 192, 193, 195, 198, 199, 200, 202, 203, 205, 207, 209, 211, 212, 213, 261, 263, 269, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "full": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 77, 78, 91, 101, 108, 112, 148, 151, 158, 176, 177, 183, 184, 190, 191, 192, 195, 197, 198, 199, 200, 203, 205, 261, 270, 271, 272, 273, 276, 280, 284, 288, 289, 291, 293, 302], "path": [5, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 51, 69, 71, 74, 76, 77, 78, 84, 86, 91, 98, 101, 108, 110, 112, 114, 115, 158, 161, 163, 166, 168, 169, 170, 171, 176, 183, 188, 190, 191, 192, 195, 203, 205, 212, 213, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 282, 284, 285, 291, 297, 298, 299, 300], "retriev": [5, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 108, 112, 175, 291, 295, 300], "config": [5, 7, 8, 13, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 59, 91, 98, 101, 108, 112, 114, 115, 138, 145, 158, 161, 164, 213, 215, 267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 282, 284, 285, 288, 289, 290, 291, 293, 294, 295, 296, 297, 298, 299, 300, 302], "projectpoint": [5, 83, 91, 98, 101, 243, 271, 278, 297, 298, 299, 300], "project": [5, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 
36, 37, 38, 39, 40, 41, 45, 51, 72, 82, 83, 84, 91, 98, 101, 106, 203, 213, 215, 267, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 283, 284, 285, 288, 289, 290, 291, 295, 302], "point": [5, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 72, 82, 83, 84, 91, 98, 101, 125, 127, 131, 158, 163, 164, 166, 176, 183, 184, 188, 195, 203, 205, 207, 215, 267, 268, 270, 271, 272, 273, 274, 276, 277, 280, 284, 285, 288, 289, 290, 300, 302], "instanc": [5, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 69, 71, 74, 76, 77, 78, 83, 84, 86, 91, 98, 101, 106, 108, 112, 119, 120, 121, 122, 125, 127, 128, 143, 146, 190, 191, 192, 265, 291], "technologi": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 51, 84, 87, 91, 98, 101, 108, 112, 202, 203, 270, 273, 285, 299], "forc": [5, 158, 193, 276, 281], "interpret": [5, 49, 51, 270], "type": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 54, 83, 84, 91, 98, 101, 108, 112, 131, 132, 133, 134, 135, 136, 137, 138, 139, 143, 144, 145, 146, 161, 164, 193, 198, 199, 200, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 286, 291], "exampl": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 84, 91, 101, 110, 112, 125, 127, 131, 136, 145, 148, 150, 151, 153, 158, 176, 193, 203, 205, 269, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 290, 291, 292, 294, 298, 300, 301], "pvwatt": [5, 27, 84, 101, 273, 289], "tcsmolten": 5, "mean": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 69, 71, 74, 76, 77, 78, 86, 91, 101, 104, 108, 109, 112, 120, 136, 145, 158, 168, 169, 175, 176, 177, 178, 183, 198, 199, 200, 203, 205, 209, 270, 272, 273, 276, 280, 284, 285, 298, 299], "expect": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 84, 86, 98, 101, 112, 135, 145, 191, 198, 199, 200, 213], "solarresourc": [5, 
27, 28, 33, 34, 35, 36, 38], "nsrdb": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 86, 91, 98, 101, 202, 203, 273, 285, 288, 291, 294, 297, 299, 300, 302], "tupl": [5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 54, 57, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 98, 101, 106, 108, 109, 112, 125, 127, 128, 131, 132, 133, 134, 158, 181, 182, 183, 184, 190, 191, 192, 193, 195, 197, 198, 199, 200, 202, 203, 205, 207, 270, 272, 273, 274, 276, 284, 285], "uniqu": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 84, 101, 114, 115, 126, 143, 158, 176, 177, 195, 198, 199, 203, 205, 270, 273, 276, 280, 285, 292, 294], "integ": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 69, 71, 74, 76, 77, 78, 84, 86, 91, 98, 101, 137, 143, 144, 146, 195, 205, 207, 211, 270, 272, 273, 284], "gid": [5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 48, 49, 50, 51, 84, 91, 98, 101, 112, 114, 115, 136, 141, 158, 175, 176, 183, 184, 188, 195, 197, 198, 199, 200, 202, 203, 205, 207, 261, 270, 271, 272, 273, 276, 280, 284, 285, 290, 297, 299, 300], "enabl": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 101, 270, 273, 291, 300], "user": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 50, 51, 59, 69, 71, 74, 76, 77, 78, 79, 86, 91, 98, 101, 144, 145, 146, 161, 193, 203, 211, 270, 277, 279, 288, 290, 291, 293, 294, 297, 298, 302], "non": [5, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 84, 98, 101, 112, 135, 136, 188, 198, 199, 200, 205, 261, 270, 273, 284, 291], "pre": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 69, 71, 74, 76, 77, 78, 84, 91, 98, 101, 108, 112, 158, 164, 183, 184, 190, 191, 192, 195, 198, 199, 200, 203, 270, 272, 273, 276, 285], "extract": [5, 25, 26, 27, 28, 29, 30, 
31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 69, 71, 74, 76, 77, 78, 84, 87, 91, 98, 101, 108, 109, 112, 125, 126, 139, 158, 164, 172, 174, 175, 176, 177, 183, 184, 191, 193, 198, 199, 200, 203, 270, 272, 273, 274, 276, 278, 280, 285, 288], "low": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 101, 273], "resolut": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 86, 101, 108, 112, 157, 158, 183, 184, 195, 197, 198, 199, 200, 203, 207, 270, 273, 276, 285, 288, 295, 296], "dynam": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 101, 273, 288, 291], "interpol": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 101, 273], "nomin": [5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 91, 98, 101, 202, 203, 273, 285], "need": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 59, 84, 98, 101, 108, 110, 112, 114, 115, 158, 205, 268, 269, 273, 275, 276, 288, 291, 292, 297, 298, 302], "same": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 50, 51, 59, 84, 91, 101, 106, 108, 125, 127, 136, 139, 145, 158, 175, 176, 177, 192, 199, 202, 203, 205, 215, 270, 272, 273, 274, 276, 280, 284, 285, 292, 294, 296], "format": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 69, 71, 74, 76, 77, 78, 84, 86, 91, 93, 98, 101, 106, 112, 124, 127, 148, 149, 150, 152, 153, 154, 181, 182, 183, 184, 191, 192, 198, 199, 200, 202, 203, 205, 211, 246, 270, 273, 284, 285, 288], "resource_fil": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 101, 272, 273], "both": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 101, 125, 126, 127, 205, 270, 273, 274, 284, 288, 298, 299, 300, 302], "handl": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 54, 76, 77, 78, 79, 86, 91, 98, 101, 106, 108, 109, 111, 112, 
113, 115, 125, 127, 158, 163, 164, 175, 181, 182, 188, 190, 191, 192, 202, 232, 262, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 296], "rex": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 101, 106, 112, 182, 215, 259, 270, 273, 288, 291, 298, 299, 300], "windresourc": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 101, 270, 273], "1d": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 50, 51, 65, 101, 106, 108, 112, 178, 183, 184, 195, 197, 198, 199, 200, 203, 270, 273], "nearest": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 84, 237], "neighbor": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 84, 188, 203, 205, 207, 237, 270, 284, 285], "spatial": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 50, 51, 112, 176, 203, 261, 280, 288, 290], "For": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 59, 77, 78, 84, 91, 101, 110, 125, 127, 131, 136, 145, 158, 176, 203, 205, 268, 269, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 291, 296], "detail": [5, 12, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 101, 131, 133, 136, 137, 146, 203, 268, 270, 273, 285, 288, 291, 292], "see": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 50, 51, 91, 101, 108, 112, 131, 133, 135, 136, 137, 144, 145, 146, 158, 188, 193, 203, 205, 209, 268, 270, 272, 273, 276, 284, 285, 288, 290, 291, 292], "multiresolutionresourc": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "docstr": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51], "provid": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 56, 69, 71, 74, 76, 77, 78, 86, 108, 112, 131, 137, 143, 144, 146, 158, 183, 184, 198, 199, 200, 203, 205, 269, 270, 271, 272, 273, 274, 275, 276, 279, 
280, 282, 284, 285, 288, 297, 298], "wind": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 44, 45, 46, 47, 49, 50, 51, 52, 56, 58, 59, 63, 64, 65, 74, 84, 86, 101, 121, 122, 125, 126, 127, 131, 132, 133, 134, 135, 136, 138, 139, 141, 155, 157, 158, 187, 188, 198, 199, 200, 205, 215, 238, 239, 270, 273, 274, 276, 284, 288, 289, 299, 300], "solar": [5, 16, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 74, 86, 101, 108, 112, 121, 122, 125, 126, 127, 157, 158, 199, 205, 270, 273, 274, 276, 284, 288, 289], "bia": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 101, 178, 270, 273], "correct": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 49, 51, 80, 101, 270, 273, 288, 300, 302], "tabl": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 84, 91, 101, 114, 125, 158, 163, 169, 170, 171, 175, 176, 177, 178, 188, 199, 203, 205, 270, 272, 273, 274, 276, 280, 284, 285, 288, 294, 296], "column": [5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 84, 91, 93, 98, 101, 108, 112, 118, 119, 120, 121, 122, 124, 125, 126, 127, 158, 163, 164, 169, 170, 171, 175, 176, 177, 178, 188, 195, 197, 198, 199, 200, 202, 203, 205, 207, 270, 272, 273, 274, 276, 278, 280, 284, 285, 290, 297], "adder": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 101, 108, 112, 270, 273], "scalar": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 93, 101, 112, 183, 198, 199, 202, 203, 205, 270, 273, 285], "field": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 101, 128, 270, 273, 292], "match": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 91, 93, 98, 101, 125, 127, 136, 141, 144, 148, 149, 150, 151, 152, 153, 154, 158, 183, 199, 203, 205, 209, 270, 272, 273, 274, 276, 284, 285], "true": [5, 25, 26, 27, 
28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 69, 71, 74, 91, 98, 101, 108, 110, 112, 114, 115, 125, 131, 133, 134, 143, 146, 151, 158, 176, 177, 193, 198, 199, 200, 203, 205, 270, 271, 273, 274, 275, 276, 280, 284, 285, 291, 300], "regardless": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 101, 205, 270, 271, 273, 284], "present": [5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 69, 71, 74, 76, 77, 78, 80, 81, 86, 101, 128, 176, 188, 198, 199, 202, 203, 205, 270, 273, 280], "re": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 50, 51, 101, 141, 176, 177, 197, 198, 199, 200, 203, 207, 268, 270, 271, 273, 285, 291, 294, 298], "either": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 69, 71, 74, 76, 77, 78, 84, 86, 91, 98, 101, 108, 109, 112, 126, 158, 164, 176, 183, 198, 199, 203, 205, 270, 272, 273, 276, 280, 284, 291], "1": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 54, 59, 74, 76, 77, 78, 80, 81, 86, 91, 101, 112, 114, 115, 125, 127, 128, 132, 136, 139, 153, 154, 158, 168, 175, 176, 177, 178, 183, 191, 193, 198, 199, 200, 203, 205, 207, 209, 214, 215, 262, 270, 272, 273, 274, 276, 280, 284, 285, 291, 297, 298, 299, 300, 303], "windspe": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 91, 98, 101, 270, 273, 298], "ghi": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 101, 270, 273, 291], "dni": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 91, 98, 101, 270, 273], "depend": [5, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 77, 78, 101, 108, 112, 114, 115, 135, 270, 273, 288, 302], "factor": [5, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 91, 101, 108, 112, 120, 145, 178, 199, 203, 270, 272, 273, 285, 295, 296, 299, 300], 
"samresourc": [5, 108, 112, 215], "pass": [5, 21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 59, 84, 91, 98, 101, 108, 145, 148, 269, 270, 277, 288, 298], "implement": [6, 93, 131, 136, 140, 199, 203, 285], "abstract": [7, 25, 26, 27, 29, 131, 132, 133, 134, 177, 181, 184, 197], "config_file_nam": 7, "json": [7, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 69, 71, 74, 76, 77, 78, 84, 86, 87, 91, 98, 101, 108, 112, 114, 115, 145, 158, 205, 235, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 282, 284, 285, 286, 288, 290, 291, 292, 293, 294, 297, 298, 299, 300, 301, 302], "pysam_modul": [7, 8, 13], "pvwattsv5": [7, 13, 24, 101, 299, 300], "geotherm": [7, 8, 51, 84, 91, 101, 158, 270, 272, 273, 276, 288], "etc": [7, 50, 51, 69, 71, 74, 76, 77, 78, 86, 90, 91, 101, 158, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "init_default_pysam_obj": [7, 8, 13], "defualt": [7, 8, 13], "pysm": [7, 8, 13], "abstractdefaultfromconfigfil": [8, 13], "lcoe": [9, 21, 23, 45, 49, 69, 90, 91, 93, 95, 157, 163, 164, 171, 199, 202, 203, 205, 272, 284, 285, 296], "calcul": [9, 15, 22, 23, 27, 29, 30, 31, 33, 34, 35, 36, 39, 40, 41, 49, 91, 92, 93, 95, 98, 101, 109, 125, 127, 135, 136, 138, 139, 141, 144, 146, 158, 175, 176, 177, 178, 193, 198, 199, 200, 203, 204, 205, 207, 272, 273, 274, 276, 280, 284, 285, 288, 296], "linear": [10, 31, 133, 134, 136], "direct": [10, 31, 41, 49, 50, 51, 65, 188, 198, 199, 200, 205, 270, 284], "steam": [10, 31], "heat": [10, 16, 18, 31, 39, 166], "mhkwave": [11, 101], "pv": [12, 27, 33, 34, 35, 36, 86, 87, 288], "pvsamv1": [12, 27, 28, 34, 35, 36, 38, 101], "pvwattsv8": [14, 27, 33, 34, 35, 51, 91, 101, 158, 270, 272, 273, 276], "owner": [15, 23, 45, 90, 289], "ppa": [15, 21, 22, 23, 290], "water": [16, 37, 296], "csp": [17, 38, 288], "parabol": 18, "trough": [18, 39], "process": [18, 31, 39, 49, 51, 59, 76, 77, 78, 86, 91, 101, 125, 146, 163, 
168, 183, 203, 270, 272, 273, 274, 280, 285, 288, 291, 292, 294, 298], "windpow": [19, 24, 29, 41, 49, 51, 84, 91, 98, 101, 158, 270, 272, 273, 276, 289, 291, 298], "lcoefcr": [20, 22, 91, 98, 101, 272], "singleown": [20, 21, 22, 45, 91, 272], "revpysam": [21, 25], "": [21, 22, 23, 30, 41, 45, 49, 51, 56, 69, 71, 74, 76, 77, 78, 84, 86, 91, 93, 98, 101, 133, 135, 136, 139, 158, 163, 164, 169, 175, 176, 177, 178, 188, 193, 195, 198, 199, 200, 203, 205, 211, 213, 269, 270, 271, 272, 273, 276, 277, 280, 281, 282, 283, 284, 285, 286, 288, 290, 291, 292, 293, 294, 297, 298, 302], "price": [21, 22, 23, 290, 291], "mwh": [21, 22, 23, 93, 95, 203, 205], "nativ": [21, 22, 23, 91, 98, 101], "unit": [21, 22, 23, 49, 108, 112, 133, 135, 136, 203, 205, 288, 302], "cent": [21, 22, 23], "kwh": [21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 93, 95, 270], "mult": [21, 22, 23], "10": [21, 22, 23, 27, 29, 30, 33, 34, 35, 36, 40, 49, 51, 84, 112, 203, 207, 270, 285, 288, 293, 295, 297, 302], "npv": [21, 22, 23], "net": [21, 22, 23], "dollar": [21, 22, 23], "1000": [21, 22, 23, 176, 177, 280], "lcoe_nom": [21, 22, 23, 91], "lcoe_real": [21, 22, 23, 91], "real": [21, 22, 23, 296], "flip_actual_irr": [21, 22, 23, 91], "actual": [21, 22, 23, 30, 49, 84, 131, 132, 133, 134, 176, 280, 290, 291], "irr": [21, 22, 23], "gross_revenu": [21, 22, 23, 91], "cash": [21, 22, 23], "flow": [21, 22, 23], "total": [21, 22, 23, 30, 45, 49, 51, 54, 59, 119, 121, 122, 136, 137, 143, 144, 146, 195, 197, 198, 199, 200, 203, 205, 269, 270, 272, 273, 284, 291], "revenu": [21, 22, 23], "rev_run": [21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45], "site_df": [21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45], "row": [21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 84, 91, 98, 101, 108, 112, 158, 175, 176, 177, 183, 184, 190, 191, 192, 195, 197, 198, 199, 200, 
203, 207, 272, 273, 276, 280, 297], "via": [21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 91, 101, 145, 158, 269, 270, 272, 273, 276, 288, 291, 300], "loc": [21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 84, 298], "iloc": [21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 84], "label": [21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 93, 110, 158, 176, 177, 198, 199, 200, 202, 203, 205, 275, 276, 280, 284, 298], "forward": [21, 22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45], "sim": 21, "numer": [21, 93, 163, 169], "result": [21, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 84, 91, 98, 101, 108, 112, 133, 136, 138, 144, 145, 199, 202, 203, 207, 215, 237, 270, 271, 272, 273, 285, 291, 299, 300], "econom": [22, 23, 101, 203, 273, 285, 288, 290], "fcr": [22, 90, 93, 205], "points_control": [22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 84, 91, 98, 101], "cf_file": [22, 23, 91, 272], "control": [22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 69, 76, 77, 78, 91, 98, 101, 131, 159, 162, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 293], "pointscontrol": [22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 91, 98, 101], "info": [22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 51, 69, 71, 74, 76, 77, 78, 86, 101, 138, 141, 144, 145, 146, 193, 259, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 286, 298], "capac": [22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 56, 59, 91, 93, 95, 101, 108, 112, 114, 115, 119, 120, 121, 122, 125, 127, 143, 145, 171, 199, 203, 205, 270, 272, 273, 274, 284, 285, 288, 295, 296, 299, 300], "look": [22, 23, 54, 
69, 77, 78, 135, 293], "cf_mean_": [22, 23], "cf_profile_": [22, 23], "cf": [22, 23, 27, 33, 34, 35, 36, 95, 108, 112, 175, 176, 177, 178, 291], "dataset": [22, 23, 49, 51, 74, 91, 98, 101, 108, 109, 110, 112, 158, 163, 168, 175, 176, 177, 181, 182, 183, 184, 191, 192, 195, 198, 199, 200, 202, 203, 205, 207, 215, 270, 271, 272, 273, 275, 276, 280, 284, 285], "out": [22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 51, 69, 71, 74, 76, 77, 78, 80, 81, 86, 91, 93, 98, 101, 158, 198, 199, 203, 213, 268, 269, 270, 272, 273, 276, 277, 282, 285, 288, 291, 298, 299, 300], "nest": [22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 91, 98, 101, 283], "where": [22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 54, 56, 59, 69, 71, 74, 76, 77, 78, 84, 86, 91, 92, 93, 98, 101, 125, 132, 133, 134, 136, 144, 158, 181, 182, 183, 184, 191, 193, 198, 199, 200, 202, 203, 205, 213, 215, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 291, 296], "top": [22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 84], "second": [22, 23, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 50, 54, 91, 101, 176, 269, 272, 273, 277, 280, 291], "windbo": [23, 91, 272], "tcsmolensalt": 24, "scheduledlossesmixin": 25, "abc": [25, 26, 27, 28, 29, 98, 131, 148, 177, 181, 184, 197], "datetimeindex": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 91, 98, 101, 108, 112, 125, 126, 145, 176, 177, 261, 263], "There": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 112, 294, 296], "requir": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 50, 51, 69, 71, 74, 76, 77, 78, 86, 91, 93, 98, 101, 110, 112, 137, 143, 144, 158, 163, 181, 182, 183, 184, 190, 191, 199, 202, 203, 205, 269, 270, 271, 272, 273, 274, 275, 276, 278, 279, 280, 282, 284, 285, 289, 291, 298], "respect": [25, 
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 50, 51, 83, 91, 98, 101, 112, 161, 190, 191, 192, 199, 205, 209, 270, 279, 280, 291, 294], "remap": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "done": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 121, 122, 139, 288, 291, 292, 302], "typic": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 91, 101, 110, 133, 135, 136, 176, 183, 184, 202, 203, 205, 268, 270, 272, 273, 275, 280, 284, 285, 288, 296], "wtk": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 74, 91, 98, 101, 202, 203, 270, 273, 285, 288, 291, 296, 298, 299, 300, 302], "dn": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 291], "wind_spe": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 50, 65, 74, 131, 135, 139, 298], "decemb": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "31st": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "check_resource_data": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "nan": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 158, 198, 199, 200], "set_resource_data": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "placehold": [25, 29, 108, 112, 181, 200, 202], "tz_elev_check": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "Will": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 50, 84, 86, 106], "has_timezon": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "fraction": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 74, 76, 77, 78, 91, 98, 101, 114, 115, 193, 198, 205, 272, 273, 284], "float": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 54, 56, 59, 63, 64, 74, 76, 77, 78, 91, 93, 95, 98, 101, 106, 108, 112, 114, 115, 131, 132, 133, 134, 135, 
136, 137, 143, 163, 181, 182, 183, 184, 190, 191, 193, 198, 199, 200, 202, 203, 205, 207, 270, 272, 273, 284, 285, 296], "hourli": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 144, 145, 146, 152, 270, 288], "frac": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45], "profil": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 86, 91, 101, 106, 108, 112, 117, 118, 125, 126, 127, 157, 158, 172, 173, 174, 175, 176, 177, 178, 183, 184, 198, 199, 200, 203, 267, 268, 270, 272, 273, 274, 276, 277, 285, 288, 299, 302], "local": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 76, 77, 78, 91, 101, 203, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 289, 300], "numpi": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 93, 108, 112, 135, 136, 139, 198, 200, 215, 259, 291, 298, 300], "datatyp": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 108, 112], "float32": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 101, 112], "length": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 50, 83, 84, 91, 98, 101, 112, 126, 136, 139, 144, 146, 178, 195, 198, 199, 200, 291], "8760": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 95, 101, 112, 144, 146, 270, 273, 291], "annual": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 93, 95, 107, 131, 136, 137, 199, 203, 270, 285], "yield": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 59, 133, 136, 195], "kw": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 59, 93, 135, 298], "power": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 49, 51, 92, 93, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 188, 199, 202, 203, 205, 268, 270, 284, 285, 288, 289, 290], "run_gen_and_econ": [25, 26, 27, 28, 29, 30, 31, 
32, 33, 34, 35, 36, 37, 38, 39, 40, 41], "possibl": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 74, 84, 114, 115, 136, 143, 144, 146, 282, 292], "follow": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 57, 69, 71, 74, 76, 77, 78, 84, 86, 91, 101, 137, 143, 176, 203, 269, 270, 272, 273, 280, 281, 285, 288, 290, 291, 292, 293, 294, 301, 302], "analysi": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 46, 47, 49, 51, 68, 69, 84, 86, 88, 91, 101, 155, 157, 158, 195, 203, 211, 269, 270, 272, 273, 276, 277, 280, 285, 288, 295, 296, 301], "outage_config_kei": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 145], "rev_outag": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 145, 298], "specifi": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 69, 71, 74, 76, 77, 78, 84, 86, 91, 98, 101, 109, 110, 125, 127, 138, 141, 143, 145, 146, 158, 161, 176, 203, 205, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 283, 284, 285, 288, 292, 293, 294, 298], "outag": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 144, 145, 146, 289], "inform": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 74, 137, 138, 143, 144, 145, 146, 268, 269, 270, 277, 288, 298], "outage_seed_config_kei": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 145], "rev_outages_se": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 145], "random": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 74, 144, 145, 146, 215], "seed": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 74, 144, 145, 146, 215], "add_scheduled_loss": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 145], "add": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 84, 91, 98, 101, 108, 112, 145, 146, 158, 168, 190, 191, 192, 205, 207, 265, 269, 
270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 286, 291, 298], "stochast": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 144, 145, 146, 298], "schedul": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 298], "loss": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 84, 257, 258, 270, 289, 296], "function": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 52, 54, 55, 57, 58, 59, 60, 61, 69, 71, 74, 76, 77, 78, 80, 81, 83, 84, 86, 89, 93, 94, 99, 103, 104, 107, 117, 118, 130, 132, 133, 134, 136, 138, 141, 144, 145, 146, 147, 148, 151, 152, 156, 160, 173, 185, 186, 208, 210, 213, 214, 260, 270, 288, 291], "read": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 50, 51, 91, 98, 101, 108, 112, 138, 145, 235, 270, 291, 299, 300], "comput": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 91, 101, 108, 110, 112, 114, 115, 119, 120, 121, 122, 125, 136, 138, 141, 145, 157, 158, 163, 165, 168, 176, 183, 184, 191, 198, 199, 200, 203, 205, 207, 270, 272, 273, 274, 275, 276, 280, 284, 285, 288, 289, 298, 299, 300], "string": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 43, 49, 51, 54, 59, 63, 64, 69, 71, 74, 76, 77, 78, 84, 86, 91, 93, 98, 101, 108, 112, 125, 127, 137, 143, 145, 148, 149, 150, 153, 158, 176, 195, 199, 203, 209, 211, 269, 270, 271, 272, 273, 274, 275, 276, 278, 279, 280, 282, 284, 285], "been": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 136, 145, 146, 157, 265, 268, 271, 288, 291, 300], "dump": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 145, 163, 168, 169, 298], "otherwis": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 69, 71, 74, 76, 77, 78, 86, 91, 98, 101, 108, 145, 149, 152, 154, 158, 176, 199, 202, 203, 205, 
272, 273, 276, 280, 284, 285], "descript": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 106, 143, 144, 145, 193, 270, 288, 289], "allow": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 56, 79, 91, 101, 121, 122, 125, 127, 143, 144, 145, 146, 158, 161, 203, 205, 207, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 283, 284, 285, 286, 288], "each": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 51, 54, 64, 80, 81, 84, 91, 101, 110, 114, 115, 125, 131, 136, 144, 145, 148, 158, 161, 168, 176, 177, 183, 188, 190, 191, 192, 195, 198, 199, 200, 202, 203, 205, 207, 209, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 288, 290, 291, 292, 294, 298, 301], "signifi": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 74, 145], "adjust": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 59, 125, 127, 136, 138, 139, 141, 145, 274], "yearli": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 110, 145, 275], "note": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 91, 101, 108, 112, 121, 122, 125, 127, 131, 145, 158, 176, 203, 205, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 288, 289, 292, 293, 294, 296, 297, 299, 300, 302], "other": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 56, 74, 101, 125, 127, 143, 145, 146, 148, 176, 193, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 290, 291], "effect": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 74, 84, 86, 91, 145, 158, 203, 205, 215, 270, 284, 285], "combin": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 91, 98, 101, 110, 127, 145, 158, 183, 184, 190, 191, 192, 199, 203, 269, 270, 275, 285, 291, 292, 300], "33": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 
145, 297], "70": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 145, 203], "farm": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 45, 59, 125, 143, 144, 145, 146, 187, 188, 205, 274, 284], "down": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 135, 143, 145], "reduc": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 51, 86, 92, 93, 108, 112, 145, 195, 270, 291], "80": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 145], "30": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 84, 145, 214, 291, 297], "remain": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 145, 205, 281, 284], "oper": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 59, 93, 95, 145, 157, 158, 203, 270, 276, 285], "dure": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 54, 59, 74, 98, 101, 114, 115, 125, 143, 145, 146, 188, 203, 205, 219, 227, 228, 232, 233, 269, 270, 273, 274, 284, 285], "20": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 84, 112, 145, 203, 269, 270, 285, 288, 297, 302], "origin": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 49, 51, 83, 131, 132, 133, 134, 136, 138, 145, 158, 203, 270, 276, 291, 298], "outage_se": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 145], "A": [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 48, 49, 51, 57, 59, 60, 62, 74, 86, 91, 101, 119, 120, 121, 122, 131, 135, 136, 137, 143, 144, 145, 146, 148, 152, 154, 158, 193, 198, 199, 200, 205, 209, 262, 269, 270, 272, 273, 276, 277, 280, 282, 284, 288, 293, 297, 302], "abstractsamgener": [26, 28, 29, 32, 41, 145], "weather": [26, 30, 31, 37, 39], "disk": [26, 30, 31, 37, 39, 51, 91, 98, 101, 108, 110, 112, 163, 169, 212, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 300], "pysam_weather_tag": 26, 
"some": [26, 30, 31, 37, 39, 79, 91, 101, 108, 112, 125, 136, 268, 272, 273, 274, 288, 291, 296, 298], "raw": [26, 30, 31, 37, 39, 93, 158, 193, 276], "write": [26, 30, 31, 37, 39, 101, 108, 112, 158, 203, 261, 263, 273, 276, 285, 298, 301], "delet": [26, 30, 31, 37, 39, 269, 271, 291], "after": [26, 27, 29, 30, 31, 33, 34, 35, 36, 37, 39, 40, 49, 51, 91, 101, 108, 112, 125, 127, 131, 132, 133, 134, 157, 158, 188, 193, 203, 205, 268, 270, 272, 273, 274, 276, 281, 284, 296], "complet": [26, 30, 31, 37, 39, 51, 91, 101, 158, 268, 270, 272, 273, 276, 277, 281, 294, 301], "abstractsamsolar": [27, 38], "photovolta": [27, 34, 35, 36, 288], "older": [27, 33, 34, 35, 36], "version": [27, 33, 34, 35, 36, 42, 43, 108, 112, 244, 245, 259, 268, 278, 288, 302], "document": [27, 29, 30, 33, 34, 35, 36, 40, 50, 51, 91, 101, 131, 137, 144, 158, 203, 268, 270, 272, 273, 276, 285], "configur": [27, 29, 30, 33, 34, 35, 36, 40, 51, 67, 68, 70, 71, 74, 75, 76, 77, 78, 82, 84, 85, 86, 91, 98, 101, 108, 112, 158, 213, 220, 221, 234, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 286, 291, 292, 296, 297, 299, 300, 303], "you": [27, 29, 30, 33, 34, 35, 36, 40, 49, 51, 91, 101, 110, 112, 114, 115, 131, 158, 176, 203, 205, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 288, 290, 291, 292, 293, 294, 300, 302], "mai": [27, 29, 30, 33, 34, 35, 36, 40, 51, 91, 93, 101, 108, 112, 138, 145, 146, 151, 158, 176, 203, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291, 300], "also": [27, 29, 30, 33, 34, 35, 36, 40, 49, 51, 74, 84, 91, 93, 98, 101, 108, 110, 112, 125, 137, 143, 146, 148, 158, 178, 199, 203, 205, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 288, 291, 292, 294, 296, 298, 299, 300, 302], "outage_info": [27, 29, 30, 33, 34, 35, 36, 40, 298], "6": [27, 29, 30, 33, 34, 35, 36, 40, 153, 291, 297], "durat": [27, 29, 30, 33, 34, 35, 36, 40, 143, 144, 146, 298], "24": [27, 29, 30, 33, 34, 35, 36, 40, 
201, 297, 298], "percentage_of_capacity_lost": [27, 29, 30, 33, 34, 35, 36, 40, 143, 298], "100": [27, 29, 30, 33, 34, 35, 36, 40, 51, 54, 83, 84, 91, 98, 101, 108, 112, 114, 115, 125, 127, 137, 143, 146, 163, 166, 183, 203, 270, 272, 274, 285, 291, 297, 298], "allowed_month": [27, 29, 30, 33, 34, 35, 36, 40, 143, 146, 298], "januari": [27, 29, 30, 33, 34, 35, 36, 40, 51, 101, 270, 273, 298], "march": [27, 29, 30, 33, 34, 35, 36, 40, 148], "allow_outage_overlap": [27, 29, 30, 33, 34, 35, 36, 40, 143], "demo": [27, 29, 30, 33, 34, 35, 36, 40], "notebook": [27, 29, 30, 33, 34, 35, 36, 40], "instruct": [27, 29, 30, 33, 34, 35, 36, 40, 288, 291, 300, 302], "how": [27, 29, 30, 33, 34, 35, 36, 40, 51, 91, 98, 101, 203, 207, 268, 270, 272, 273, 285, 289, 290, 291, 294, 296, 297, 300, 301], "rng": [27, 29, 30, 33, 34, 35, 36, 40], "time_index_step": [27, 29, 30, 33, 34, 35, 36, 40, 86, 108, 112], "repres": [27, 29, 30, 33, 34, 35, 36, 40, 49, 51, 69, 71, 74, 76, 77, 78, 86, 91, 93, 101, 106, 112, 117, 125, 126, 127, 131, 136, 137, 139, 143, 148, 149, 157, 158, 161, 172, 173, 174, 175, 176, 177, 178, 195, 203, 205, 207, 270, 271, 272, 273, 274, 276, 277, 279, 280, 282, 284, 285, 288, 290, 292], "size": [27, 29, 30, 33, 34, 35, 36, 40, 54, 86, 91, 98, 101, 106, 108, 112, 205, 272, 273, 284], "sampl": [27, 29, 30, 33, 34, 35, 36, 40, 59, 65, 139, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285], "tempor": [27, 29, 30, 33, 34, 35, 36, 40, 50, 86, 101, 108, 112, 168, 273, 288], "minut": [27, 29, 30, 33, 34, 35, 36, 40, 288, 291], "while": [27, 29, 30, 33, 34, 35, 36, 40, 51, 112, 176, 254, 270, 277, 280, 291], "forth": [27, 29, 30, 33, 34, 35, 36, 40], "shape": [27, 29, 33, 34, 35, 36, 40, 41, 49, 51, 59, 62, 63, 101, 106, 108, 112, 176, 177, 183, 184, 190, 191, 192, 195, 197, 198, 199, 200, 202, 203, 207, 222, 270, 273, 285, 291], "appli": [27, 29, 33, 34, 35, 36, 40, 48, 49, 51, 59, 80, 81, 93, 101, 114, 115, 125, 127, 131, 132, 133, 134, 136, 137, 161, 
175, 176, 177, 193, 198, 199, 200, 202, 203, 205, 270, 273, 274, 279, 280, 284, 285, 296], "still": [27, 29, 33, 34, 35, 36, 40, 198, 199, 200, 288], "multipl": [27, 29, 33, 34, 35, 36, 40, 51, 59, 84, 91, 98, 101, 107, 108, 110, 144, 146, 158, 178, 183, 198, 199, 203, 205, 269, 270, 271, 272, 273, 275, 276, 277, 281, 282, 283, 285, 288], "fail": [27, 29, 33, 34, 35, 36, 40, 283, 289, 292], "clearski": [27, 33, 34, 35, 36, 86], "boolean": [27, 33, 34, 35, 36, 86, 106, 108, 112, 144, 146, 158, 163, 188, 190, 191, 192, 198, 199, 200, 203], "wether": [27, 33, 34, 35, 36, 144, 146], "valueerror": [27, 33, 34, 35, 36, 80, 81, 128, 161, 218, 231], "lat": [27, 33, 34, 35, 36, 108, 112, 195, 198, 199, 200, 207, 263, 267, 297], "lon": [27, 33, 34, 35, 36, 108, 112, 195, 198, 199, 200, 207, 263, 267, 297], "outsid": [27, 33, 34, 35, 36], "90": [27, 33, 34, 35, 36, 49, 51, 101, 270, 273, 297], "180": [27, 33, 34, 35, 36, 49, 51, 101, 270, 273], "set_latitude_tilt_az": [27, 33, 34, 35, 36], "tilt": [27, 33, 34, 35, 36], "az": [27, 33, 34, 35, 36], "wa": [27, 30, 33, 34, 35, 36, 49, 51, 91, 106, 125, 127, 132, 133, 134, 136, 157, 183, 188, 193, 198, 199, 200, 203, 205, 270, 288, 291, 300, 302], "absolut": [27, 33, 34, 35, 36, 38, 54, 69, 71, 74, 76, 77, 78, 86, 178], "azimuth": [27, 33, 34, 35, 36], "system_capacity_ac": [27, 33, 34, 35, 36, 110, 275], "ac": [27, 33, 34, 35, 36, 101, 110, 199, 205, 273, 275, 284], "namepl": [27, 30, 33, 34, 35, 36], "dc": [27, 33, 34, 35, 36, 110, 275], "ilr": [27, 33, 34, 35, 36], "product": [27, 30, 33, 34, 35, 36, 49, 51, 59, 93, 95, 137, 199, 203, 270, 285], "cf_mean_ac": [27, 33, 34, 35, 36, 110, 275], "up": [27, 33, 34, 35, 36, 49, 51, 108, 112, 144, 146, 176, 198, 199, 200, 205, 209, 268, 270, 280, 284, 288, 289, 292, 293, 296], "cf_profile_ac": [27, 33, 34, 35, 36, 110, 275], "invert": [27, 33, 34, 35, 36], "alia": [27, 33, 34, 35, 36, 84, 128, 182], "clipped_pow": [27, 33, 34, 35, 36, 110, 275], "clip": [27, 33, 34, 35, 36, 135], 
"behind": [27, 33, 34, 35, 36, 106, 108, 112, 163, 190, 191, 192], "agg_albedo": [27, 28, 33, 34, 35, 36, 38], "albedo": [27, 28, 33, 34, 35, 36, 38, 86], "aggreg": [27, 28, 33, 34, 35, 36, 38, 49, 51, 101, 119, 120, 144, 175, 176, 177, 185, 195, 197, 198, 199, 200, 201, 202, 203, 205, 267, 268, 270, 273, 277, 280, 284, 288, 291, 296, 302], "monthli": [27, 28, 33, 34, 35, 36, 38], "w": [27, 28, 33, 34, 35, 36, 38, 69, 71, 74, 76, 77, 78, 84, 86, 108, 112, 136, 288, 298, 302], "len": [27, 28, 33, 34, 35, 36, 38, 41, 49, 112, 195], "12": [27, 28, 30, 33, 34, 35, 36, 38, 74, 112, 174, 291, 297], "tech": [27, 28, 33, 34, 35, 36, 38, 43, 84, 91, 98, 101, 108, 112, 206, 207], "spec": [27, 28, 33, 34, 35, 36, 38, 91, 98, 101, 137, 143], "doc": [27, 28, 33, 34, 35, 36, 38], "http": [27, 28, 33, 34, 35, 36, 38, 49, 139, 209, 282, 288, 291, 295, 300, 302], "readthedoc": [27, 28, 33, 34, 35, 36, 38], "io": [27, 28, 33, 34, 35, 36, 38, 291], "en": [27, 28, 33, 34, 35, 36, 38], "master": [27, 28, 33, 34, 35, 36, 38, 291], "html": [27, 28, 33, 34, 35, 36, 38, 166, 170, 171], "less": [27, 28, 33, 34, 35, 36, 38, 51, 143, 198, 203, 270, 285, 291], "monthly_albedo": [27, 28, 33, 34, 35, 36, 38], "powercurvelossesmixin": [29, 41], "rev_power_curve_loss": [29, 40, 41, 138, 298], "powercurvelossesinput": [29, 40, 141, 298], "target_losses_perc": [29, 40, 137, 298], "9": [29, 40, 101, 288, 297, 302], "transform": [29, 40, 49, 51, 106, 131, 132, 133, 134, 136, 137, 183, 184, 198, 199, 200, 203, 270, 285, 298], "exponential_stretch": [29, 40, 140, 298], "mention": [29, 40, 59], "abov": [29, 30, 40, 51, 59, 132, 133, 134, 135, 141, 203, 268, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 292, 297, 300], "power_curve_config_kei": [29, 40, 41, 138], "curv": [29, 40, 41, 45, 49, 51, 59, 93, 101, 113, 114, 115, 125, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 157, 158, 163, 169, 171, 175, 176, 177, 179, 183, 184, 185, 186, 188, 194, 195, 196, 197, 198, 199, 200, 
201, 203, 204, 205, 207, 267, 268, 270, 273, 274, 276, 277, 280, 288, 289, 294, 302], "target": [29, 40, 41, 69, 91, 98, 101, 108, 112, 131, 136, 137, 138, 141, 195, 213, 291, 298], "add_power_curve_loss": [29, 40, 41, 138], "account": [29, 40, 41, 77, 78, 138, 139, 141, 205, 291], "new": [29, 40, 41, 42, 60, 69, 71, 74, 76, 77, 78, 83, 84, 86, 110, 121, 122, 128, 131, 132, 133, 134, 138, 141, 146, 198, 199, 200, 202, 203, 205, 265, 275, 284, 285, 288, 291, 292, 295, 302], "percentag": [29, 40, 41, 136, 137, 138, 141, 143, 144], "adjust_power_curv": [29, 40, 41, 138, 298], "shift": [29, 40, 41, 131, 132, 133, 134, 138, 139, 141, 298], "input_power_curv": [29, 40, 41, 138], "powercurv": [29, 40, 41, 131, 132, 133, 134, 136, 138, 141, 298], "wind_resource_from_input": [29, 40, 41, 138], "weight": [29, 40, 41, 49, 51, 120, 136, 138, 175, 176, 177, 178, 193, 198, 199, 200, 203, 270, 280, 285], "powercurvewindresourc": [29, 40, 41, 138, 141, 298], "revlossesvalueerror": [29, 40, 41, 138], "compat": [29, 40, 41, 138, 158, 163, 276], "wind_resource_model_choic": [29, 40, 41, 138], "abstractsamgenerationfromweatherfil": [30, 31, 37, 39], "As": [30, 209], "2022": [30, 112, 209], "well": [30, 51, 91, 101, 136, 146, 188, 203, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 291, 298], "replac": [30, 69, 71, 74, 76, 77, 78, 86, 101, 158, 190, 191, 192, 209, 213, 290, 291, 292, 296], "lifetim": 30, "plant": [30, 46, 47, 48, 49, 50, 51, 52, 56, 58, 59, 63, 64, 92, 93, 270], "It": [30, 51, 112, 143, 144, 146, 203, 270, 282, 285, 291, 294, 300], "decid": 30, "would": [30, 49, 51, 101, 125, 127, 131, 146, 203, 205, 269, 270, 273, 274, 285], "therefor": [30, 51, 112, 136, 158, 203, 269, 270, 276, 285], "just": [30, 101, 108, 175, 195, 273, 288, 291, 302], "gross": [30, 296], "doe": [30, 51, 69, 71, 74, 76, 77, 78, 86, 98, 101, 108, 112, 121, 122, 125, 127, 146, 151, 158, 203, 205, 207, 269, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291, 293], 
"throw": [30, 291], "ani": [30, 45, 48, 49, 51, 59, 69, 71, 74, 76, 77, 78, 86, 91, 98, 101, 125, 127, 132, 133, 134, 136, 146, 150, 152, 157, 158, 176, 183, 199, 203, 205, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 288, 290, 291, 294, 301, 302], "getem": 30, "current": [30, 49, 83, 91, 98, 101, 164, 198, 199, 200, 268, 269, 277, 283, 291, 294, 296], "empti": [30, 51, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 93, 98, 101, 203, 211, 270, 285], "behavior": [30, 112, 176, 203, 280], "easili": [30, 290], "updat": [30, 43, 49, 51, 69, 71, 74, 76, 77, 78, 86, 108, 110, 112, 114, 115, 146, 168, 188, 205, 213, 265, 269, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291], "futur": [30, 91, 98, 101, 203, 272, 273], "start": [30, 49, 51, 69, 71, 74, 76, 77, 78, 80, 81, 83, 84, 86, 91, 98, 101, 128, 146, 152, 154, 268, 270, 273, 277, 281, 283, 291, 300], "notabl": 30, "exhaust": [30, 288], "resource_typ": 30, "hydrotherm": 30, "eg": 30, "resource_potenti": 30, "mw": [30, 49, 51, 59, 114, 115, 199, 202, 203, 205, 270, 284, 285], "automat": [30, 51, 101, 108, 110, 112, 158, 205, 211, 270, 271, 273, 275, 293, 300], "left": [30, 51, 91, 101, 158, 203, 270, 272, 273, 276, 277, 285, 298], "overridden": [30, 74], "case": [30, 51, 69, 71, 74, 76, 77, 78, 84, 86, 91, 98, 101, 136, 148, 149, 152, 153, 154, 158, 203, 205, 270, 272, 273, 276, 277, 283, 284, 285, 286, 296], "resource_temp": 30, "temperatur": [30, 49, 50, 74, 139, 298], "c": [30, 74, 106, 139, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 288, 290, 291, 292, 294, 301, 302], "To": [30, 91, 98, 101, 125, 127, 144, 146, 176, 268, 271, 274, 280, 282, 288, 292, 298, 300], "overrid": [30, 74, 131, 291], "own": [30, 131, 269, 282, 288, 291, 300, 301], "csv": [30, 49, 51, 84, 91, 98, 101, 114, 115, 158, 161, 163, 168, 169, 170, 171, 188, 202, 203, 205, 269, 270, 271, 272, 273, 276, 278, 284, 285, 290, 294, 297], "In": [30, 51, 69, 71, 74, 76, 77, 78, 86, 91, 101, 
132, 134, 137, 143, 150, 158, 205, 270, 272, 273, 276, 277, 284, 291], "ignor": [30, 51, 91, 101, 193, 198, 199, 200, 203, 205, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285], "determin": [30, 54, 91, 98, 101, 109, 178, 195, 198, 199, 200, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "sole": 30, "resource_depth": 30, "depth": [30, 84], "m": [30, 41, 49, 51, 84, 133, 136, 139, 270, 288, 298, 302], "analysis_typ": 30, "assumpt": [30, 49, 136, 148], "num_wel": 30, "affect": [30, 205, 207, 284], "unlik": [30, 51, 136, 270], "particular": [30, 51, 91, 101, 132, 134, 137, 143, 150, 203, 270, 272, 273, 285], "obtain": [30, 51, 139, 203, 270, 285], "work": [30, 69, 71, 74, 76, 77, 78, 86, 198, 200, 283, 291, 292, 302], "leav": 30, "altern": [30, 91, 176, 272, 280], "num_wells_getem": 30, "assum": [30, 49, 51, 69, 71, 74, 76, 77, 78, 86, 101, 112, 121, 122, 203, 270, 272, 273, 277, 280, 285, 288, 296, 302], "block": [30, 203, 285, 291, 293], "probabl": [30, 41, 49, 51, 54, 74, 215, 270, 291], "extra": [30, 51, 76, 77, 78, 146, 148, 149, 150, 152, 153, 154, 203, 205, 207, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 283, 284, 285, 291], "develop": [30, 157, 291, 300, 303], "conversion_typ": 30, "convers": [30, 209], "binari": [30, 54, 193, 198, 199, 200], "flash": 30, "design_temp": 30, "design": [30, 54, 59, 112, 291], "lower": [30, 51, 54, 84, 91, 98, 101, 125, 127, 203, 270, 273, 274, 277, 285], "than": [30, 51, 108, 112, 125, 127, 131, 132, 133, 134, 143, 176, 198, 203, 270, 272, 273, 274, 277, 280, 285, 291], "latter": [30, 51, 101, 203, 270, 273, 285], "order": [30, 49, 59, 69, 71, 74, 76, 77, 78, 80, 81, 86, 106, 131, 136, 144, 158, 276], "avoid": [30, 144, 146, 188, 205, 271, 284, 301], "cost": [30, 44, 45, 49, 51, 59, 91, 92, 93, 95, 101, 114, 115, 199, 202, 203, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 289, 295, 296], "inj_prod_well_ratio": 30, "inject": 30, "ratio": [30, 121, 122, 125, 127, 
274], "gui": 30, "recommend": [30, 143, 291, 303], "75": [30, 299, 300], "num_confirmation_wel": 30, "confirm": 30, "drill": 30, "2023": [30, 288, 302], "howev": [30, 51, 91, 101, 108, 158, 176, 203, 205, 270, 272, 273, 276, 277, 280, 284, 285], "lead": 30, "neg": 30, "small": [30, 133, 291, 296, 300], "equal": [30, 80, 81, 84, 125, 127, 176, 183, 198, 199, 200, 203, 207, 274, 280, 288], "drill_cost_per_wel": 30, "limit": [30, 76, 77, 78, 91, 98, 101, 114, 115, 121, 122, 125, 127, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 300], "when": [30, 51, 60, 91, 98, 101, 108, 112, 131, 136, 144, 146, 163, 168, 176, 193, 205, 226, 270, 271, 273, 280, 284, 291, 293, 294], "perform": [30, 51, 54, 59, 69, 71, 74, 76, 77, 78, 86, 91, 98, 101, 161, 198, 199, 200, 203, 205, 270, 271, 272, 273, 279, 280, 283, 284, 285, 291, 292, 294], "capital_cost_per_kw": [30, 59], "capit": [30, 45, 49, 51, 59, 91, 92, 93, 95, 101, 199, 203, 205, 270, 272, 273, 284, 285], "capital_cost": [30, 49, 51, 59, 91, 93, 95, 101, 110, 199, 203, 270, 272, 273, 275, 285], "fixed_operating_cost": [30, 49, 51, 59, 91, 93, 95, 110, 199, 203, 270, 275, 285], "fix": [30, 49, 51, 59, 79, 84, 93, 95, 128, 205, 270, 284], "per": [30, 49, 59, 76, 77, 78, 83, 91, 93, 98, 101, 143, 144, 176, 195, 197, 198, 199, 200, 203, 205, 280, 284, 285, 288, 291, 294, 302], "fresnel": 31, "linearfresneldsgiph": 31, "wave": [32, 295], "hindcast": [32, 295], "u": [32, 132, 133, 134, 283, 291, 295], "abstractsampv": [33, 34, 35, 36], "heater": 37, "swh": 37, "concentr": [38, 288], "tower": 38, "molten": 38, "salt": 38, "physic": [39, 136, 291], "troughphysicalprocessheat": 39, "abstractsamwind": [40, 138], "ws_edg": [41, 49], "wd_edg": [41, 49], "wind_dist": [41, 49], "speed": [41, 49, 50, 65, 74, 108, 112, 131, 132, 133, 134, 135, 136, 139, 141, 198, 199, 200, 215, 298], "joint": [41, 49, 51, 270], "probabilti": 41, "distrubt": 41, "distribut": [41, 43, 49, 51, 86, 131, 132, 133, 134, 136, 170, 270, 300], 
"bin": [41, 49, 51, 65, 199, 203, 270, 285], "edg": [41, 49, 51, 270], "winddirect": [41, 49], "deg": [41, 49], "dist": [41, 49], "2d": [41, 49, 50, 51, 65, 101, 112, 163, 168, 183, 191, 195, 198, 199, 200, 203, 207, 270, 273], "send": [41, 291], "creat": [42, 51, 59, 69, 71, 73, 74, 76, 77, 78, 79, 86, 108, 112, 128, 163, 166, 170, 171, 174, 190, 191, 192, 201, 202, 203, 206, 207, 214, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 290, 291, 292, 300, 301, 302], "mon": [42, 73, 79], "14": [42, 297], "40": [42, 297, 298], "42": [42, 51, 112, 203, 270, 285, 291, 297, 300], "2020": [42, 295], "author": [42, 73, 79, 118, 123, 174, 201, 206, 214], "gbuster": [42, 73, 79, 174, 201, 206, 214, 301], "modifi": [43, 80, 81, 290, 291, 296], "pysam_vers": 43, "checker": 43, "reflect": 43, "v2_correction_kei": 43, "valid": [43, 69, 71, 74, 76, 77, 78, 86, 101, 126, 127, 131, 137, 143, 188, 195, 198, 199, 200, 202], "balanc": [44, 45], "pair": [45, 51, 69, 71, 74, 76, 77, 78, 84, 86, 101, 108, 109, 112, 125, 127, 193, 202, 203, 269, 270, 273, 274, 277, 285, 290, 291, 292, 297], "machine_r": 45, "turbin": [45, 49, 51, 54, 55, 56, 58, 59, 61, 64, 131, 132, 133, 134, 135, 270, 296], "machin": [45, 288, 302], "rate": [45, 59, 74, 93, 95, 132, 134, 135, 136, 205, 284, 290], "hub_height": [45, 49], "hub": [45, 48, 49, 84], "height": [45, 48, 49, 84], "rotor_diamet": 45, "rotor": [45, 49, 51, 270], "diamet": [45, 49, 51, 270], "number_of_turbin": 45, "turbine_capital_cost": 45, "zero": [45, 74, 91, 101, 135, 136, 144, 198, 199, 200, 215, 272, 273, 296], "attr": [45, 108, 112, 199, 203, 207], "bos_cost": [45, 91], "turbine_cost": [45, 91], "sales_tax_mult": 45, "sale": 45, "tax": [45, 290], "multipli": [45, 49, 51, 59, 93, 114, 115, 134, 199, 202, 203, 205, 207, 270, 284, 285], "instal": [45, 101, 244, 245, 291, 300], "sales_tax_cost": [45, 91], "total_installed_cost": [45, 91], "bo": 45, "breakdown": 45, "Not": [45, 283], "maintain": [45, 80, 81, 193, 296], 
"polymorph": 45, "break": [45, 57], "pylint": 45, "w0613": 45, "unus": 45, "tool": [46, 47, 116, 172, 303], "res_fpath": [48, 51, 203, 207, 270, 285], "sc_gid_to_hh": 48, "sc_gid_to_res_gid": 48, "multi": [48, 49, 51, 84, 91, 98, 101, 104, 108, 109, 110, 176, 199, 203, 236, 268, 275, 277, 278, 280, 285, 292], "preload": [48, 50], "intend": [48, 50, 108, 112, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "facilit": [48, 50, 124, 136, 146], "load": [48, 49, 50, 51, 77, 78, 84, 91, 98, 101, 108, 112, 114, 115, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291, 298], "bespokewindpl": 48, "slow": [48, 49, 50, 51, 270], "parallel": [48, 49, 50, 51, 76, 77, 78, 84, 91, 98, 101, 168, 175, 176, 183, 184, 203, 205, 206, 207, 241, 270, 272, 273, 280, 282, 284, 285, 289, 300, 301], "hdf5": [48, 50, 51, 91, 101, 110, 125, 158, 176, 203, 205, 270, 271, 273, 275, 276, 280, 284, 285, 288], "h5": [48, 49, 51, 91, 98, 101, 106, 108, 109, 111, 112, 125, 158, 161, 163, 168, 175, 176, 177, 181, 182, 183, 184, 190, 191, 192, 193, 195, 198, 199, 200, 202, 203, 207, 219, 232, 261, 263, 269, 270, 271, 276, 278, 285, 291, 294, 297, 298, 299, 300], "sc": [48, 49, 51, 125, 127, 176, 177, 183, 184, 188, 195, 197, 198, 199, 200, 203, 205, 207, 270, 274, 285, 291], "pull": [48, 51, 175, 176, 177, 203, 270, 280, 285, 300], "given": [48, 49, 51, 59, 69, 71, 74, 76, 77, 78, 80, 81, 84, 86, 91, 98, 101, 106, 108, 114, 115, 131, 133, 135, 136, 137, 144, 146, 152, 163, 170, 183, 188, 190, 191, 192, 193, 198, 199, 200, 203, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 288, 293, 298, 299, 300], "oif": 48, "teh": 48, "befor": [48, 76, 77, 78, 80, 81, 91, 98, 101, 125, 127, 144, 158, 188, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 291, 292], "get_preloaded_data_for_gid": 48, "sc_gid": [48, 176, 188, 195, 203, 280], "bespokesingleplantdata": [48, 49], "act": 48, "stand": [48, 291, 300], "excl": [49, 106, 
198, 199, 200, 207], "tm_dset": [49, 51, 183, 184, 191, 195, 198, 199, 200, 203, 270, 285], "objective_funct": [49, 51, 54, 59, 270], "capital_cost_funct": [49, 51, 59, 270], "fixed_operating_cost_funct": [49, 51, 59, 270], "variable_operating_cost_funct": [49, 51, 59, 270], "min_spac": [49, 51, 56, 59, 270], "5x": [49, 51, 270], "wake_loss_multipli": [49, 51, 59, 270], "ga_kwarg": [49, 51, 270], "system_capac": [49, 51, 59, 93, 110, 199, 203, 270, 275, 285], "ws_bin": [49, 51, 270], "wd_bin": [49, 51, 270], "360": [49, 51, 270], "45": [49, 51, 270, 300], "excl_dict": [49, 51, 164, 181, 182, 183, 184, 191, 198, 199, 200, 202, 203, 270, 285], "inclusion_mask": [49, 51, 183, 191, 198, 199, 200, 203], "data_lay": [49, 51, 198, 199, 200, 202, 203, 270, 285], "64": [49, 51, 183, 184, 195, 197, 198, 199, 200, 203, 270, 285], "excl_area": [49, 51, 183, 184, 198, 199, 200, 203, 270, 285], "exclusion_shap": [49, 51, 197, 198, 199, 200], "eos_mult_baseline_cap_mw": 49, "200": [49, 51, 203, 270, 285, 291], "prior_meta": [49, 51], "pre_loaded_data": [49, 51], "close": [49, 106, 108, 112, 132, 134, 136, 176, 181, 182, 190, 191, 192, 195, 198, 199, 200, 202, 262, 280], "analyz": [49, 59, 69, 84, 86, 91, 98, 101, 157, 158, 197, 198, 199, 200, 273], "optim": [49, 51, 52, 56, 59, 60, 131, 132, 133, 134, 136, 270, 300], "layout": [49, 51, 59, 61, 270], "exclus": [49, 51, 56, 59, 60, 74, 83, 84, 163, 164, 166, 181, 182, 183, 184, 187, 188, 195, 197, 198, 199, 200, 202, 203, 205, 206, 207, 224, 236, 270, 284, 285, 288, 294, 296], "suppli": [49, 51, 93, 101, 113, 114, 115, 125, 157, 158, 163, 169, 171, 175, 176, 177, 179, 183, 184, 185, 186, 188, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 207, 267, 268, 270, 273, 274, 276, 277, 280, 281, 282, 288, 289, 302], "exclusionmask": [49, 181, 182, 191, 192, 193, 198, 199, 200, 202], "filepath": [49, 51, 86, 91, 98, 101, 108, 109, 112, 125, 126, 158, 164, 175, 176, 177, 181, 182, 183, 184, 191, 198, 199, 200, 202, 203, 205, 
207, 270, 271, 273, 274, 276, 278, 280, 284, 285, 300], "sam": [49, 51, 59, 81, 84, 85, 86, 87, 91, 93, 98, 101, 108, 112, 138, 139, 145, 158, 215, 223, 244, 245, 247, 248, 249, 250, 252, 253, 270, 272, 273, 276, 278, 289, 295, 297, 298, 299, 300], "minim": [49, 51, 54, 59, 270], "n_turbin": [49, 51, 59, 270], "aep": [49, 51, 59, 93, 270], "fixed_charge_r": [49, 51, 59, 91, 93, 95, 110, 199, 203, 205, 270, 275, 284, 285], "part": [49, 51, 59, 158, 269, 270, 271, 276, 277, 282, 292, 294], "wind_plant": [49, 51, 59, 270], "through": [49, 51, 59, 91, 98, 101, 108, 157, 158, 270, 276, 288, 291, 294, 297], "evalu": [49, 51, 59, 91, 93, 101, 203, 270, 272, 273, 285], "variable_operating_cost": [49, 51, 59, 91, 93, 95, 110, 199, 203, 270, 275, 285], "minimum": [49, 51, 56, 57, 59, 126, 163, 164, 181, 182, 183, 184, 190, 191, 193, 202, 203, 270, 285], "space": [49, 51, 56, 59, 84, 91, 98, 101, 150, 270, 273, 288], "between": [49, 51, 54, 56, 59, 83, 91, 101, 108, 124, 125, 126, 127, 136, 144, 163, 164, 166, 205, 207, 270, 272, 273, 274, 284, 291], "meter": [49, 51, 59, 270], "scale": [49, 51, 59, 92, 93, 98, 101, 108, 112, 132, 134, 139, 166, 170, 176, 177, 193, 199, 203, 205, 270, 273, 280, 285, 288, 291, 295, 300], "lost": [49, 51, 59, 137, 143, 270], "due": [49, 51, 59, 137, 144, 203, 270, 291], "wake": [49, 51, 59, 270], "NOT": [49, 51, 59, 114, 115, 270], "come": [49, 51, 59, 270], "even": [49, 51, 59, 69, 71, 74, 76, 77, 78, 86, 112, 136, 270, 281], "keyword": [49, 51, 59, 91, 101, 181, 182, 183, 184, 191, 193, 198, 199, 200, 202, 203, 269, 270, 272, 273, 277, 285], "ga": [49, 51, 59, 270], "geneticalgorithm": [49, 51, 59, 270], "ws_mean": [49, 51, 270], "windspeed_mean": [49, 51, 270], "temperature_mean": [49, 51, 270], "pressure_mean": [49, 51, 270], "entri": [49, 51, 69, 74, 76, 77, 78, 83, 86, 91, 98, 101, 176, 177, 195, 198, 199, 200, 203, 207, 270, 280, 291], "stop": [49, 51, 80, 81, 84, 128, 270, 291, 294], "inclus": [49, 51, 74, 83, 84, 91, 98, 101, 163, 183, 
184, 189, 190, 191, 192, 193, 198, 199, 200, 203, 262, 270, 285], "four": [49, 51, 205, 270, 284], "15": [49, 51, 112, 270, 291, 297, 298], "270": [49, 51, 270], "arug": [49, 163, 181, 182, 183, 184, 191, 198, 199, 200, 202, 203], "layer_dset_nam": [49, 51, 181, 182, 183, 184, 191, 198, 199, 200, 202, 203, 270, 285], "supply_curv": [49, 51, 163, 164, 270, 285], "layermask": [49, 51, 163, 181, 182, 183, 184, 190, 191, 192, 198, 199, 200, 202, 203, 270, 285], "mask": [49, 51, 59, 158, 163, 164, 166, 181, 182, 183, 184, 188, 189, 190, 191, 192, 193, 198, 199, 200, 202, 203, 270, 285], "exclud": [49, 51, 110, 183, 188, 190, 191, 192, 193, 198, 199, 200, 203, 270, 275, 285], "against": [49, 51, 69, 71, 87, 198, 199, 200, 203, 270, 285], "layer": [49, 51, 105, 106, 163, 189, 190, 191, 192, 193, 195, 197, 198, 199, 200, 202, 203, 205, 207, 270, 285, 288, 294], "anoth": [49, 198, 199, 200, 202, 203, 265], "dset": [49, 51, 108, 109, 110, 112, 158, 163, 164, 168, 195, 198, 199, 200, 202, 203, 207, 261, 270, 275, 276, 285], "fpath": [49, 51, 158, 161, 164, 198, 199, 200, 202, 203, 270, 278, 285, 297], "along": [49, 132, 133, 134, 195, 197, 198, 199, 200], "axi": [49, 91, 98, 101, 132, 133, 134, 195, 197, 198, 199, 200, 298], "area": [49, 51, 56, 57, 59, 106, 163, 164, 181, 182, 183, 184, 190, 191, 198, 199, 200, 202, 203, 270, 285], "pixel": [49, 51, 101, 106, 183, 184, 197, 198, 199, 200, 203, 207, 270, 273, 285, 296], "km2": [49, 106, 164, 183, 184, 198, 199, 200, 202, 203], "try": [49, 144, 146, 176, 183, 184, 198, 199, 200, 203, 280, 291], "infer": [49, 51, 135, 183, 184, 193, 198, 199, 200, 202, 203, 270, 271, 285, 300], "excl_fpath": [49, 51, 164, 181, 182, 183, 184, 191, 198, 199, 200, 202, 203, 207, 270, 285], "extent": [49, 51, 106, 183, 184, 197, 198, 199, 200, 203, 207, 270, 285, 288, 296], "col": [49, 108, 112, 183, 184, 190, 191, 192, 195, 197, 198, 199, 200, 203, 278], "thing": [49, 198, 199, 200, 291], "consider": [49, 51, 101, 198, 199, 200, 270, 273], 
"baselin": [49, 178], "economi": [49, 92, 93, 199, 203, 285], "eo": 49, "divid": 49, "By": [49, 51, 91, 101, 109, 110, 125, 127, 136, 141, 144, 158, 161, 176, 193, 203, 205, 211, 268, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 283, 284, 285, 301], "align": 49, "atb": 49, "here": [49, 112, 131, 291, 296, 300], "tinyurl": [49, 139, 209], "com": [49, 139, 209, 288, 291, 302], "y85hnu6h": 49, "belong": [49, 51, 83, 84, 91, 98, 101, 195, 270, 294], "prior": [49, 51, 110, 158, 270, 271, 275, 276, 288, 302], "turbine_x_coord": [49, 51, 270], "turbine_y_coord": [49, 51, 270], "techmap": [49, 51, 106, 181, 182, 183, 184, 191, 195, 199, 200, 202, 203, 270, 285], "forecast": [49, 51, 101, 270, 273], "ecmwf": [49, 51, 101, 270, 273], "histor": [49, 51, 101, 270, 273, 296], "meteorologi": [49, 51, 101, 270, 273], "exit": [49, 198, 199, 200, 268, 277, 278], "open": [49, 106, 108, 112, 190, 191, 192, 198, 199, 200, 202, 288, 298], "get_weighted_res_t": 49, "n_time": 49, "get_weighted_res_dir": 49, "mean_wind_dir": [49, 198, 199, 200], "degre": [49, 74, 203], "north": [49, 288], "include_mask": [49, 59, 198, 199, 200], "pixel_side_length": [49, 59], "side": [49, 51, 203, 270, 285], "original_sam_sys_input": 49, "yet": [49, 291], "wind_plant_pd": 49, "final": [49, 51, 84, 91, 98, 101, 108, 112, 163, 166, 181, 182, 183, 184, 190, 191, 193, 202, 203, 270, 273, 285, 288, 294], "sc_point": [49, 188, 203, 205, 207, 284, 285], "aggscpoint": 49, "basic": [49, 291, 297], "res_df": 49, "compliant": 49, "pressur": [49, 50, 139, 298], "sort": [49, 51, 80, 81, 144, 188, 205, 284], "annual_time_index": 49, "wind_plant_t": 49, "corresond": 49, "normal": [49, 86, 108, 112, 191, 198, 199, 200, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "sum": [49, 51, 119, 121, 122, 169, 183, 198, 199, 200, 203, 205, 270, 284, 285], "initialize_wind_plant_t": 49, "windpowerpd": [49, 59], "plant_optim": 49, "placement": 49, "placeturbin": [49, 60], "recalc_lco": [49, 199, 203, 285], 
"recalcul": 49, "get_lcoe_kwarg": 49, "namespac": [49, 69, 71, 74, 76, 77, 78, 84, 86, 93, 98, 101, 202, 273], "lcoe_kwarg": 49, "These": [49, 51, 74, 136, 139, 143, 203, 205, 270, 272, 273, 280, 284, 285, 291, 294], "fixed_operating_co": 49, "get_wind_handl": 49, "wildcard": [49, 51, 101, 270, 271, 273], "multiyearwindresourc": [49, 51, 270], "check_depend": 49, "run_wind_plant_t": 49, "export": [49, 158, 276, 296], "run_plant_optim": 49, "agg_data_lay": [49, 198, 199, 200], "bsp": 49, "data_ind": 50, "wind_dir": [50, 188, 205, 284], "temp": 50, "bespokesinglepl": [50, 51], "dimens": [50, 51, 101, 270, 273], "expos": 50, "directli": [50, 51, 93, 158, 175, 176, 177, 178, 270, 276, 280, 291, 300], "sam_fil": [51, 84, 91, 101, 158, 270, 272, 273, 276, 278, 290, 297, 298, 299, 300], "area_filter_kernel": [51, 164, 181, 182, 183, 184, 191, 202, 203, 270, 285], "queen": [51, 163, 164, 181, 182, 183, 184, 190, 191, 202, 203, 270, 285], "min_area": [51, 163, 164, 181, 182, 183, 184, 190, 191, 202, 203, 270, 285], "pre_extract_inclus": [51, 183, 184, 203, 270, 285, 291], "prior_run": [51, 270], "pre_load_data": [51, 270], "baseaggreg": [51, 183, 203], "much": [51, 270, 291, 300], "pipe": [51, 101, 270, 273, 294], "usual": [51, 101, 157, 158, 199, 270, 273, 276, 291, 296], "grid": [51, 183, 195, 197, 198, 199, 200, 203, 205, 270, 284, 285, 296], "everi": [51, 101, 144, 178, 195, 198, 199, 200, 203, 207, 269, 270, 273, 277, 283, 285, 291], "public": [51, 270, 291], "methodologi": [51, 270], "more": [51, 91, 101, 125, 127, 131, 132, 133, 134, 143, 144, 146, 164, 181, 182, 183, 184, 190, 191, 202, 203, 268, 270, 272, 273, 274, 282, 288, 290, 291, 292, 300], "spread": [51, 101, 203, 269, 270, 273, 285], "across": [51, 101, 132, 133, 134, 136, 178, 203, 269, 270, 271, 272, 273, 285, 288], "defin": [51, 59, 63, 108, 112, 125, 127, 158, 183, 184, 191, 199, 203, 207, 269, 270, 274, 276, 277, 285, 297], "appear": [51, 203, 270, 285, 291], "onc": [51, 80, 81, 91, 98, 101, 109, 203, 
268, 270, 285, 292, 300], "h5_dir": [51, 91, 98, 101, 270, 273, 278], "prefix": [51, 91, 93, 98, 101, 109, 125, 127, 269, 270, 273, 274, 278], "suffix": [51, 91, 98, 101, 109, 270, 273, 278, 294], "former": [51, 101, 270, 273], "readabl": [51, 101, 270, 273, 288], "renewable_resourc": [51, 270], "multi_year_resourc": [51, 270], "conform": [51, 101, 270, 273], "observ": [51, 101, 270, 273], "00": [51, 101, 112, 270, 273], "1st": [51, 101, 270, 273], "under": [51, 74, 101, 110, 158, 203, 270, 273, 275, 276, 285, 288], "its": [51, 60, 101, 199, 209, 270, 273, 288, 291, 297, 301, 302], "link": [51, 203, 270, 285], "metric": [51, 176, 203, 270, 280, 285], "custom": [51, 101, 157, 216, 270, 273, 291], "coupl": [51, 203, 270, 285], "high": [51, 197, 198, 199, 200, 203, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 290, 291, 293, 295], "separ": [51, 62, 110, 203, 270, 271, 275, 282, 285, 292], "coordin": [51, 57, 59, 62, 84, 106, 108, 112, 190, 191, 192, 195, 198, 199, 200, 203, 207, 270, 278, 285, 297, 299, 300], "written": [51, 91, 101, 108, 112, 125, 158, 176, 212, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "express": [51, 166, 170, 171, 270], "slice": [51, 84, 91, 98, 101, 108, 112, 146, 183, 184, 195, 197, 198, 199, 200, 203, 207, 261, 270, 272, 273, 297], "two": [51, 57, 74, 80, 81, 83, 91, 98, 101, 124, 125, 127, 144, 193, 195, 199, 203, 270, 272, 273, 274, 277, 282, 291, 292, 300], "below": [51, 74, 91, 101, 131, 135, 141, 203, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 286, 288, 291, 292, 298, 300, 302], "do": [51, 69, 71, 74, 76, 77, 78, 86, 91, 98, 101, 114, 115, 144, 158, 205, 207, 269, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291, 298], "capital_cost_multipli": [51, 270], "fixed_operating_cost_multipli": [51, 270], "variable_operating_cost_multipli": [51, 270], "treat": [51, 131, 132, 133, 134, 270, 291, 296], "guidelin": [51, 91, 101, 270, 272, 273], 
"supplycurveext": [51, 203, 270, 285], "import": [51, 59, 84, 86, 101, 112, 203, 270, 285, 291, 292, 294, 297, 298, 299, 300], "valid_sc_point": [51, 195, 203, 270, 285], "tolist": [51, 203, 270, 285], "choic": [51, 146, 270], "them": [51, 80, 81, 146, 268, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "cli": [51, 52, 72, 89, 99, 103, 104, 117, 156, 160, 173, 185, 186, 209, 210, 212, 243, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 286, 288, 289, 292, 294, 301, 302], "usag": [51, 270, 296], "to_csv": [51, 270], "id": [51, 84, 86, 91, 98, 101, 114, 115, 158, 270, 272, 273, 276, 291], "onshor": [51, 91, 101, 158, 270, 272, 273, 276, 296, 297], "sam_config": [51, 84, 91, 98, 101, 108, 112, 158, 270, 272, 273, 276, 298], "yaml": [51, 86, 91, 101, 158, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 286], "offshor": [51, 91, 101, 155, 157, 158, 188, 198, 199, 205, 238, 239, 270, 272, 273, 276, 284, 289, 297], "sam_key_1": [51, 91, 101, 158, 270, 272, 273, 276], "sam_value_1": [51, 91, 101, 158, 270, 272, 273, 276], "sam_key_2": [51, 91, 101, 158, 270, 272, 273, 276], "sam_value_2": [51, 91, 101, 158, 270, 272, 273, 276], "select": [51, 54, 91, 101, 146, 176, 270, 272, 273, 280, 291], "typical_exclus": [51, 203, 270, 285], "exclude_valu": [51, 193, 203, 270, 285], "255": [51, 203, 270, 285], "another_exclus": [51, 203, 270, 285], "exclusion_with_nodata": [51, 203, 270, 285], "exclude_rang": [51, 193, 203, 270, 285], "exclude_nodata": [51, 193, 203, 270, 285], "nodata_valu": [51, 193, 203, 270, 285], "partial_setback": [51, 203, 270, 285], "use_as_weight": [51, 193, 203, 270, 285], "height_limit": [51, 203, 270, 285], "slope": [51, 203, 270, 285], "include_rang": [51, 193, 203, 270, 285], "developable_land": [51, 203, 270, 285], "force_include_valu": [51, 193, 203, 270, 285], "more_developable_land": [51, 203, 270, 285], "force_include_rang": [51, 193, 203, 270, 285], "rook": [51, 164, 203, 
270, 285], "contigu": [51, 163, 181, 182, 183, 184, 190, 191, 202, 203, 270, 285], "filter": [51, 163, 164, 181, 182, 183, 184, 190, 191, 202, 203, 270, 283, 285], "connect": [51, 114, 115, 203, 205, 270, 284, 285, 296], "cluster": [51, 203, 270, 285, 289, 300], "compar": [51, 74, 131, 136, 178, 203, 270, 285], "km": [51, 114, 115, 163, 181, 182, 183, 184, 190, 191, 198, 199, 200, 202, 203, 205, 270, 285], "keep": [51, 144, 146, 203, 205, 270, 284, 285, 291, 294], "isol": [51, 203, 270, 285], "land": [51, 203, 270, 285, 288], "within": [51, 91, 108, 112, 136, 152, 158, 163, 168, 176, 203, 205, 270, 272, 276, 280, 284, 285], "mark": [51, 203, 270, 285], "explan": [51, 139, 203, 270, 285], "mani": [51, 91, 98, 101, 133, 203, 207, 270, 272, 273, 285, 291], "cell": [51, 176, 183, 203, 270, 280, 285], "64x64": [51, 176, 203, 270, 280, 285], "output_layer_nam": [51, 203, 270, 285], "layer_nam": [51, 190, 191, 192, 203, 270, 285], "another_output_layer_nam": [51, 203, 270, 285], "input_layer_nam": [51, 203, 270, 285], "mode": [51, 108, 112, 203, 270, 285], "omit": [51, 203, 270, 285, 292], "one": [51, 91, 98, 101, 110, 112, 121, 122, 125, 127, 132, 133, 134, 135, 136, 137, 151, 158, 175, 176, 181, 182, 183, 184, 190, 191, 193, 202, 203, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 288, 291, 294], "min": [51, 169, 178, 193, 203, 270, 285], "max": [51, 135, 136, 146, 161, 169, 193, 203, 270, 279, 285], "categori": [51, 203, 270, 285], "describ": [51, 203, 270, 285], "exist": [51, 69, 71, 74, 76, 77, 78, 86, 110, 121, 122, 126, 136, 158, 203, 205, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 292], "faster": [51, 108, 112, 183, 184, 203, 270, 285], "fly": [51, 183, 184, 203, 270, 285], "worker": [51, 76, 77, 78, 83, 84, 91, 98, 101, 161, 163, 168, 176, 183, 184, 203, 205, 207, 270, 272, 273, 279, 280, 284, 285], "satisfi": [51, 270, 294], "represent": [51, 132, 133, 134, 176, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 
285, 288], "x": [51, 56, 59, 62, 64, 91, 98, 101, 106, 132, 133, 134, 203, 270, 297], "bottom": [51, 270], "y": [51, 56, 62, 64, 106, 270], "previou": [51, 91, 109, 158, 205, 213, 270, 271, 272, 276, 284], "consid": [51, 60, 86, 91, 101, 158, 203, 205, 270, 272, 273, 276, 284, 291], "miss": [51, 101, 114, 115, 119, 120, 158, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "consum": [51, 270], "front": [51, 270], "drastic": [51, 270], "signific": [51, 270], "overal": [51, 270], "speedup": [51, 270], "o": [51, 84, 91, 101, 270, 272, 273, 294, 297, 298, 299, 300], "capabl": [51, 270, 288], "amount": [51, 133, 270, 300], "ram": [51, 270], "sure": [51, 69, 71, 74, 76, 77, 78, 86, 144, 146, 158, 270, 276, 282, 288, 291, 302], "split": [51, 83, 84, 91, 98, 101, 149, 270, 272, 273], "node": [51, 76, 77, 78, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 289, 291, 301, 303], "36": [51, 270, 297], "conu": [51, 270, 288, 294, 300, 302], "larg": [51, 136, 144, 146, 203, 270, 271, 285, 288, 291, 295], "memori": [51, 76, 77, 78, 91, 98, 101, 259, 264, 265, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291, 293], "smaller": [51, 132, 134, 270], "completed_gid": 51, "slice_lookup": 51, "sc_point_gid": [51, 188, 195, 197, 198, 199, 200, 203], "sam_sys_inputs_with_site_data": 51, "becom": 51, "site_data": [51, 91, 98, 101, 158, 272, 273, 276, 290], "save_output": 51, "out_fpath": [51, 91, 101, 110, 158, 183, 188, 203, 205], "parent": [51, 54], "directori": [51, 69, 71, 74, 76, 77, 78, 86, 91, 98, 101, 109, 161, 163, 164, 166, 168, 169, 170, 171, 212, 213, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 288, 291, 292, 294, 300, 301, 302], "alreadi": [51, 110, 121, 122, 146, 275, 292], "desir": [51, 59, 69, 71, 74, 76, 77, 78, 86, 101, 135, 137, 144, 146, 152, 153, 154, 207, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291], "extens": 51, "run_seri": [51, 183, 203], "0081": [51, 183, 203], 
"standalon": [51, 183, 203], "serial": [51, 69, 71, 76, 77, 78, 168, 176, 268, 280], "take": [51, 54, 84, 91, 101, 109, 143, 144, 146, 176, 205, 272, 273, 280, 291, 296], "variant": [51, 290], "run_parallel": [51, 183, 203], "max_work": [51, 76, 77, 78, 91, 101, 161, 163, 168, 176, 183, 203, 205, 207, 270, 272, 273, 279, 280, 284, 285, 291, 298, 299, 300], "core": [51, 91, 98, 101, 161, 168, 183, 203, 206, 207, 270, 279, 285, 291], "summari": [51, 125, 126, 127, 163, 175, 176, 177, 183, 184, 188, 195, 198, 199, 200, 202, 203, 205, 274, 280, 284, 285, 288, 291], "cpu": [51, 161, 183, 203, 205, 207, 279, 284, 285, 291], "were": [51, 91, 93, 101, 158, 199, 202], "raster": [51, 183, 184, 195, 203], "simpl": [53, 54, 101, 131, 181, 203, 205, 262, 284, 285, 289, 298], "genet": [53, 54, 59], "algorithm": [53, 54, 56, 59], "bit": 54, "bound": [54, 125, 127, 131, 132, 133, 134, 207, 274], "variable_typ": 54, "max_gener": 54, "population_s": 54, "crossover_r": 54, "mutation_r": 54, "01": [54, 112, 297, 300], "tol": 54, "1e": 54, "06": [54, 112], "convergence_it": 54, "max_tim": 54, "3600": 54, "discret": 54, "n": [54, 83, 112, 124, 176, 177, 178, 283, 288, 302], "upper": [54, 125, 127, 148, 149, 152, 153, 154, 207, 274, 298], "maximum": [54, 57, 84, 114, 115, 125, 127, 193, 203, 205, 274, 284], "popul": [54, 110, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "crossov": 54, "phase": 54, "mutat": 54, "toler": 54, "converg": 54, "initialize_design_vari": 54, "randomli": [54, 144, 146], "initialize_bit": 54, "initialize_popul": 54, "offspr": 54, "initialize_fit": 54, "fit": [54, 131, 132, 133, 134, 136, 137], "member": 54, "chromosome_2_vari": 54, "chromosom": 54, "individu": [54, 119], "optimize_ga": 54, "pack": [55, 56, 59, 254], "safe_polygon": [56, 59], "weight_x": 56, "0013547": 56, "maxim": [56, 59], "polygon": [56, 61, 62, 63], "multipolygon": [56, 59, 63], "safe": [56, 69, 71, 74, 76, 77, 78, 86, 271], "place": [56, 58, 59, 80, 81, 98, 101, 149, 268, 273], 
"without": [56, 91, 98, 101, 108, 109, 112, 203, 205, 269, 284, 285, 292], "violat": [56, 91, 98, 101], "boundari": [56, 61, 62, 63, 64, 133, 134, 203, 285], "setback": [56, 288], "constraint": [56, 112], "pack_turbines_poli": 56, "fast": 56, "turbine_x": [56, 59], "turbine_i": [56, 59], "clear": [56, 69, 71, 74, 76, 77, 78, 80, 81, 86], "reset": [56, 267, 288, 302], "_summary_": 57, "help": [57, 124, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 286, 288, 302], "ti": [57, 91, 98, 101, 199], "differ": [57, 65, 98, 101, 108, 133, 134, 144, 158, 203, 205, 265, 276, 277, 284], "geometri": 57, "exterior": [57, 62], "_type_": 57, "coord": [57, 62, 195], "element": [57, 80, 81], "southwest": 57, "northeast": 57, "everyth": [59, 157, 158, 276, 291, 295], "cost_funct": 59, "exclusionmaskfromdict": 59, "define_exclus": 59, "initialize_pack": 59, "gentic": 59, "optimization_object": 59, "capacity_mw": 59, "exact": 59, "best": [59, 291], "charg": [59, 93, 95, 205, 284, 291], "nturb": 59, "convex_hul": 59, "convex_hull_area": 59, "full_cell_area": 59, "capacity_dens": 59, "convex_hull_capacity_dens": 59, "full_cell_capacity_dens": 59, "func": 60, "decor": 60, "until": [60, 294, 296], "meant": [60, 131, 296], "subclass": [60, 131], "optimized_design_vari": 60, "callabl": 60, "plot": [61, 63, 64, 65, 163, 164, 165, 166, 167, 170, 171, 289, 298], "exteroir": 62, "geom": 63, "ax": [63, 64, 65], "color": [63, 64, 65, 166, 170], "black": 63, "linestyl": 63, "linewidth": 63, "matplotlib": [63, 64, 65, 170, 298], "pyplot": [63, 64, 65, 298], "figur": [63, 64, 65], "rose": [63, 64, 65, 188, 205, 284], "style": [63, 108, 109, 192, 271], "line": [63, 66, 91, 101, 114, 115, 158, 176, 203, 205, 209, 268, 272, 273, 276, 280, 284, 285, 289, 291, 303], "width": 63, "r": [64, 69, 71, 74, 76, 77, 78, 86, 108, 112, 136, 277, 278, 283, 291, 298], "c0": 64, "num": [64, 128], "radiu": 64, "py": [64, 65, 282, 291], "show": [64, 268, 278, 290, 291, 292, 298], 
"next": [64, 288, 292, 294, 300], "wind_direct": 65, "wind_frequ": 65, "windros": 65, "command": [66, 76, 77, 78, 91, 101, 158, 176, 203, 205, 209, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 289, 290, 291, 292, 303], "run_preflight": 69, "check_kei": [69, 71], "baseconfig": [69, 74, 76, 86], "disabl": [69, 112, 203], "preflight": 69, "analysis_year": [69, 91, 101, 176, 211, 272, 273, 280], "code": [69, 157, 209, 277, 288, 302], "anticip": 69, "log_directori": [69, 212, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "log": [69, 71, 74, 76, 77, 78, 86, 136, 141, 212, 259, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 282, 284, 285, 291, 294], "execution_control": [69, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291, 293], "_ec": 69, "baseexecutionconfig": [69, 77], "eagleconfig": 69, "job": [69, 71, 74, 76, 77, 78, 86, 241, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 289, 290, 291, 292, 301], "_name": [69, 193], "str_rep": [69, 71, 74, 76, 77, 78, 86], "revdir": [69, 71, 74, 76, 77, 78, 86], "home": [69, 71, 74, 76, 77, 78, 86, 288, 291, 302], "runner": [69, 71, 74, 76, 77, 78, 86], "testdatadir": [69, 71, 74, 76, 77, 78, 84, 86, 101, 297, 298, 299, 300], "test": [69, 71, 74, 76, 77, 78, 86, 112, 261, 289, 296, 297, 300], "rel": [69, 71, 74, 76, 77, 78, 86, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "demarc": [69, 71, 74, 76, 77, 78, 86], "check_fil": [69, 71, 74, 76, 77, 78, 86], "flist": [69, 71, 74, 76, 77, 78, 86], "make": [69, 71, 74, 76, 77, 78, 86, 158, 205, 212, 261, 269, 276, 282, 284, 288, 291, 301, 302], "check_overwrite_kei": [69, 71, 74, 76, 77, 78, 86], "primary_kei": [69, 71, 74, 76, 77, 78, 86], "overwrite_kei": [69, 71, 74, 76, 77, 78, 86], "overwrit": [69, 71, 74, 76, 77, 78, 86], "configerror": [69, 71, 74, 76, 77, 78, 86], "primari": [69, 71, 74, 76, 77, 78, 84, 86, 133, 176, 280, 296], 
"messag": [69, 71, 74, 76, 77, 78, 86, 143, 259, 294], "item": [69, 71, 74, 76, 77, 78, 80, 81, 86, 193, 265, 269], "d": [69, 71, 74, 76, 77, 78, 84, 86, 108, 112, 164, 291], "config_dir": [69, 71, 74, 76, 77, 78, 86], "config_kei": [69, 71, 74, 76, 77, 78, 86, 213], "copi": [69, 71, 74, 76, 77, 78, 80, 81, 86, 109, 121, 122, 158, 276, 291, 292, 300], "shallow": [69, 71, 74, 76, 77, 78, 80, 81, 86, 296], "fromkei": [69, 71, 74, 76, 77, 78, 86], "els": [69, 71, 74, 76, 77, 78, 86], "view": [69, 71, 74, 76, 77, 78, 86], "log_level": [69, 71, 74, 76, 77, 78, 86, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 291, 300], "debug": [69, 71, 74, 76, 77, 78, 86, 136, 141, 212, 259, 268, 270, 271, 272, 273, 274, 275, 276, 278, 279, 280, 282, 284, 285, 291, 300], "pop": [69, 71, 74, 76, 77, 78, 80, 81, 86, 298], "k": [69, 71, 74, 76, 77, 78, 86], "v": [69, 71, 74, 76, 77, 78, 86, 171, 178, 262, 268, 278], "keyerror": [69, 71, 74, 76, 77, 78, 86, 229], "popitem": [69, 71, 74, 76, 77, 78, 86], "lifo": [69, 71, 74, 76, 77, 78, 86], "first": [69, 71, 74, 76, 77, 78, 80, 81, 86, 128, 150, 157, 195, 205, 269, 284, 288, 290, 291], "resolve_path": [69, 71, 74, 76, 77, 78, 86], "resolv": [69, 71, 74, 76, 77, 78, 86], "resembl": [69, 71, 74, 76, 77, 78, 86, 176, 280], "somewher": [69, 71, 74, 76, 77, 78, 86], "bodi": [69, 71, 74, 76, 77, 78, 86, 291], "unchang": [69, 71, 74, 76, 77, 78, 86], "ones": [69, 71, 74, 76, 77, 78, 86, 112], "deleg": [69, 71, 74, 76, 77, 78, 86], "logic": [69, 71, 74, 76, 77, 78, 80, 86], "pathlib": [69, 71, 74, 76, 77, 78, 86], "made": [69, 71, 74, 76, 77, 78, 86, 148, 292], "symlink": [69, 71, 74, 76, 77, 78, 86], "compon": [69, 71, 74, 76, 77, 78, 86], "elimin": [69, 71, 74, 76, 77, 78, 86], "t": [69, 71, 74, 76, 77, 78, 86, 109, 132, 133, 134, 136, 286, 291], "set_self_dict": [69, 71, 74, 76, 77, 78, 86], "dictlik": [69, 71, 74, 76, 77, 78, 86], "emul": [69, 71, 74, 76, 77, 78, 86, 265], "setdefault": [69, 71, 74, 76, 77, 78, 86], 
"insert": [69, 71, 74, 76, 77, 78, 80, 81, 86, 292], "str_replace_and_resolv": [69, 71, 74, 76, 77, 78, 86], "deep": [69, 71, 74, 76, 77, 78, 86], "search": [69, 71, 74, 76, 77, 78, 86, 109, 278, 283], "f": [69, 71, 74, 76, 77, 78, 86, 112, 278, 281, 283, 291, 300], "lack": [69, 71, 74, 76, 77, 78, 86], "perform_str_rep": 71, "jan": [73, 151], "28": [73, 297], "11": [73, 153, 154, 297], "43": 73, "27": [73, 291, 297], "2019": [73, 79, 174, 201, 206, 214, 288, 302], "curtailment_paramet": 74, "could": [74, 146, 157, 158, 265, 276, 291], "content": [74, 84, 291], "threshold": [74, 193], "_wind_spe": 74, "dawn_dusk": 74, "zenith": 74, "angl": 74, "dawn": 74, "dusk": 74, "_dawn_dusk": 74, "nautic": 74, "horizon": 74, "sza": [74, 101, 273], "102": 74, "date_rang": [74, 112, 262], "date": [74, 209, 291], "rang": [74, 80, 81, 83, 91, 98, 101, 137, 143, 144, 146, 191, 193, 198, 199, 200, 203, 285, 288, 291], "end": [74, 80, 81, 83, 84, 91, 98, 101, 150, 158, 205, 291], "mmdd": 74, "pad": 74, "nonetyp": [74, 77, 78, 84, 98, 101, 108, 112, 163, 190, 191, 205, 215], "precipit": 74, "precip": 74, "mm": 74, "precipitationrate_0m": 74, "equat": [74, 93, 157, 158, 199, 203, 276, 285, 296], "scenario": [74, 203, 285, 288], "precipitation_r": 74, "solar_zenith_angl": 74, "signal": 74, "screen": [74, 288, 302], "criteria": 74, "met": [74, 199], "alwai": [74, 110, 112, 125, 127, 144, 146, 274, 275, 282, 296], "random_se": [74, 215], "config_dict": [76, 77, 78], "hardwar": [76, 77, 78, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "peregrin": [76, 77, 78, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "eagl": [76, 77, 78, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 289, 303], "sites_per_work": [76, 77, 78, 91, 98, 101, 183, 203, 272, 273, 285, 291], "scheme": [76, 77, 78], "memory_utilization_limit": [76, 77, 78, 91, 98, 101, 272, 273], "mem_util_lim": [76, 77, 78], "sh_script": [76, 77, 78, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 
284, 285, 291], "shell": [76, 77, 78, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291], "script": [76, 77, 78, 267, 268, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 284, 285, 288, 291, 302], "hpc": [77, 78, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 291], "alloc": [77, 78, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 293], "hpc_alloc": [77, 78], "state": [77, 78, 176, 177, 198, 199, 203, 280, 288, 290, 291, 297, 299, 300, 302], "job_id": [77, 78], "conda_env": [77, 78, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "conda": [77, 78, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 291, 302], "environ": [77, 78, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 291, 302], "activ": [77, 78, 158, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 291, 302], "hpcconfig": 78, "slurm": [78, 251, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291, 292, 293], "gb": [78, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 293], "_hpc_node_mem": 78, "walltim": [78, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 293, 302], "_hpc_walltim": 78, "alias": 79, "typo": 79, "jul": [79, 148, 151], "09": [79, 112, 291], "37": [79, 291], "23": [79, 112, 174, 291, 297], "inp": [80, 81], "__add__": [80, 81, 128], "__mul__": [80, 81, 128], "append": [80, 81, 91, 272], "occurr": [80, 81, 128], "extend": [80, 81, 282], "9223372036854775807": [80, 81, 128], "indexerror": [80, 81], "revers": [80, 81], "IN": [80, 81], "ascend": [80, 81], "itself": [80, 81, 125, 143, 271], "stabl": [80, 81], "descend": [80, 81], "accord": [80, 81, 146], "outputrequest": 81, "sites_per_split": 83, "manag": [83, 84, 181, 182, 202, 206, 288, 291, 292, 294, 302], "__next__": 83, "_sites_per_split": 83, "increment": 83, "_project_point": 83, "split_rang": [83, 91, 98, 101], "_split_rang": 83, "finish": [83, 294], "dunder": 83, "i0": [83, 84], "i1": [83, 84], 
"begin": [83, 135, 150, 268, 288], "sub": [83, 84, 102, 164, 269, 277, 292], "subset": [83, 84, 183, 184, 203], "curtail": [84, 98, 101, 131, 273], "join": [84, 101, 158, 276, 297, 298, 299, 300], "naris_pv_1axis_inv13": [84, 101, 297, 299, 300], "pp": [84, 139, 297, 298, 299, 300], "config_id_site0": 84, "sam_config_dict_site0": 84, "site_list_or_slic": 84, "get_sites_from_config": 84, "config_id": 84, "projectpoints_sub": 84, "h_list": 84, "h": [84, 119, 120, 121, 122, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "samconfig": [84, 91, 98, 101], "pvwattsv7": [84, 91, 98, 101, 291, 299, 300], "tcsmoltensalt": [84, 91, 98, 101], "solarwaterheat": [84, 91, 98, 101], "troughphysicalheat": [84, 91, 98, 101], "lineardirectsteam": [84, 91, 98, 101], "_": [84, 91, 98, 101, 298], "find": [84, 108, 136, 146, 176, 178, 280, 291, 297], "explicit": [84, 98, 101, 108, 109, 273], "pointer": [84, 98, 101, 273], "_df": 84, "sam_config_id": 84, "sam_config_obj": 84, "_sam_config_obj": 84, "sam_input": 84, "all_sam_input_kei": 84, "gcr": 84, "wind_turbine_hub_ht": 84, "sites_as_slic": 84, "sequenti": [84, 98, 101], "_tech": 84, "_h": 84, "taken": [84, 91, 93, 98, 101, 143, 198, 199, 200], "_d": 84, "_curtail": 84, "being": [84, 86, 91, 101, 108, 112, 141, 164, 178, 197, 198, 199, 200, 203, 272, 273, 277, 288, 291], "assess": [84, 288], "ind": [84, 207], "join_df": 84, "df2": 84, "pkei": 84, "advantag": [84, 136], "relev": [84, 91, 98, 101, 137, 271, 300], "recogn": 84, "necessarili": [84, 203], "struct": 84, "lat_lon_coord": [84, 278, 297, 298, 299, 300], "lat_lon": [84, 108, 112, 195, 297, 298, 299, 300], "fine": [84, 291], "region": [84, 114, 115, 132, 133, 134, 136, 175, 176, 177, 205, 267, 280, 284, 288, 291, 297, 299, 300], "form": [84, 87, 125, 127, 205, 274, 284, 297], "region_column": [84, 297], "section": [86, 135], "whether": [86, 114, 115, 126, 143, 188, 191, 205, 261], "irrad": 86, "irradi": 86, "sky": 86, "bifaci": 86, "panel": 86, "ic": 86, "_ice": 86, 
"en_icing_cutoff": 86, "downscal": 86, "higher": [86, 101, 273], "5min": 86, "variability_kwarg": 86, "var_frac": 86, "05": [86, 112, 205, 206, 207, 284, 298, 299, 300], "uniform": 86, "json5": [86, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 286], "toml": [86, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 286], "bad": [87, 220, 224, 236, 237, 238, 243, 250], "check_pv": 87, "4": [91, 98, 101, 112, 262, 272, 273, 288, 291, 293, 297, 302], "basegen": [91, 101], "though": [91, 148, 268, 271, 272], "simpli": [91, 271, 272, 282, 288, 302], "those": [91, 148, 205, 272, 284, 294], "reli": [91, 272], "bracket": [91, 101, 176, 272, 273, 280], "fill": [91, 101, 125, 127, 176, 268, 272, 273, 274, 280], "pipelin": [91, 109, 110, 158, 176, 203, 205, 213, 242, 267, 268, 269, 271, 272, 275, 276, 280, 281, 282, 283, 284, 285, 286, 288, 289, 290, 291, 292, 296, 302], "pars": [91, 93, 108, 112, 158, 176, 203, 205, 213, 271, 272, 276, 280, 284, 285], "duplic": [91, 158, 176, 203, 205, 272, 276, 277, 280, 284, 285], "invalid": [91, 158, 176, 188, 203, 205, 272, 276, 280, 284, 285], "manual": [91, 139, 158, 176, 203, 205, 272, 276, 280, 284, 285, 288, 302], "rest": [91, 101, 272, 273], "instead": [91, 98, 101, 108, 112, 158, 183, 184, 193, 203, 268, 272, 273, 276, 277, 298], "chunk": [91, 98, 101, 106, 108, 112, 207, 261, 263, 272, 273], "store": [91, 98, 101, 108, 110, 112, 137, 143, 158, 272, 273, 275, 276, 288, 294, 299], "flush": [91, 98, 101, 272, 273], "project_return_aftertax_npv": 91, "filenam": [91, 101, 271], "_meta": 91, "get_pc": [91, 98, 101], "points_rang": [91, 98, 101], "fulli": [91, 98, 101, 198, 199, 200], "instanti": [91, 98, 101, 108, 112, 131], "prioriti": [91, 109, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 293], "pc": [91, 98, 101], "add_site_data_to_pp": [91, 98, 101], "_out_fpath": [91, 98, 101], "_fpath": [91, 98, 101], "get_sites_per_work": [91, 98, 101], "concept": [91, 98, 
101], "most": [91, 98, 101, 136, 175, 176, 178, 188, 203, 209, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 297], "effici": [91, 98, 101, 188, 291], "twice": [91, 98, 101, 291], "redundantli": [91, 98, 101], "cannot": [91, 98, 101, 183, 198, 199, 291], "handle_leap_ti": [91, 98, 101], "gen": [91, 97, 98, 126, 158, 175, 176, 177, 199, 202, 203, 207, 273, 289, 297, 298], "out_chunk": [91, 98, 101], "_out_chunk": [91, 98, 101], "indici": [91, 98, 101], "timeout": [91, 101, 272, 273, 291], "1800": [91, 101, 272, 273], "pool_siz": [91, 101, 272, 273], "smart": [91, 101], "wait": [91, 101, 272, 273, 291, 294], "submit": [91, 101, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 283, 284, 285, 292, 294], "pool": [91, 101, 272, 273], "cpu_count": [91, 101, 272, 273], "run_attr": [91, 98, 101, 108, 112], "__init__": [91, 98, 101], "runtim": [91, 98, 101, 108, 112], "sam_meta": [91, 98, 101], "sam_modul": [91, 98, 101], "_site_data": [91, 98, 101], "site_index": [91, 98, 101], "site_gid": [91, 98, 101], "out_index": [91, 98, 101], "global": [91, 98, 101, 108, 112], "site_limit": [91, 98, 101], "_site_limit": [91, 98, 101], "site_mem": [91, 98, 101], "mb": [91, 98, 101], "_site_mem": [91, 98, 101], "unpack_futur": [91, 98, 101], "compil": [91, 98, 101], "unpack_output": [91, 98, 101], "site_output": [91, 98, 101], "unpack": [91, 98, 101], "siteoutput": [91, 98, 101], "_year": [91, 98, 101], "larger": [92, 93, 132, 134, 143, 270, 272, 273, 291, 293], "eqn": 93, "capacity_factor": 93, "unitless": [93, 95, 203], "annual_energy_product": [93, 95, 203], "independ": [93, 199, 203, 285], "is_num": 93, "is_method": 93, "builtin": 93, "var": 93, "capital_cost_scalar": 93, "raw_capital_cost": 93, "unscal": [93, 108, 112], "scaled_capital_cost": 93, "foc": 93, "voc": 93, "back": 93, "raw_cap_cost": 93, "raw_lco": 93, "scaled_lco": 93, "electr": 95, "rage": 95, "aka": 95, "expenditur": 95, "scale_output": [98, 101, 273], "immedi": [98, 101, 273], "upon": 
[98, 101, 273, 301], "_time_index": [98, 101], "low_res_resource_fil": [101, 273], "write_mapped_gid": [101, 273], "bundl": [101, 133, 273], "commonli": [101, 273], "wai": [101, 108, 112, 203, 285, 291, 293, 297], "refer": [101, 139, 158, 268, 276, 295, 300], "clone": [101, 291, 303], "repositori": [101, 291], "pip": [101, 291, 300, 303], "sam_tech": 101, "fp_sam": 101, "fp_re": 101, "ri_100_nsrdb_2013": 101, "16966143": 101, "dtype": [101, 108, 112], "7": [101, 203, 291, 297, 299, 300], "req": 101, "131": 101, "39166": 101, "31221": 101, "127": 101, "54539": 101, "125": 101, "49656": 101, "17713654": 101, "17724372": 101, "1824783": 101, "1854574": 101, "underscor": [101, 209, 269, 273], "multi_file_resourc": [101, 273], "multifileresourc": [101, 106, 273], "sup3rcc": [101, 273], "organ": [101, 178, 273], "least": [101, 125, 127, 135, 176, 177, 205, 269, 273, 284, 291], "dhi": [101, 273], "relationship": [101, 273], "co": [101, 273], "your": [101, 112, 131, 135, 158, 269, 273, 276, 282, 288, 289, 292, 300, 302], "dc_ac_ratio": [101, 110, 199, 273, 275], "detect": [101, 132, 133, 134, 135, 273], "h5_file": [106, 108, 112, 163, 168], "hsd": [106, 108, 112, 163, 190, 191, 192, 288, 289, 302], "h5pyd": [106, 108, 112, 163, 190, 191, 192, 288, 289, 300, 302], "host": [106, 108, 112, 163, 190, 191, 192, 300], "aw": [106, 108, 112, 163, 190, 191, 192, 289, 300], "h5py": [106, 108, 112, 259], "iarr": 106, "flatten": [106, 195, 198, 199, 200, 263], "uint": 106, "geotiff": 106, "cr": 106, "pixel_area": [106, 198, 199, 200], "appropri": [106, 199, 268], "get_layer_profil": 106, "get_layer_cr": 106, "get_layer_valu": 106, "band": 106, "get_layer_descript": 106, "get_nodata_valu": 106, "nodata": [106, 190, 191, 192, 193], "str_decod": [108, 112], "standard": [108, 112, 176, 203, 205, 280, 293], "deviat": 108, "coeffici": 108, "variat": 108, "decod": [108, 112], "bytestr": [108, 112], "parse_source_files_pattern": 108, "source_fil": [108, 109, 110, 275], "pattern": [108, 109, 
135, 261, 271], "unix": [108, 109, 271, 291], "except": [108, 124, 138, 145, 291], "pertain": 108, "pass_through": 108, "modif": [108, 291, 296], "stdev": [108, 109, 169], "interest": [108, 114, 115, 168, 197, 198, 199, 200, 278, 300], "my_mean": 108, "my_stdev": 108, "cv": 108, "my_cv": 108, "is_profil": 108, "my_fil": 108, "ident": [108, 109, 296], "collect_mean": 108, "collect_profil": 108, "add_dataset": [108, 112], "dset_nam": [108, 112], "dset_data": [108, 112], "scale_factor": [108, 112], "offset": [108, 112, 203], "data_vers": [108, 112], "df_str_decod": [108, 112], "byte": [108, 112], "ordinari": [108, 112], "full_version_record": [108, 112], "record": [108, 112, 277], "get_sam_df": [108, 112], "get_attr": [108, 112], "get_config": [108, 112], "config_nam": [108, 112], "get_dset_properti": [108, 112], "get_meta_arr": [108, 112], "rec_nam": [108, 112], "meta_arr": [108, 112], "get_scale_factor": [108, 112], "get_unit": [108, 112], "global_attr": [108, 112], "_group": [108, 112], "init_h5": [108, 112], "init": [108, 112, 143], "impli": [108, 112], "open_dataset": [108, 112], "ds_name": [108, 112, 168], "resourcedataset": [108, 112, 198, 199, 200], "preload_sam": [108, 112], "hsds_kwarg": [108, 112], "bucket": [108, 112, 291], "usernam": [108, 112, 291], "password": [108, 112, 291], "res_arrai": [108, 112], "sam_r": [108, 112], "res_dset": [108, 112], "resource_dataset": [108, 112], "set_config": [108, 112], "set_version_attr": [108, 112], "update_dset": [108, 112], "dset_arrai": [108, 112], "dset_slic": [108, 112], "writabl": [108, 112], "is_writ": [108, 112], "write_dataset": [108, 112], "write_mean": [108, 112], "identifi": [108, 112, 158, 175, 176, 177, 178, 195, 205, 276, 280], "write_profil": [108, 112], "out_dir": [109, 110, 161, 163, 166, 168, 169, 170, 171, 213, 275], "source_dir": [109, 110, 275], "source_prefix": [109, 110, 275], "source_pattern": 109, "pass_through_dset": [109, 110, 275], "OR": 109, "statu": [109, 213, 267, 268, 288, 291, 292, 
294, 301, 302], "explicitli": [109, 293], "don": [109, 291], "vari": [109, 144, 269, 288], "_dset": 109, "clobber": [110, 271, 275], "averag": [110, 198, 200, 203, 275], "word": [110, 275, 277], "multiyeargroup": [110, 275], "ghi_mean": [110, 275], "solar_group": [110, 275], "exactli": [110, 193, 275, 277], "what": [110, 193, 205, 275, 284], "want": [110, 158, 176, 205, 268, 269, 275, 276, 277, 280, 284, 288, 291, 300, 302], "purg": [110, 271, 275], "freshli": [110, 275], "overwritten": [110, 265, 275], "20210101": 112, "20220101": 112, "freq": 112, "1h": 112, "right": 112, "print": [112, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 286, 291, 298, 299, 300], "head": [112, 288, 291], "2021": [112, 291], "02": [112, 176, 269, 280], "03": 112, "04": [112, 269], "07": 112, "08": [112, 291], "31": [112, 174, 201, 297], "16": [112, 206, 291, 297], "17": [112, 297], "19": [112, 269, 297], "21": [112, 201, 206, 291, 297], "22": [112, 291, 297], "datetime64": 112, "few": [112, 291], "we": [112, 207, 291, 298], "earlier": 112, "unless": [112, 176, 280], "explicityli": 112, "storag": [112, 291, 300], "big": [112, 144, 146, 294], "float64": 112, "dset1": 112, "int32": 112, "4242": 112, "spatiotempor": [112, 157, 158, 276, 295], "intial": 112, "furthermor": 112, "whose": 112, "bad_shap": 112, "handlervalueerror": 112, "proper": [112, 291], "trans_tabl": [114, 115, 205, 284], "line_tie_in_cost": [114, 115, 205, 284], "14000": [114, 115], "line_cost": [114, 115, 205, 284], "2279": [114, 115], "station_tie_in_cost": [114, 115, 205, 284], "center_tie_in_cost": [114, 115, 205, 284], "sink_tie_in_cost": [114, 115, 205, 284], "1000000000": [114, 115], "avail_cap_frac": [114, 115, 205, 284], "line_limit": [114, 115, 205, 284], "transmissionfeatur": [114, 205], "build": [114, 115, 205, 284, 291], "substat": [114, 115, 205, 284], "center": [114, 115], "synthet": [114, 115], "infinit": [114, 115], "sink": [114, 115], "1e9": [114, 115], "attach": 
[114, 115, 205, 284], "legaci": [114, 115, 193, 205, 284], "available_capac": [114, 115], "avail_cap": [114, 115], "feature_cost": 114, "check_avail": [114, 115], "availabl": [114, 115], "check_feature_depend": [114, 115], "intern": [114, 115, 288], "accordingli": [114, 115, 292], "intereset": [114, 115], "distanc": [114, 115, 205, 207], "transmission_multipli": [114, 115, 205], "lcot": [114, 115, 204, 205, 284], "give": [114, 115], "line_multipli": [114, 115], "increas": [114, 115, 133, 136], "feature_capac": [114, 115], "feature_cap": [114, 115], "rep": [118, 125, 126, 127, 175, 176, 177, 267, 268, 274, 277, 288, 302], "ppinchuk": [118, 123], "capciti": [119, 120, 121, 122, 299], "hybrid_meta": [119, 120, 121, 122, 125, 127], "mean_cf": [120, 199, 203, 205, 284, 285], "No": [121, 122, 148], "hybrid_solar_capac": [121, 122], "hybrid_wind_capac": 122, "helper": 124, "fmt": 124, "excess": 124, "char": 124, "whitespac": [124, 148, 149, 152, 153, 154], "merg": [124, 125, 126, 127, 205, 265, 274, 284], "charact": 124, "ascii": 124, "strip": [124, 150], "lowercas": [124, 209], "solar_fpath": [125, 126, 274], "wind_fpath": [125, 126, 274], "allow_solar_onli": [125, 127, 274], "allow_wind_onli": [125, 127, 274], "fillna": [125, 127, 274], "ratio_bound": [125, 127, 274], "solar_capac": [125, 127, 274], "wind_capac": [125, 127, 274], "variou": [125, 203, 274, 285], "column_nam": [125, 127, 274], "fill_valu": [125, 127, 274], "colum_nam": [125, 127, 274], "max_valu": [125, 127, 193, 274], "half": [125, 127, 207, 274], "doubl": [125, 127, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "numerator_column": [125, 274], "denominator_column": [125, 274], "noth": [125, 127, 274], "solar_meta": [125, 126], "wind_meta": [125, 126], "At": [125, 127, 176, 177, 291, 297], "veri": [125, 127, 176, 177, 291], "solar_time_index": [125, 126], "wind_time_index": [125, 126], "hybrid_time_index": [125, 126], "fout": [125, 176, 177, 178], "save_hybrid_meta": [125, 274], "disc": 
[125, 176, 183], "run_meta": 125, "run_profil": 125, "save_profil": [125, 176, 177], "contains_col": 126, "col_nam": 126, "overlap": [126, 143, 144, 146, 203], "hybridsdata": 127, "numerator_column_nam": 127, "denominator_column_nam": 127, "validate_input": 127, "correctli": 127, "solar_profile_indices_map": 127, "hybrid_indic": 127, "solar_indic": 127, "wind_profile_indices_map": 127, "wind_indic": 127, "denom": 128, "realist": [131, 133, 136, 296], "certain": [131, 136], "haircut": [131, 136, 298], "transformation_var": [131, 132, 133, 134], "strength": 131, "powercurveloss": [131, 141], "incur": [131, 136], "notimplementederror": 131, "did": [131, 136, 291], "_transformed_gener": 131, "protect": 131, "Then": 131, "super": 131, "cutout": [131, 141], "horizont": [131, 133, 136, 298], "optm_bound": [131, 132, 133, 134], "scipi": [131, 132, 133, 134, 259], "sometim": [131, 132, 133, 134, 292], "enforc": [131, 132, 133, 134], "abstractpowercurvetransform": [132, 133, 134], "exponenti": 132, "stretch": [132, 134], "mathemat": [132, 133, 134], "p_": [132, 133, 134], "expon": 132, "primarili": [132, 133, 134, 136], "closer": [132, 134], "cut": [132, 134, 135], "cutoff": [132, 133, 134, 135], "translat": [133, 136, 195, 298], "kind": 133, "simplist": 133, "hand": [133, 291], "applic": [133, 291], "blade": [133, 136], "degrad": [133, 136], "steep": [133, 136], "almost": [133, 136, 291], "portion": [133, 136, 144, 146, 269, 277], "rapidli": [133, 136], "togeth": [133, 288], "extrem": 133, "unrealist": [133, 205, 284], "posit": [135, 203], "attempt": [135, 136, 146, 202, 203, 300], "transit": 135, "highest": 135, "adher": 135, "array_lik": [135, 136, 139], "cutin_wind_spe": 135, "cutoff_wind_spe": 135, "inf": 135, "rated_pow": 135, "__call__": 135, "wind_resourc": 136, "underli": [136, 197, 198, 199, 200], "approach": [136, 291], "realiz": 136, "board": 136, "uniformli": 136, "overli": 136, "simplifi": 136, "abl": [136, 199, 290, 291], "reach": 136, "albeit": 136, 
"greater": 136, "measur": 136, "annual_losses_with_transformed_power_curv": 136, "transformed_power_curv": 136, "closest": [136, 176, 207, 280], "powercurvetransform": [136, 137], "revlosseswarn": [136, 146], "meet": [136, 141, 298], "guarante": 136, "especi": [136, 296], "abnorm": 136, "power_gen_no_loss": 136, "about": [137, 143, 144, 146, 148, 269, 291, 300], "horizontal_transl": [137, 140, 298], "sting": 137, "required_kei": [137, 143], "mixin": [138, 145], "anyth": [138, 145, 158, 276, 292, 294], "unexpect": [138, 145], "pa": 139, "atm": 139, "wind_resource_for_sit": 139, "air": 139, "2uzjawp": 139, "densiti": [139, 199, 202, 203, 285], "2p8fjba6": 139, "exponentialstretch": 140, "horizontaltransl": 140, "linear_stretch": 140, "linearstretch": 140, "resource_data": [141, 298], "target_loss": [141, 298], "investig": 141, "cutin": 141, "consecut": 143, "unformat": [143, 148], "letter": [143, 148, 150], "abbrevi": [143, 148, 151], "whenev": [143, 144, 146], "flexibl": 143, "percent": [143, 146, 203], "total_available_hour": 143, "leverag": [144, 295], "routin": 144, "singleoutageschedul": 144, "basi": [144, 290], "produc": [144, 205], "reproduc": [144, 215], "perfectli": 144, "total_loss": [144, 146], "can_schedule_mor": [144, 146], "track": [144, 146, 205, 284, 294], "conflict": [144, 301], "happen": [144, 146], "enough": [144, 146, 205], "issu": [144, 146, 292], "long": [144, 146, 205, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "largest": [144, 291], "outageschedul": 146, "max_it": 146, "10000": 146, "main": [146, 288, 302], "termin": [146, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 286], "successfulli": [146, 288, 291, 294, 302], "exce": 146, "update_when_can_schedule_from_month": 146, "update_when_can_schedul": 146, "wherev": [146, 282], "caus": 146, "find_random_outage_slic": 146, "slot": 146, "verifi": [146, 291], "entir": [146, 190, 191, 192, 286, 288], "schedule_loss": 146, "outage_slic": 146, 
"equival": [146, 188], "month_nam": [148, 149, 150, 151, 152, 153, 154], "calendar": [148, 149, 150, 152, 153, 154], "input_nam": 148, "april": [148, 150], "jun": [148, 151, 153, 201, 206], "june": [148, 150, 151, 153], "juli": [148, 153], "known": 149, "unknown": [149, 154], "unknown_month": 149, "known_month": 149, "titl": 150, "uppercas": 150, "aug": [150, 151], "mar": [151, 214], "apr": 151, "sep": 151, "oct": [151, 174], "nov": [151, 291], "dec": 151, "unabbrevi": 151, "understood": [151, 153], "abcdef": 151, "sinc": [157, 205, 284, 291], "refactor": 157, "gen_fpath": [158, 175, 176, 177, 199, 202, 203, 276, 280, 285], "nrwal_config": [158, 276], "save_raw": [158, 276], "meta_gid_col": [158, 276], "site_meta_col": [158, 276], "rep_profil": [158, 276], "manipul": [158, 276], "nrwal_key_1": [158, 276], "nrwal_value_1": [158, 276], "nrwal_key_2": [158, 276], "nrwal_value_2": [158, 276], "cf_mean_raw": [158, 276], "archiv": [158, 276], "default_meta_col": [158, 276], "gen_dset": 158, "meta_sourc": 158, "meta_out": 158, "analysis_mask": 158, "analysis_gid": 158, "run_nrwal": 158, "check_output": 158, "save_raw_dset": 158, "write_to_gen_fpath": 158, "write_meta_to_csv": 158, "csv_output": [158, 276], "dimension": [158, 276], "fir": [158, 276], "input_dataset_name_raw": [158, 276], "qualiti": [159, 161, 162, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "assur": [159, 161, 162, 279], "qa": [160, 161, 163, 164, 166, 167, 168, 170, 171, 267, 268, 277, 288, 302], "qc": [160, 161, 163, 164, 166, 167, 168, 170, 171, 267, 268, 277, 288, 302], "qaqcmodul": [161, 279], "too": [163, 198, 199, 200, 291, 294, 300], "create_scatter_plot": 163, "plot_typ": [163, 164, 166, 170, 171], "plotli": [163, 164, 166, 170, 171], "cmap": [163, 164, 166, 170], "viridi": [163, 166, 170], "scatter": [163, 170, 171], "colormap": [163, 164, 166, 170], "process_s": [163, 164, 168], "stat": [163, 170, 291], "summar": [163, 168, 169, 199, 203, 285], "sc_tabl": [163, 169, 171], 
"mean_lco": [163, 171, 199, 203, 205], "sc_plot_kwarg": 163, "scatter_plot_kwarg": 163, "exclusions_mask": 163, "excl_h5": [163, 190, 191, 192], "layers_dict": [163, 191], "kernel": [163, 164, 190, 191], "plot_step": [163, 164, 166], "sq": [163, 181, 182, 183, 184, 190, 191, 202, 203], "module_nam": 164, "out_root": 164, "One": [164, 203, 285], "sub_dir": 164, "rev_h5": 164, "excl_mask": 166, "plotbas": [166, 170, 171], "npy": 166, "exclusions_plot": 166, "out_path": [166, 168, 169, 170, 171], "seaborn": [166, 170, 171], "heatmap": 166, "px": [166, 170], "continu": [166, 170, 269, 277, 294], "imag": [166, 170, 171, 291], "heatmap_plot": 166, "exclusions_plotli": 166, "imshow": 166, "summarize_dset": 168, "statist": [168, 169], "summarize_mean": 168, "supply_curve_summari": 169, "sc_summari": 169, "median": [169, 176, 178, 280], "scatter_plot": 170, "versu": 170, "df_scatter": [170, 171], "scatter_plotli": 170, "dist_plot": 170, "distplot": 170, "dist_plotli": 170, "histogram": 170, "summary_csv": 170, "scatter_al": 170, "supply_curve_plot": 171, "cumul": 171, "supply_curve_plotli": 171, "thu": [174, 195], "49": [174, 297], "rev_summari": [175, 176, 177, 280], "cf_dset": [175, 176, 177, 199, 203, 280, 285], "rep_method": [175, 176, 177, 178, 280], "meanoid": [175, 176, 177, 178, 280], "err_method": [175, 176, 177, 178, 280], "rmse": [175, 176, 177, 178, 280], "gid_count": [175, 176, 177, 198, 199, 203, 280], "n_profil": [175, 176, 177, 178, 280], "trim": 175, "res_gid": [175, 176, 177, 199, 203, 280], "gen_gid": [175, 176, 177, 199, 203, 280], "mae": [175, 176, 177, 178, 280], "mbe": [175, 176, 177, 178, 280], "medianoid": [175, 176, 177, 178, 280], "source_profil": 175, "i_rep": [175, 178], "rep_gen_gid": 175, "rep_res_gid": 175, "get_region_rep_profil": 175, "calc": [175, 198, 199, 200], "gen_gid_rep": 175, "res_gid_rep": 175, "reg_col": [176, 177, 280], "aggregate_profil": [176, 280], "repprofilesbas": 176, "preced": [176, 203, 205, 280, 284, 285], "categor": 
[176, 177, 280], "ll": [176, 280], "often": [176, 280], "someth": [176, 280, 291], "4095": [176, 280], "99": [176, 280, 298], "98": [176, 280, 297], "90m": [176, 195, 203, 280, 285], "contribut": [176, 280], "lieu": [176, 280], "forcibli": [176, 280], "save_rev_summari": [176, 177, 280], "scaled_precis": [176, 177, 280], "uint16": [176, 177, 280], "ness": 178, "nargmin": 178, "nth": 178, "2nd": 178, "i_profil": 178, "represnt": 178, "context": [181, 182, 202], "_excl": [181, 182, 195, 198, 199, 200, 202], "h5_fpath": [182, 183], "h5_handler": 182, "abstractaggfilehandl": [182, 202], "similar": [182, 265, 290], "default_h5_handl": 182, "_h5": [182, 198, 199], "agg_dset": [183, 198, 199], "concret": 183, "aggreat": [183, 198, 199], "prefer": [183, 184, 203, 294], "agg_method": [183, 198, 199], "gen_index": [183, 198, 199, 203], "agg_out": 183, "squar": [183, 198, 199, 200], "agg": [183, 197, 198, 199, 200, 203], "save_agg_to_h5": 183, "competit": [187, 188, 205, 284], "n_dir": [188, 205, 284], "revx": [188, 205, 284, 288], "winddir": [188, 205], "cardin": [188, 205, 284], "promin": [188, 205, 284], "un": [188, 190, 191, 192], "map_sc_point_gid_to_sc_gid": 188, "map_sc_gid_to_sc_point_gid": 188, "check_sc_gid": 188, "map_upwind": 188, "upwind": [188, 205, 284], "map_downwind": 188, "downwind": [188, 205, 284], "exclude_sc_point_gid": 188, "elsewher": 188, "remove_noncompetitive_farm": 188, "sort_on": [188, 205, 284], "total_lco": [188, 205, 284], "compet": 188, "check_lay": [190, 191, 192], "flight": [190, 191, 192], "exclusionlay": [190, 191, 192, 195, 288], "_excl_h5": [190, 191, 192], "excl_lay": [190, 191, 192], "_excl_lay": [190, 191, 192], "domain": [190, 191, 192, 199], "add_lay": [190, 191, 192], "nodata_lookup": [190, 191, 192], "extract_inclusion_mask": 191, "fric_h5": 192, "fric_dset": 192, "friction": [192, 199, 202, 203, 205, 285], "include_valu": 193, "include_weight": 193, "mutual": 193, "turn": [193, 268, 278, 300], "partial": [193, 203], "50": 193, 
"inclusion_": 193, "include_": 193, "min_valu": 193, "_exclude_valu": 193, "_include_valu": 193, "force_includ": 193, "_force_includ": 193, "mask_typ": 193, "f_excl": 195, "dictat": [195, 199, 203], "n_row": 195, "n_col": 195, "_re": 195, "excl_shap": 195, "excl_row": 195, "excl_col": 195, "rows_of_excl": 195, "_rows_of_excl": 195, "cols_of_excl": 195, "_cols_of_excl": 195, "excl_row_slic": [195, 207], "_excl_row_slic": 195, "excl_col_slic": [195, 207], "_excl_col_slic": 195, "row_indic": 195, "That": 195, "col_indic": 195, "_point": 195, "get_sc_row_col_ind": 195, "row_ind": 195, "col_ind": 195, "get_excl_slic": 195, "row_slic": [195, 197, 198, 199, 200], "col_slic": [195, 197, 198, 199, 200], "get_flat_excl_ind": 195, "excl_ind": 195, "get_excl_point": 195, "__getitem__": 195, "excl_point": 195, "get_coord": 195, "centroid": [195, 198, 199, 200, 203], "valid_gid": 195, "vector": 195, "gi": 195, "get_slice_lookup": 195, "get_agg_slic": [197, 198, 199, 200], "agg_h5": [198, 199], "apply_exclus": [198, 199], "supplycurvepoint": 198, "countri": [198, 199, 203], "counti": [198, 199, 203, 297], "h5_gid_set": [198, 199], "h5_gid": [198, 199], "n_gid": [198, 199, 200, 203], "bool_mask": [198, 199, 200], "exclusion_weighted_mean": [198, 199, 200], "drop_nan": [198, 200], "include_mask_flat": [198, 199, 200], "sc_mean": [198, 199, 200], "sc_sum": [198, 199, 200], "res_class_dset": [199, 203, 285], "res_class_bin": [199, 203, 285], "power_dens": [199, 202, 203, 285], "lcoe_dset": [199, 202, 203, 285], "h5_dset": [199, 203, 285], "friction_lay": [199, 202], "aggregationsupplycurvepoint": 199, "frictionmask": [199, 202], "enter": [199, 202], "sever": [199, 203, 268, 285, 290, 291, 294], "flat_arr": 199, "_gen_gid": 199, "_gen": [199, 202], "res_gid_set": 199, "gen_gid_set": 199, "res_data": 199, "_res_data": 199, "gen_data": 199, "_gen_data": 199, "lcoe_data": 199, "_lcoe_data": 199, "mean_r": [199, 203, 285], "mean_lcoe_frict": [199, 202, 203, 205, 284, 285], "mean_frict": 
[199, 203], "friction_data": 199, "estim": [199, 207, 288, 289, 295], "_power_dens": [199, 202], "power_density_ac": 199, "condit": 199, "_power_density_ac": 199, "capacity_ac": [199, 205, 284], "h5_dsets_data": 199, "supplement": 199, "mean_h5_dsets_data": 199, "point_summari": 199, "economies_of_scal": 199, "cap_cost_scal": [199, 203, 285], "abstractsupplycurvepoint": 200, "fri": [201, 206, 214], "13": [201, 214, 297], "econ_fpath": [202, 203, 285], "friction_fpath": [202, 203, 285], "friction_dset": [202, 203, 285], "surfac": [202, 203, 285, 295], "_data_lay": 202, "2km": [203, 285, 291, 296], "onto": [203, 285], "128": [203, 207, 285], "coars": [203, 285], "complementari": [203, 285], "character": [203, 285], "classif": [203, 285], "enumer": 203, "geograph": [203, 278, 297], "stringifi": 203, "arithmet": 203, "recalc": 203, "conting": 203, "area_sq_km": 203, "common": [203, 205, 277, 284, 295, 300], "determinist": 203, "sc_row_ind": 203, "sc_col_ind": 203, "res_class": 203, "mean_": 203, "sc_point_method": 203, "f_gen": 203, "creation": 204, "sc_featur": [205, 284], "sc_capacity_col": [205, 284], "transmiss": [205, 284, 288, 294, 296], "cheapest": [205, 284], "permut": [205, 284], "bespok": [205, 267, 268, 277, 284, 288, 302], "voltag": [205, 284], "trans_sc_tabl": [205, 284], "built": [205, 207, 284, 288], "reinforc": [205, 284], "term": [205, 284, 299], "sc_aggreg": 205, "supplycurveaggreg": [205, 285], "tie": 205, "trans_gid": [205, 284], "trans_capac": 205, "trans_typ": [205, 284], "tranmiss": [205, 296], "translin": 205, "trans_cap_cost_per_mw": [205, 284], "dist_km": [205, 284], "total_lcoe_frict": [205, 284], "compute_total_lco": 205, "transmission_cost": [205, 284], "consider_frict": [205, 284], "ac_cap": [205, 284], "add_sum_col": 205, "sum_col": 205, "summat": 205, "total_cap_cost": 205, "cap_cost1": 205, "cap_cost2": 205, "full_sort": 205, "offshore_compet": [205, 284], "preserv": [205, 284], "competitivewindfarm": 205, "simple_sort": 205, "decim": 
[205, 284], "trans_cap_cost": [205, 284], "spur": [205, 284], "prominentwinddirect": [205, 284], "ckdtree": [206, 207], "47": [206, 214], "sc_resolut": 207, "2560": 207, "dist_margin": 207, "margin": 207, "distance_threshold": 207, "nn": 207, "diagon": 207, "map_resource_gid": 207, "sc_row_indic": 207, "sc_col_indic": 207, "tree": 207, "dist_thresh": 207, "dist_tresh": 207, "save_tech_map": 207, "fpath_out": 207, "map_resourc": 207, "points_per_work": 207, "enum": 209, "click": [209, 291], "invok": [209, 291], "dash": 209, "4rehbsvf": 209, "all_nam": 209, "configwarn": 211, "thrown": [211, 271], "never": 211, "verbos": [212, 268, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 282, 284, 285], "logger": [212, 259, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291], "target_modul": 213, "out_fil": [213, 263, 282, 297], "modulenam": 213, "pipelineerror": 213, "sam_resourc": 215, "psuedo": 215, "runtimeerror": [217, 230], "unclear": [221, 234], "mismatch": 222, "failur": [225, 242, 283], "extrapol": 226, "danger": 239, "suspect": 240, "poorli": 246, "problemat": 248, "stuck": 254, "loop": 254, "deprec": 255, "pytest": 260, "td": [261, 298], "shuffl": 261, "fake": 261, "tempfil": [261, 298], "temporarydirectori": [261, 298], "out_pattern": 261, "s_slice": 261, "chang": [262, 291], "slotted_dict": 265, "rev": [267, 289, 290, 292, 293, 294, 301, 303], "templat": [267, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285, 288, 302], "batch": [267, 268, 270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 288, 289, 290, 295, 302], "multiyear": [267, 288, 302], "hybrid": [267, 268, 277, 288, 302], "nrwal": [267, 268, 277, 288, 295, 296, 302], "good": [268, 291], "page": [268, 288, 291, 302], "our": [268, 291, 300], "config_pipelin": [268, 288, 291, 294, 302], "sequenc": 268, "report": [268, 283, 288], "progress": [268, 281], "parameter": [268, 269, 290, 292], "config_batch": [268, 290, 292], "gap": [268, 288], "guid": [268, 
288, 291], "structur": [268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 281, 282, 283, 284, 285, 286], "parametr": 269, "config_fil": [269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285], "log_fil": [269, 277, 292, 294], "null": [269, 270, 271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285], "pipeline_config": 269, "set_tag": 269, "set1": 269, "set2": 269, "init_logg": [269, 277, 291, 300], "ever": [269, 277], "necessari": [269, 277, 293, 294], "input_constant_1": 269, "path_to_a_fil": 269, "third": 269, "six": 269, "rememb": 269, "config_run": 269, "config_analyz": 269, "tag": [269, 291], "concaten": 269, "dry": [269, 292], "dir": [269, 288, 291, 302], "cancel": [269, 277, 292, 294], "batch_job": 269, "monitor": [269, 277, 291, 292, 294], "background": [269, 277, 292, 294], "stdout": [269, 277, 292, 294], "stderr": [269, 277, 292, 294], "captur": [269, 277, 292, 294], "IF": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "ON": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "qo": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 293], "queue": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 291], "kestrel": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "awspc": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "au": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "catchal": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "servic": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 289, 300], "On": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "standbi": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "pb": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "short": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 293], "test_queu": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "p": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 283, 284, 
285, 291, 293], "seen": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285, 296], "suitabl": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "moder": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "yourself": [270, 271, 272, 273, 274, 275, 276, 279, 280, 282, 284, 285], "purge_chunk": 271, "collect_pattern": 271, "wil": 271, "surpris": 271, "byproduct": 271, "config_bespok": 277, "config_gener": 277, "config_econ": [277, 290], "config_collect": 277, "config_multi_year": 277, "config_supply_curve_aggreg": 277, "config_supply_curv": 277, "config_rep_profil": 277, "config_hybrid": 277, "config_nrw": 277, "config_qa_qc": 277, "config_script": 277, "_unique_": 277, "submiss": 277, "kick": [277, 292], "off": [277, 288, 292, 296], "recurs": [277, 283, 292], "therein": 277, "skip": [277, 294], "rf": 278, "sf": 278, "llf": 278, "lat_lon_fpath": 278, "llc": 278, "reg": 278, "region_col": [278, 297], "queu": 281, "after_step": 281, "unaffect": 281, "cmd": 282, "my_script": 282, "ineffici": 282, "processor": 282, "period": 282, "multiprocess": 282, "share": [282, 291], "txt": [282, 291], "wget": [282, 291], "websit": 282, "org": [282, 288, 295, 302], "latest": [282, 291], "zip": 282, "download": [282, 300], "web": [282, 300], "displai": [283, 297], "folder": [283, 291, 292], "pipe_step": 283, "repeat": 283, "step1": 283, "step2": 283, "insensit": [283, 286], "sb": 283, "pend": 283, "success": [283, 291, 294], "unsubmit": 283, "not_submit": 283, "status1": 283, "status2": 283, "key1": 283, "key2": 283, "yml": [286, 291], "geospati": [288, 302], "techno": 288, "technic": [288, 302], "research": 288, "natur": 288, "highli": [288, 300], "contin": 288, "five": 288, "span": 288, "decad": 288, "broad": 288, "coverag": 288, "america": 288, "south": 288, "central": 288, "asia": 288, "middl": 288, "east": [288, 296], "africa": 288, "nation": [288, 295, 302], "analys": [288, 290], "suit": 288, "infrastructur": [288, 300], "deploy": 288, "plan": [288, 
291], "consist": [288, 297], "strung": 288, "workflow": [288, 289, 291], "sent": 288, "visual": 288, "go": [288, 291], "straight": 288, "git": [288, 291, 302], "github": [288, 291, 294, 302], "env": [288, 291, 302], "cd": [288, 291, 302], "branch": [288, 302], "dev": [288, 302], "tip": [288, 302], "session": [288, 302], "scratch": [288, 301, 302], "consol": [288, 291, 302], "config_gen": [288, 291, 302], "pleas": [288, 292, 300, 302], "cite": [288, 302], "paper": [288, 302], "softwar": [288, 300, 302], "doi": [288, 295, 302], "maclaurin": [288, 302], "galen": [288, 302], "j": [288, 302], "nichola": [288, 302], "grue": [288, 302], "anthoni": [288, 302], "lopez": [288, 302], "donna": [288, 302], "heimil": [288, 302], "michael": [288, 302], "rossol": [288, 302], "grant": [288, 302], "buster": [288, 302], "travi": [288, 302], "william": [288, 302], "platform": [288, 302], "golden": [288, 302], "colorado": [288, 302], "laboratori": [288, 295, 302], "tp": [288, 302], "6a20": [288, 302], "73067": [288, 302], "2172": [288, 302], "1563140": [288, 302], "paul": [288, 302], "pinchuk": [288, 302], "brandon": [288, 302], "benton": [288, 302], "robert": [288, 302], "spencer": [288, 302], "mike": [288, 302], "bannist": [288, 302], "v0": [288, 302], "zenodo": [288, 302], "5281": [288, 302], "8247528": [288, 302], "marin": 289, "treatment": 289, "server": [289, 300], "kubernet": 289, "lambda": 289, "concert": 290, "complex": 290, "three": [290, 292, 295], "suffici": [290, 291], "header": [290, 291], "tech_config": 290, "now": 291, "live": 291, "s3": [291, 300], "ec2": [291, 300], "lot": 291, "easier": 291, "api": [291, 300], "west": 291, "oregon": 291, "overview": 291, "cloud9": 291, "ini": 291, "choos": 291, "master_instance_typ": 291, "t2": 291, "micro": 291, "c5": 291, "xlarg": 291, "free": 291, "tier": 291, "elig": 291, "login": 291, "eb": 291, "volum": 291, "gp2": 291, "volume_typ": 291, "ssd": 291, "1gb": 291, "16tb": 291, "volume_s": 291, "filesystem": 291, "seem": 291, 
"fsx": 291, "unnecessari": 291, "intens": 291, "ssh": 291, "pcluster_nam": 291, "lab": 291, "miniconda": 291, "repo": [291, 303], "anaconda": 291, "miniconda3": 291, "sh": 291, "bashrc": 291, "aws_pclust": 291, "ship": 291, "pypi": 291, "cat": 291, "id_rsa": 291, "pub": 291, "put": 291, "squeue": 291, "sinfo": 291, "spin": 291, "certainli": 291, "503": 291, "vcpu": 291, "thread": 291, "disable_hyperthread": 291, "accept": 291, "reason": 291, "2xlarg": 291, "fleet": 291, "thought": 291, "setup": [291, 292, 300], "max_task_count": 291, "admin": 291, "docker": 291, "dn_ram": 291, "sn_ram": 291, "intact": 291, "readm": 291, "hdfgroup": 291, "magic": 291, "cp": 291, "passwd": 291, "wish": 291, "hscfg": [291, 300], "hs_endpoint": [291, 300], "localhost": 291, "5101": 291, "hs_usernam": [291, 300], "hs_password": [291, 300], "hs_api_kei": [291, 300], "hs_bucket": 291, "start_hsd": 291, "aws_access_key_id": 291, "aws_secret_access_kei": 291, "bucket_nam": 291, "iam": 291, "privileg": 291, "root": 291, "hsds_rangeget_1": 291, "hsds_sn_1": 291, "hsds_head_1": 291, "hsds_dn_": 291, "hsinfo": 291, "doesn": 291, "snippet": 291, "trial": 291, "team": 291, "pursu": 291, "rout": 291, "ve": 291, "overhead": 291, "poster": 291, "_not_": 291, "ecr": 291, "bullet": 291, "push": 291, "privat": 291, "gatewai": 291, "relat": 291, "resiz": 291, "hslambda": 291, "sublist": 291, "stdin": 291, "aws_account_id": 291, "dkr": 291, "amazonaw": 291, "lambda_build": 291, "my": 291, "uri": 291, "upload": 291, "banner": 291, "sai": 291, "yai": 291, "tab": 291, "1024mb": 291, "feel": 291, "tune": 291, "later": 291, "workload": [291, 300], "role": 291, "aws_s3_gatewai": 291, "button": 291, "status_cod": 291, "isbase64encod": 291, "statuscod": 291, "charset": 291, "utf": [291, 298], "323": 291, "tue": 291, "gmt": 291, "aiohttp": 291, "start_tim": 291, "1637706428": 291, "readi": 291, "hsds_version": 291, "0beta": 291, "greet": 291, "welcom": 291, "webservic": 291, "hdf": [291, 300], "node_count": 291, 
"dn_url": 291, "2ftmp": 291, "2fhs1a1c917f": 291, "2fdn_1": 291, "sock": 291, "dn_id": 291, "001": 291, "anonym": 291, "isadmin": 291, "integr": [291, 295], "stage": 291, "auto": 291, "deploi": 291, "url": 291, "xxxxxxx": 291, "hdfcloud": 291, "v3": [291, 300], "nsrdb_": 291, "endpoint": [291, 300], "hit": 291, "woohoo": 291, "17520": 291, "obvious": 291, "__name__": 291, "__main__": 291, "getlogg": 291, "fp": 291, "nsrdb_2019": 291, "demand": 291, "34": 291, "rough": 291, "budget": 291, "significantli": 291, "spot": 291, "market": 291, "influenc": 291, "datum": 291, "hr": 291, "35088": 291, "1850": 291, "49e7": 291, "77e": 291, "17544": 291, "6268": 291, "10e8": 291, "79e": 291, "4km": 291, "30min": 291, "5e05": 291, "76e9": 291, "457": 291, "155": 291, "1hr": 291, "2e6": 291, "75e10": 291, "195": 291, "66": [291, 298, 299, 300], "nine": 292, "happi": 292, "confid": 292, "goe": [292, 294], "wrong": [292, 294], "repeatedli": 292, "clean": 292, "rerun": 292, "shown": 292, "config_multi": 292, "partit": 293, "chosen": 293, "bigmem": 293, "192": 293, "receiv": 293, "demonstr": [294, 296, 300, 301], "nohup": 294, "plu": 294, "had": 294, "previous": 294, "beyond": 294, "conus_trans_lines_cache_064_sj_infsink": 294, "config_suppli": 294, "databas": 294, "rev_conus_exclus": 294, "config_aggreg": 294, "hydrokinet": 295, "mhk": 295, "atlant": 295, "pacif": 295, "ocean": 295, "dx": 295, "15473": 295, "1647329": 295, "coast": 296, "pure": 296, "fictiti": 296, "substructur": 296, "semi": 296, "submers": 296, "slightli": 296, "shore": 296, "seper": 296, "definit": 297, "varieti": 297, "fpp": 297, "pp_offshor": 297, "wind_gen_standard_losses_0": [297, 298, 299, 300], "wind_gen_standard_losses_1": 297, "2114919": 297, "2114920": 297, "2114921": 297, "2114922": 297, "2114923": 297, "124402": 297, "2239321": 297, "124403": 297, "2239322": 297, "124404": 297, "2239323": 297, "124405": 297, "2239324": 297, "124406": 297, "2239325": 297, "124407": 297, "mrossol": 297, "git_repo": 297, 
"wi": 297, "41": [297, 298, 299, 300], "77": 297, "71": [297, 298, 299, 300], "74": [297, 298, 299, 300], "73": 297, "97": [297, 298, 299, 300], "78": [297, 298, 299, 300], "ri_100_nsrdb_2012": [297, 299], "67": 297, "79": 297, "Of": 297, "rhode": [297, 299, 300], "island": [297, 299, 300], "kent": 297, "29": 297, "32": 297, "48": 297, "52": 297, "53": [297, 300], "55": 297, "69": 297, "82": 297, "83": 297, "94": 297, "96": 297, "25": [297, 298, 299, 300], "26": 297, "44": 297, "59": 297, "68": 297, "87": 297, "plt": 298, "ri_100_wtk_2012": [298, 299], "encod": 298, "fh": 298, "power_curve_loss_info": 298, "sam_fp": 298, "turb_generic_loss": 298, "133": 298, "202": 298, "184": 298, "045": 298, "242": 298, "508": 298, "119": 298, "319": [298, 299, 300], "688": 298, "628": 298, "power_curv": 298, "site_ind": 298, "temperature_100m": 298, "pressure_100m": 298, "windspeed_100m": 298, "pc_wind_spe": 298, "wind_turbine_powercurve_windspe": 298, "pc_gener": 298, "wind_turbine_powercurve_powerout": 298, "new_curv": 298, "legend": 298, "xlabel": 298, "ylabel": 298, "tell": 298, "wind_farm_losses_perc": 298, "744": 298, "67402536": 298, "6644584": 298, "technolog": 299, "65": [299, 300], "538": [299, 300], "287": [299, 300], "496": [299, 300], "579": [299, 300], "486": [299, 300], "382": [299, 300], "474": [299, 300], "595": [299, 300], "339": [299, 300], "601": [299, 300], "696": [299, 300], "814": [299, 300], "724": [299, 300], "466": [299, 300], "677": [299, 300], "833": [299, 300], "823": [299, 300], "782": [299, 300], "756": [299, 300], "801": [299, 300], "pvcapac": [299, 300], "183": [299, 300], "166": [299, 300], "177": [299, 300], "175": [299, 300], "167": [299, 300], "176": [299, 300], "scalabl": 300, "cloud": 300, "amazon": 300, "elast": 300, "slide": 300, "deck": 300, "further": 300, "enhanc": 300, "experi": 300, "might": 300, "hsconfigur": 300, "gov": 300, "your_api_key_her": 300, "visit": 300, "signup": 300, "purpos": 300, "oserror": 300, "busi": 300, "person": 
300, "nsrdb_file": 300, "nsrdb_2013": 300, "meta_data": 300, "wtk_conus_2012": 300, "nsrdb_2012": 300, "module_config": 301, "rev_statu": 301, "addition": 301, "analyst": 303, "launch": 303, "citat": 303}, "objects": {"": [[0, 0, 0, "-", "reV"]], "reV": [[1, 0, 0, "-", "SAM"], [46, 0, 0, "-", "bespoke"], [66, 0, 0, "-", "cli"], [67, 0, 0, "-", "config"], [88, 0, 0, "-", "econ"], [96, 0, 0, "-", "generation"], [102, 0, 0, "-", "handlers"], [116, 0, 0, "-", "hybrids"], [129, 0, 0, "-", "losses"], [155, 0, 0, "-", "nrwal"], [159, 0, 0, "-", "qa_qc"], [172, 0, 0, "-", "rep_profiles"], [179, 0, 0, "-", "supply_curve"], [208, 0, 0, "-", "utilities"], [266, 0, 0, "-", "version"], [268, 8, 1, "cmdoption-reV-v", "--verbose"], [268, 8, 1, "cmdoption-reV-version", "--version"], [268, 8, 1, "cmdoption-reV-v", "-v"]], "reV.SAM": [[2, 0, 0, "-", "SAM"], [6, 0, 0, "-", "defaults"], [20, 0, 0, "-", "econ"], [24, 0, 0, "-", "generation"], [42, 0, 0, "-", "version_checker"], [44, 0, 0, "-", "windbos"]], "reV.SAM.SAM": [[3, 1, 1, "", "RevPySam"], [4, 1, 1, "", "Sam"], [5, 1, 1, "", "SamResourceRetriever"]], "reV.SAM.SAM.RevPySam": [[3, 2, 1, "", "PYSAM"], [3, 3, 1, "", "assign_inputs"], [3, 4, 1, "", "attr_dict"], [3, 3, 1, "", "collect_outputs"], [3, 3, 1, "", "default"], [3, 3, 1, "", "drop_leap"], [3, 3, 1, "", "ensure_res_len"], [3, 3, 1, "", "execute"], [3, 3, 1, "", "get_sam_res"], [3, 3, 1, "", "get_time_interval"], [3, 4, 1, "", "input_list"], [3, 3, 1, "", "make_datetime"], [3, 4, 1, "", "meta"], [3, 4, 1, "", "module"], [3, 3, 1, "", "outputs_to_utc_arr"], [3, 4, 1, "", "pysam"], [3, 4, 1, "", "site"]], "reV.SAM.SAM.Sam": [[4, 2, 1, "", "PYSAM"], [4, 3, 1, "", "assign_inputs"], [4, 4, 1, "", "attr_dict"], [4, 3, 1, "", "default"], [4, 3, 1, "", "execute"], [4, 4, 1, "", "input_list"], [4, 4, 1, "", "pysam"]], "reV.SAM.SAM.SamResourceRetriever": [[5, 3, 1, "", "get"]], "reV.SAM.defaults": [[7, 1, 1, "", "AbstractDefaultFromConfigFile"], [8, 1, 1, "", "DefaultGeothermal"], 
[9, 1, 1, "", "DefaultLCOE"], [10, 1, 1, "", "DefaultLinearFresnelDsgIph"], [11, 1, 1, "", "DefaultMhkWave"], [12, 1, 1, "", "DefaultPvSamv1"], [13, 1, 1, "", "DefaultPvWattsv5"], [14, 1, 1, "", "DefaultPvWattsv8"], [15, 1, 1, "", "DefaultSingleOwner"], [16, 1, 1, "", "DefaultSwh"], [17, 1, 1, "", "DefaultTcsMoltenSalt"], [18, 1, 1, "", "DefaultTroughPhysicalProcessHeat"], [19, 1, 1, "", "DefaultWindPower"]], "reV.SAM.defaults.AbstractDefaultFromConfigFile": [[7, 4, 1, "", "CONFIG_FILE_NAME"], [7, 4, 1, "", "PYSAM_MODULE"], [7, 3, 1, "", "init_default_pysam_obj"]], "reV.SAM.defaults.DefaultGeothermal": [[8, 2, 1, "", "PYSAM_MODULE"], [8, 3, 1, "", "default"], [8, 3, 1, "", "init_default_pysam_obj"]], "reV.SAM.defaults.DefaultLCOE": [[9, 3, 1, "", "default"]], "reV.SAM.defaults.DefaultLinearFresnelDsgIph": [[10, 3, 1, "", "default"]], "reV.SAM.defaults.DefaultMhkWave": [[11, 3, 1, "", "default"]], "reV.SAM.defaults.DefaultPvSamv1": [[12, 3, 1, "", "default"]], "reV.SAM.defaults.DefaultPvWattsv5": [[13, 2, 1, "", "PYSAM_MODULE"], [13, 3, 1, "", "default"], [13, 3, 1, "", "init_default_pysam_obj"]], "reV.SAM.defaults.DefaultPvWattsv8": [[14, 3, 1, "", "default"]], "reV.SAM.defaults.DefaultSingleOwner": [[15, 3, 1, "", "default"]], "reV.SAM.defaults.DefaultSwh": [[16, 3, 1, "", "default"]], "reV.SAM.defaults.DefaultTcsMoltenSalt": [[17, 3, 1, "", "default"]], "reV.SAM.defaults.DefaultTroughPhysicalProcessHeat": [[18, 3, 1, "", "default"]], "reV.SAM.defaults.DefaultWindPower": [[19, 3, 1, "", "default"]], "reV.SAM.econ": [[21, 1, 1, "", "Economic"], [22, 1, 1, "", "LCOE"], [23, 1, 1, "", "SingleOwner"]], "reV.SAM.econ.Economic": [[21, 2, 1, "", "PYSAM"], [21, 3, 1, "", "assign_inputs"], [21, 4, 1, "", "attr_dict"], [21, 3, 1, "", "collect_outputs"], [21, 3, 1, "", "default"], [21, 3, 1, "", "drop_leap"], [21, 3, 1, "", "ensure_res_len"], [21, 3, 1, "", "execute"], [21, 3, 1, "", "flip_actual_irr"], [21, 3, 1, "", "get_sam_res"], [21, 3, 1, "", "get_time_interval"], [21, 
3, 1, "", "gross_revenue"], [21, 4, 1, "", "input_list"], [21, 3, 1, "", "lcoe_fcr"], [21, 3, 1, "", "lcoe_nom"], [21, 3, 1, "", "lcoe_real"], [21, 3, 1, "", "make_datetime"], [21, 4, 1, "", "meta"], [21, 4, 1, "", "module"], [21, 3, 1, "", "npv"], [21, 3, 1, "", "outputs_to_utc_arr"], [21, 3, 1, "", "ppa_price"], [21, 4, 1, "", "pysam"], [21, 3, 1, "", "reV_run"], [21, 4, 1, "", "site"]], "reV.SAM.econ.LCOE": [[22, 2, 1, "", "PYSAM"], [22, 3, 1, "", "assign_inputs"], [22, 4, 1, "", "attr_dict"], [22, 3, 1, "", "collect_outputs"], [22, 3, 1, "", "default"], [22, 3, 1, "", "drop_leap"], [22, 3, 1, "", "ensure_res_len"], [22, 3, 1, "", "execute"], [22, 3, 1, "", "flip_actual_irr"], [22, 3, 1, "", "get_sam_res"], [22, 3, 1, "", "get_time_interval"], [22, 3, 1, "", "gross_revenue"], [22, 4, 1, "", "input_list"], [22, 3, 1, "", "lcoe_fcr"], [22, 3, 1, "", "lcoe_nom"], [22, 3, 1, "", "lcoe_real"], [22, 3, 1, "", "make_datetime"], [22, 4, 1, "", "meta"], [22, 4, 1, "", "module"], [22, 3, 1, "", "npv"], [22, 3, 1, "", "outputs_to_utc_arr"], [22, 3, 1, "", "ppa_price"], [22, 4, 1, "", "pysam"], [22, 3, 1, "", "reV_run"], [22, 4, 1, "", "site"]], "reV.SAM.econ.SingleOwner": [[23, 2, 1, "", "PYSAM"], [23, 3, 1, "", "assign_inputs"], [23, 4, 1, "", "attr_dict"], [23, 3, 1, "", "collect_outputs"], [23, 3, 1, "", "default"], [23, 3, 1, "", "drop_leap"], [23, 3, 1, "", "ensure_res_len"], [23, 3, 1, "", "execute"], [23, 3, 1, "", "flip_actual_irr"], [23, 3, 1, "", "get_sam_res"], [23, 3, 1, "", "get_time_interval"], [23, 3, 1, "", "gross_revenue"], [23, 4, 1, "", "input_list"], [23, 3, 1, "", "lcoe_fcr"], [23, 3, 1, "", "lcoe_nom"], [23, 3, 1, "", "lcoe_real"], [23, 3, 1, "", "make_datetime"], [23, 4, 1, "", "meta"], [23, 4, 1, "", "module"], [23, 3, 1, "", "npv"], [23, 3, 1, "", "outputs_to_utc_arr"], [23, 3, 1, "", "ppa_price"], [23, 4, 1, "", "pysam"], [23, 3, 1, "", "reV_run"], [23, 4, 1, "", "site"]], "reV.SAM.generation": [[25, 1, 1, "", "AbstractSamGeneration"], [26, 1, 1, 
"", "AbstractSamGenerationFromWeatherFile"], [27, 1, 1, "", "AbstractSamPv"], [28, 1, 1, "", "AbstractSamSolar"], [29, 1, 1, "", "AbstractSamWind"], [30, 1, 1, "", "Geothermal"], [31, 1, 1, "", "LinearDirectSteam"], [32, 1, 1, "", "MhkWave"], [33, 1, 1, "", "PvSamv1"], [34, 1, 1, "", "PvWattsv5"], [35, 1, 1, "", "PvWattsv7"], [36, 1, 1, "", "PvWattsv8"], [37, 1, 1, "", "SolarWaterHeat"], [38, 1, 1, "", "TcsMoltenSalt"], [39, 1, 1, "", "TroughPhysicalHeat"], [40, 1, 1, "", "WindPower"], [41, 1, 1, "", "WindPowerPD"]], "reV.SAM.generation.AbstractSamGeneration": [[25, 2, 1, "", "OUTAGE_CONFIG_KEY"], [25, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [25, 2, 1, "", "PYSAM"], [25, 3, 1, "", "add_scheduled_losses"], [25, 3, 1, "", "annual_energy"], [25, 3, 1, "", "assign_inputs"], [25, 4, 1, "", "attr_dict"], [25, 3, 1, "", "cf_mean"], [25, 3, 1, "", "cf_profile"], [25, 3, 1, "", "check_resource_data"], [25, 3, 1, "", "collect_outputs"], [25, 3, 1, "", "default"], [25, 3, 1, "", "drop_leap"], [25, 3, 1, "", "energy_yield"], [25, 3, 1, "", "ensure_res_len"], [25, 3, 1, "", "execute"], [25, 3, 1, "", "gen_profile"], [25, 3, 1, "", "get_sam_res"], [25, 3, 1, "", "get_time_interval"], [25, 4, 1, "", "has_timezone"], [25, 4, 1, "", "input_list"], [25, 3, 1, "", "make_datetime"], [25, 4, 1, "", "meta"], [25, 4, 1, "", "module"], [25, 4, 1, "", "outage_seed"], [25, 3, 1, "", "outputs_to_utc_arr"], [25, 4, 1, "", "pysam"], [25, 3, 1, "", "reV_run"], [25, 3, 1, "", "run"], [25, 3, 1, "", "run_gen_and_econ"], [25, 3, 1, "", "set_resource_data"], [25, 4, 1, "", "site"], [25, 3, 1, "", "tz_elev_check"]], "reV.SAM.generation.AbstractSamGenerationFromWeatherFile": [[26, 2, 1, "", "OUTAGE_CONFIG_KEY"], [26, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [26, 2, 1, "", "PYSAM"], [26, 4, 1, "", "PYSAM_WEATHER_TAG"], [26, 3, 1, "", "add_scheduled_losses"], [26, 3, 1, "", "annual_energy"], [26, 3, 1, "", "assign_inputs"], [26, 4, 1, "", "attr_dict"], [26, 3, 1, "", "cf_mean"], [26, 3, 1, "", "cf_profile"], 
[26, 3, 1, "", "check_resource_data"], [26, 3, 1, "", "collect_outputs"], [26, 3, 1, "", "default"], [26, 3, 1, "", "drop_leap"], [26, 3, 1, "", "energy_yield"], [26, 3, 1, "", "ensure_res_len"], [26, 3, 1, "", "execute"], [26, 3, 1, "", "gen_profile"], [26, 3, 1, "", "get_sam_res"], [26, 3, 1, "", "get_time_interval"], [26, 4, 1, "", "has_timezone"], [26, 4, 1, "", "input_list"], [26, 3, 1, "", "make_datetime"], [26, 4, 1, "", "meta"], [26, 4, 1, "", "module"], [26, 4, 1, "", "outage_seed"], [26, 3, 1, "", "outputs_to_utc_arr"], [26, 4, 1, "", "pysam"], [26, 3, 1, "", "reV_run"], [26, 3, 1, "", "run"], [26, 3, 1, "", "run_gen_and_econ"], [26, 3, 1, "", "set_resource_data"], [26, 4, 1, "", "site"], [26, 3, 1, "", "tz_elev_check"]], "reV.SAM.generation.AbstractSamPv": [[27, 2, 1, "", "OUTAGE_CONFIG_KEY"], [27, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [27, 2, 1, "", "PYSAM"], [27, 3, 1, "", "ac"], [27, 3, 1, "", "add_scheduled_losses"], [27, 3, 1, "", "agg_albedo"], [27, 3, 1, "", "annual_energy"], [27, 3, 1, "", "assign_inputs"], [27, 4, 1, "", "attr_dict"], [27, 3, 1, "", "cf_mean"], [27, 3, 1, "", "cf_mean_ac"], [27, 3, 1, "", "cf_profile"], [27, 3, 1, "", "cf_profile_ac"], [27, 3, 1, "", "check_resource_data"], [27, 3, 1, "", "clipped_power"], [27, 3, 1, "", "collect_outputs"], [27, 3, 1, "", "dc"], [27, 3, 1, "", "default"], [27, 3, 1, "", "drop_leap"], [27, 3, 1, "", "energy_yield"], [27, 3, 1, "", "ensure_res_len"], [27, 3, 1, "", "execute"], [27, 3, 1, "", "gen_profile"], [27, 3, 1, "", "get_sam_res"], [27, 3, 1, "", "get_time_interval"], [27, 4, 1, "", "has_timezone"], [27, 4, 1, "", "input_list"], [27, 3, 1, "", "make_datetime"], [27, 4, 1, "", "meta"], [27, 4, 1, "", "module"], [27, 4, 1, "", "outage_seed"], [27, 3, 1, "", "outputs_to_utc_arr"], [27, 4, 1, "", "pysam"], [27, 3, 1, "", "reV_run"], [27, 3, 1, "", "run"], [27, 3, 1, "", "run_gen_and_econ"], [27, 3, 1, "", "set_latitude_tilt_az"], [27, 3, 1, "", "set_resource_data"], [27, 4, 1, "", "site"], [27, 
3, 1, "", "system_capacity_ac"], [27, 3, 1, "", "tz_elev_check"]], "reV.SAM.generation.AbstractSamSolar": [[28, 2, 1, "", "OUTAGE_CONFIG_KEY"], [28, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [28, 2, 1, "", "PYSAM"], [28, 3, 1, "", "add_scheduled_losses"], [28, 3, 1, "", "agg_albedo"], [28, 3, 1, "", "annual_energy"], [28, 3, 1, "", "assign_inputs"], [28, 4, 1, "", "attr_dict"], [28, 3, 1, "", "cf_mean"], [28, 3, 1, "", "cf_profile"], [28, 3, 1, "", "check_resource_data"], [28, 3, 1, "", "collect_outputs"], [28, 3, 1, "", "default"], [28, 3, 1, "", "drop_leap"], [28, 3, 1, "", "energy_yield"], [28, 3, 1, "", "ensure_res_len"], [28, 3, 1, "", "execute"], [28, 3, 1, "", "gen_profile"], [28, 3, 1, "", "get_sam_res"], [28, 3, 1, "", "get_time_interval"], [28, 4, 1, "", "has_timezone"], [28, 4, 1, "", "input_list"], [28, 3, 1, "", "make_datetime"], [28, 4, 1, "", "meta"], [28, 4, 1, "", "module"], [28, 4, 1, "", "outage_seed"], [28, 3, 1, "", "outputs_to_utc_arr"], [28, 4, 1, "", "pysam"], [28, 3, 1, "", "reV_run"], [28, 3, 1, "", "run"], [28, 3, 1, "", "run_gen_and_econ"], [28, 3, 1, "", "set_resource_data"], [28, 4, 1, "", "site"], [28, 3, 1, "", "tz_elev_check"]], "reV.SAM.generation.AbstractSamWind": [[29, 2, 1, "", "OUTAGE_CONFIG_KEY"], [29, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [29, 2, 1, "", "POWER_CURVE_CONFIG_KEY"], [29, 2, 1, "", "PYSAM"], [29, 3, 1, "", "add_power_curve_losses"], [29, 3, 1, "", "add_scheduled_losses"], [29, 3, 1, "", "annual_energy"], [29, 3, 1, "", "assign_inputs"], [29, 4, 1, "", "attr_dict"], [29, 3, 1, "", "cf_mean"], [29, 3, 1, "", "cf_profile"], [29, 3, 1, "", "check_resource_data"], [29, 3, 1, "", "collect_outputs"], [29, 3, 1, "", "default"], [29, 3, 1, "", "drop_leap"], [29, 3, 1, "", "energy_yield"], [29, 3, 1, "", "ensure_res_len"], [29, 3, 1, "", "execute"], [29, 3, 1, "", "gen_profile"], [29, 3, 1, "", "get_sam_res"], [29, 3, 1, "", "get_time_interval"], [29, 4, 1, "", "has_timezone"], [29, 4, 1, "", "input_list"], [29, 4, 1, "", 
"input_power_curve"], [29, 3, 1, "", "make_datetime"], [29, 4, 1, "", "meta"], [29, 4, 1, "", "module"], [29, 4, 1, "", "outage_seed"], [29, 3, 1, "", "outputs_to_utc_arr"], [29, 4, 1, "", "pysam"], [29, 3, 1, "", "reV_run"], [29, 3, 1, "", "run"], [29, 3, 1, "", "run_gen_and_econ"], [29, 3, 1, "", "set_resource_data"], [29, 4, 1, "", "site"], [29, 3, 1, "", "tz_elev_check"], [29, 3, 1, "", "wind_resource_from_input"]], "reV.SAM.generation.Geothermal": [[30, 2, 1, "", "OUTAGE_CONFIG_KEY"], [30, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [30, 2, 1, "", "PYSAM"], [30, 3, 1, "", "add_scheduled_losses"], [30, 3, 1, "", "annual_energy"], [30, 3, 1, "", "assign_inputs"], [30, 4, 1, "", "attr_dict"], [30, 3, 1, "", "cf_mean"], [30, 3, 1, "", "cf_profile"], [30, 3, 1, "", "check_resource_data"], [30, 3, 1, "", "collect_outputs"], [30, 3, 1, "", "default"], [30, 3, 1, "", "drop_leap"], [30, 3, 1, "", "energy_yield"], [30, 3, 1, "", "ensure_res_len"], [30, 3, 1, "", "execute"], [30, 3, 1, "", "gen_profile"], [30, 3, 1, "", "get_sam_res"], [30, 3, 1, "", "get_time_interval"], [30, 4, 1, "", "has_timezone"], [30, 4, 1, "", "input_list"], [30, 3, 1, "", "make_datetime"], [30, 4, 1, "", "meta"], [30, 4, 1, "", "module"], [30, 4, 1, "", "outage_seed"], [30, 3, 1, "", "outputs_to_utc_arr"], [30, 4, 1, "", "pysam"], [30, 3, 1, "", "reV_run"], [30, 3, 1, "", "run"], [30, 3, 1, "", "run_gen_and_econ"], [30, 3, 1, "", "set_resource_data"], [30, 4, 1, "", "site"], [30, 3, 1, "", "tz_elev_check"]], "reV.SAM.generation.LinearDirectSteam": [[31, 2, 1, "", "OUTAGE_CONFIG_KEY"], [31, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [31, 2, 1, "", "PYSAM"], [31, 3, 1, "", "add_scheduled_losses"], [31, 3, 1, "", "annual_energy"], [31, 3, 1, "", "assign_inputs"], [31, 4, 1, "", "attr_dict"], [31, 3, 1, "", "cf_mean"], [31, 3, 1, "", "cf_profile"], [31, 3, 1, "", "check_resource_data"], [31, 3, 1, "", "collect_outputs"], [31, 3, 1, "", "default"], [31, 3, 1, "", "drop_leap"], [31, 3, 1, "", "energy_yield"], [31, 
3, 1, "", "ensure_res_len"], [31, 3, 1, "", "execute"], [31, 3, 1, "", "gen_profile"], [31, 3, 1, "", "get_sam_res"], [31, 3, 1, "", "get_time_interval"], [31, 4, 1, "", "has_timezone"], [31, 4, 1, "", "input_list"], [31, 3, 1, "", "make_datetime"], [31, 4, 1, "", "meta"], [31, 4, 1, "", "module"], [31, 4, 1, "", "outage_seed"], [31, 3, 1, "", "outputs_to_utc_arr"], [31, 4, 1, "", "pysam"], [31, 3, 1, "", "reV_run"], [31, 3, 1, "", "run"], [31, 3, 1, "", "run_gen_and_econ"], [31, 3, 1, "", "set_resource_data"], [31, 4, 1, "", "site"], [31, 3, 1, "", "tz_elev_check"]], "reV.SAM.generation.MhkWave": [[32, 2, 1, "", "OUTAGE_CONFIG_KEY"], [32, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [32, 2, 1, "", "PYSAM"], [32, 3, 1, "", "add_scheduled_losses"], [32, 3, 1, "", "annual_energy"], [32, 3, 1, "", "assign_inputs"], [32, 4, 1, "", "attr_dict"], [32, 3, 1, "", "cf_mean"], [32, 3, 1, "", "cf_profile"], [32, 3, 1, "", "check_resource_data"], [32, 3, 1, "", "collect_outputs"], [32, 3, 1, "", "default"], [32, 3, 1, "", "drop_leap"], [32, 3, 1, "", "energy_yield"], [32, 3, 1, "", "ensure_res_len"], [32, 3, 1, "", "execute"], [32, 3, 1, "", "gen_profile"], [32, 3, 1, "", "get_sam_res"], [32, 3, 1, "", "get_time_interval"], [32, 4, 1, "", "has_timezone"], [32, 4, 1, "", "input_list"], [32, 3, 1, "", "make_datetime"], [32, 4, 1, "", "meta"], [32, 4, 1, "", "module"], [32, 4, 1, "", "outage_seed"], [32, 3, 1, "", "outputs_to_utc_arr"], [32, 4, 1, "", "pysam"], [32, 3, 1, "", "reV_run"], [32, 3, 1, "", "run"], [32, 3, 1, "", "run_gen_and_econ"], [32, 3, 1, "", "set_resource_data"], [32, 4, 1, "", "site"], [32, 3, 1, "", "tz_elev_check"]], "reV.SAM.generation.PvSamv1": [[33, 2, 1, "", "OUTAGE_CONFIG_KEY"], [33, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [33, 2, 1, "", "PYSAM"], [33, 3, 1, "", "ac"], [33, 3, 1, "", "add_scheduled_losses"], [33, 3, 1, "", "agg_albedo"], [33, 3, 1, "", "annual_energy"], [33, 3, 1, "", "assign_inputs"], [33, 4, 1, "", "attr_dict"], [33, 3, 1, "", "cf_mean"], [33, 
3, 1, "", "cf_mean_ac"], [33, 3, 1, "", "cf_profile"], [33, 3, 1, "", "cf_profile_ac"], [33, 3, 1, "", "check_resource_data"], [33, 3, 1, "", "clipped_power"], [33, 3, 1, "", "collect_outputs"], [33, 3, 1, "", "dc"], [33, 3, 1, "", "default"], [33, 3, 1, "", "drop_leap"], [33, 3, 1, "", "energy_yield"], [33, 3, 1, "", "ensure_res_len"], [33, 3, 1, "", "execute"], [33, 3, 1, "", "gen_profile"], [33, 3, 1, "", "get_sam_res"], [33, 3, 1, "", "get_time_interval"], [33, 4, 1, "", "has_timezone"], [33, 4, 1, "", "input_list"], [33, 3, 1, "", "make_datetime"], [33, 4, 1, "", "meta"], [33, 4, 1, "", "module"], [33, 4, 1, "", "outage_seed"], [33, 3, 1, "", "outputs_to_utc_arr"], [33, 4, 1, "", "pysam"], [33, 3, 1, "", "reV_run"], [33, 3, 1, "", "run"], [33, 3, 1, "", "run_gen_and_econ"], [33, 3, 1, "", "set_latitude_tilt_az"], [33, 3, 1, "", "set_resource_data"], [33, 4, 1, "", "site"], [33, 3, 1, "", "system_capacity_ac"], [33, 3, 1, "", "tz_elev_check"]], "reV.SAM.generation.PvWattsv5": [[34, 2, 1, "", "OUTAGE_CONFIG_KEY"], [34, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [34, 2, 1, "", "PYSAM"], [34, 3, 1, "", "ac"], [34, 3, 1, "", "add_scheduled_losses"], [34, 3, 1, "", "agg_albedo"], [34, 3, 1, "", "annual_energy"], [34, 3, 1, "", "assign_inputs"], [34, 4, 1, "", "attr_dict"], [34, 3, 1, "", "cf_mean"], [34, 3, 1, "", "cf_mean_ac"], [34, 3, 1, "", "cf_profile"], [34, 3, 1, "", "cf_profile_ac"], [34, 3, 1, "", "check_resource_data"], [34, 3, 1, "", "clipped_power"], [34, 3, 1, "", "collect_outputs"], [34, 3, 1, "", "dc"], [34, 3, 1, "", "default"], [34, 3, 1, "", "drop_leap"], [34, 3, 1, "", "energy_yield"], [34, 3, 1, "", "ensure_res_len"], [34, 3, 1, "", "execute"], [34, 3, 1, "", "gen_profile"], [34, 3, 1, "", "get_sam_res"], [34, 3, 1, "", "get_time_interval"], [34, 4, 1, "", "has_timezone"], [34, 4, 1, "", "input_list"], [34, 3, 1, "", "make_datetime"], [34, 4, 1, "", "meta"], [34, 4, 1, "", "module"], [34, 4, 1, "", "outage_seed"], [34, 3, 1, "", "outputs_to_utc_arr"], 
[34, 4, 1, "", "pysam"], [34, 3, 1, "", "reV_run"], [34, 3, 1, "", "run"], [34, 3, 1, "", "run_gen_and_econ"], [34, 3, 1, "", "set_latitude_tilt_az"], [34, 3, 1, "", "set_resource_data"], [34, 4, 1, "", "site"], [34, 3, 1, "", "system_capacity_ac"], [34, 3, 1, "", "tz_elev_check"]], "reV.SAM.generation.PvWattsv7": [[35, 2, 1, "", "OUTAGE_CONFIG_KEY"], [35, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [35, 2, 1, "", "PYSAM"], [35, 3, 1, "", "ac"], [35, 3, 1, "", "add_scheduled_losses"], [35, 3, 1, "", "agg_albedo"], [35, 3, 1, "", "annual_energy"], [35, 3, 1, "", "assign_inputs"], [35, 4, 1, "", "attr_dict"], [35, 3, 1, "", "cf_mean"], [35, 3, 1, "", "cf_mean_ac"], [35, 3, 1, "", "cf_profile"], [35, 3, 1, "", "cf_profile_ac"], [35, 3, 1, "", "check_resource_data"], [35, 3, 1, "", "clipped_power"], [35, 3, 1, "", "collect_outputs"], [35, 3, 1, "", "dc"], [35, 3, 1, "", "default"], [35, 3, 1, "", "drop_leap"], [35, 3, 1, "", "energy_yield"], [35, 3, 1, "", "ensure_res_len"], [35, 3, 1, "", "execute"], [35, 3, 1, "", "gen_profile"], [35, 3, 1, "", "get_sam_res"], [35, 3, 1, "", "get_time_interval"], [35, 4, 1, "", "has_timezone"], [35, 4, 1, "", "input_list"], [35, 3, 1, "", "make_datetime"], [35, 4, 1, "", "meta"], [35, 4, 1, "", "module"], [35, 4, 1, "", "outage_seed"], [35, 3, 1, "", "outputs_to_utc_arr"], [35, 4, 1, "", "pysam"], [35, 3, 1, "", "reV_run"], [35, 3, 1, "", "run"], [35, 3, 1, "", "run_gen_and_econ"], [35, 3, 1, "", "set_latitude_tilt_az"], [35, 3, 1, "", "set_resource_data"], [35, 4, 1, "", "site"], [35, 3, 1, "", "system_capacity_ac"], [35, 3, 1, "", "tz_elev_check"]], "reV.SAM.generation.PvWattsv8": [[36, 2, 1, "", "OUTAGE_CONFIG_KEY"], [36, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [36, 2, 1, "", "PYSAM"], [36, 3, 1, "", "ac"], [36, 3, 1, "", "add_scheduled_losses"], [36, 3, 1, "", "agg_albedo"], [36, 3, 1, "", "annual_energy"], [36, 3, 1, "", "assign_inputs"], [36, 4, 1, "", "attr_dict"], [36, 3, 1, "", "cf_mean"], [36, 3, 1, "", "cf_mean_ac"], [36, 3, 1, "", 
"cf_profile"], [36, 3, 1, "", "cf_profile_ac"], [36, 3, 1, "", "check_resource_data"], [36, 3, 1, "", "clipped_power"], [36, 3, 1, "", "collect_outputs"], [36, 3, 1, "", "dc"], [36, 3, 1, "", "default"], [36, 3, 1, "", "drop_leap"], [36, 3, 1, "", "energy_yield"], [36, 3, 1, "", "ensure_res_len"], [36, 3, 1, "", "execute"], [36, 3, 1, "", "gen_profile"], [36, 3, 1, "", "get_sam_res"], [36, 3, 1, "", "get_time_interval"], [36, 4, 1, "", "has_timezone"], [36, 4, 1, "", "input_list"], [36, 3, 1, "", "make_datetime"], [36, 4, 1, "", "meta"], [36, 4, 1, "", "module"], [36, 4, 1, "", "outage_seed"], [36, 3, 1, "", "outputs_to_utc_arr"], [36, 4, 1, "", "pysam"], [36, 3, 1, "", "reV_run"], [36, 3, 1, "", "run"], [36, 3, 1, "", "run_gen_and_econ"], [36, 3, 1, "", "set_latitude_tilt_az"], [36, 3, 1, "", "set_resource_data"], [36, 4, 1, "", "site"], [36, 3, 1, "", "system_capacity_ac"], [36, 3, 1, "", "tz_elev_check"]], "reV.SAM.generation.SolarWaterHeat": [[37, 2, 1, "", "OUTAGE_CONFIG_KEY"], [37, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [37, 2, 1, "", "PYSAM"], [37, 3, 1, "", "add_scheduled_losses"], [37, 3, 1, "", "annual_energy"], [37, 3, 1, "", "assign_inputs"], [37, 4, 1, "", "attr_dict"], [37, 3, 1, "", "cf_mean"], [37, 3, 1, "", "cf_profile"], [37, 3, 1, "", "check_resource_data"], [37, 3, 1, "", "collect_outputs"], [37, 3, 1, "", "default"], [37, 3, 1, "", "drop_leap"], [37, 3, 1, "", "energy_yield"], [37, 3, 1, "", "ensure_res_len"], [37, 3, 1, "", "execute"], [37, 3, 1, "", "gen_profile"], [37, 3, 1, "", "get_sam_res"], [37, 3, 1, "", "get_time_interval"], [37, 4, 1, "", "has_timezone"], [37, 4, 1, "", "input_list"], [37, 3, 1, "", "make_datetime"], [37, 4, 1, "", "meta"], [37, 4, 1, "", "module"], [37, 4, 1, "", "outage_seed"], [37, 3, 1, "", "outputs_to_utc_arr"], [37, 4, 1, "", "pysam"], [37, 3, 1, "", "reV_run"], [37, 3, 1, "", "run"], [37, 3, 1, "", "run_gen_and_econ"], [37, 3, 1, "", "set_resource_data"], [37, 4, 1, "", "site"], [37, 3, 1, "", "tz_elev_check"]], 
"reV.SAM.generation.TcsMoltenSalt": [[38, 2, 1, "", "OUTAGE_CONFIG_KEY"], [38, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [38, 2, 1, "", "PYSAM"], [38, 3, 1, "", "add_scheduled_losses"], [38, 3, 1, "", "agg_albedo"], [38, 3, 1, "", "annual_energy"], [38, 3, 1, "", "assign_inputs"], [38, 4, 1, "", "attr_dict"], [38, 3, 1, "", "cf_mean"], [38, 3, 1, "", "cf_profile"], [38, 3, 1, "", "check_resource_data"], [38, 3, 1, "", "collect_outputs"], [38, 3, 1, "", "default"], [38, 3, 1, "", "drop_leap"], [38, 3, 1, "", "energy_yield"], [38, 3, 1, "", "ensure_res_len"], [38, 3, 1, "", "execute"], [38, 3, 1, "", "gen_profile"], [38, 3, 1, "", "get_sam_res"], [38, 3, 1, "", "get_time_interval"], [38, 4, 1, "", "has_timezone"], [38, 4, 1, "", "input_list"], [38, 3, 1, "", "make_datetime"], [38, 4, 1, "", "meta"], [38, 4, 1, "", "module"], [38, 4, 1, "", "outage_seed"], [38, 3, 1, "", "outputs_to_utc_arr"], [38, 4, 1, "", "pysam"], [38, 3, 1, "", "reV_run"], [38, 3, 1, "", "run"], [38, 3, 1, "", "run_gen_and_econ"], [38, 3, 1, "", "set_resource_data"], [38, 4, 1, "", "site"], [38, 3, 1, "", "tz_elev_check"]], "reV.SAM.generation.TroughPhysicalHeat": [[39, 2, 1, "", "OUTAGE_CONFIG_KEY"], [39, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [39, 2, 1, "", "PYSAM"], [39, 3, 1, "", "add_scheduled_losses"], [39, 3, 1, "", "annual_energy"], [39, 3, 1, "", "assign_inputs"], [39, 4, 1, "", "attr_dict"], [39, 3, 1, "", "cf_mean"], [39, 3, 1, "", "cf_profile"], [39, 3, 1, "", "check_resource_data"], [39, 3, 1, "", "collect_outputs"], [39, 3, 1, "", "default"], [39, 3, 1, "", "drop_leap"], [39, 3, 1, "", "energy_yield"], [39, 3, 1, "", "ensure_res_len"], [39, 3, 1, "", "execute"], [39, 3, 1, "", "gen_profile"], [39, 3, 1, "", "get_sam_res"], [39, 3, 1, "", "get_time_interval"], [39, 4, 1, "", "has_timezone"], [39, 4, 1, "", "input_list"], [39, 3, 1, "", "make_datetime"], [39, 4, 1, "", "meta"], [39, 4, 1, "", "module"], [39, 4, 1, "", "outage_seed"], [39, 3, 1, "", "outputs_to_utc_arr"], [39, 4, 1, "", 
"pysam"], [39, 3, 1, "", "reV_run"], [39, 3, 1, "", "run"], [39, 3, 1, "", "run_gen_and_econ"], [39, 3, 1, "", "set_resource_data"], [39, 4, 1, "", "site"], [39, 3, 1, "", "tz_elev_check"]], "reV.SAM.generation.WindPower": [[40, 2, 1, "", "OUTAGE_CONFIG_KEY"], [40, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [40, 2, 1, "", "POWER_CURVE_CONFIG_KEY"], [40, 2, 1, "", "PYSAM"], [40, 3, 1, "", "add_power_curve_losses"], [40, 3, 1, "", "add_scheduled_losses"], [40, 3, 1, "", "annual_energy"], [40, 3, 1, "", "assign_inputs"], [40, 4, 1, "", "attr_dict"], [40, 3, 1, "", "cf_mean"], [40, 3, 1, "", "cf_profile"], [40, 3, 1, "", "check_resource_data"], [40, 3, 1, "", "collect_outputs"], [40, 3, 1, "", "default"], [40, 3, 1, "", "drop_leap"], [40, 3, 1, "", "energy_yield"], [40, 3, 1, "", "ensure_res_len"], [40, 3, 1, "", "execute"], [40, 3, 1, "", "gen_profile"], [40, 3, 1, "", "get_sam_res"], [40, 3, 1, "", "get_time_interval"], [40, 4, 1, "", "has_timezone"], [40, 4, 1, "", "input_list"], [40, 4, 1, "", "input_power_curve"], [40, 3, 1, "", "make_datetime"], [40, 4, 1, "", "meta"], [40, 4, 1, "", "module"], [40, 4, 1, "", "outage_seed"], [40, 3, 1, "", "outputs_to_utc_arr"], [40, 4, 1, "", "pysam"], [40, 3, 1, "", "reV_run"], [40, 3, 1, "", "run"], [40, 3, 1, "", "run_gen_and_econ"], [40, 3, 1, "", "set_resource_data"], [40, 4, 1, "", "site"], [40, 3, 1, "", "tz_elev_check"], [40, 3, 1, "", "wind_resource_from_input"]], "reV.SAM.generation.WindPowerPD": [[41, 2, 1, "", "OUTAGE_CONFIG_KEY"], [41, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [41, 2, 1, "", "POWER_CURVE_CONFIG_KEY"], [41, 2, 1, "", "PYSAM"], [41, 3, 1, "", "add_power_curve_losses"], [41, 3, 1, "", "add_scheduled_losses"], [41, 3, 1, "", "annual_energy"], [41, 3, 1, "", "assign_inputs"], [41, 4, 1, "", "attr_dict"], [41, 3, 1, "", "cf_mean"], [41, 3, 1, "", "cf_profile"], [41, 3, 1, "", "check_resource_data"], [41, 3, 1, "", "collect_outputs"], [41, 3, 1, "", "default"], [41, 3, 1, "", "drop_leap"], [41, 3, 1, "", 
"energy_yield"], [41, 3, 1, "", "ensure_res_len"], [41, 3, 1, "", "execute"], [41, 3, 1, "", "gen_profile"], [41, 3, 1, "", "get_sam_res"], [41, 3, 1, "", "get_time_interval"], [41, 4, 1, "", "has_timezone"], [41, 4, 1, "", "input_list"], [41, 4, 1, "", "input_power_curve"], [41, 3, 1, "", "make_datetime"], [41, 4, 1, "", "meta"], [41, 4, 1, "", "module"], [41, 4, 1, "", "outage_seed"], [41, 3, 1, "", "outputs_to_utc_arr"], [41, 4, 1, "", "pysam"], [41, 3, 1, "", "reV_run"], [41, 3, 1, "", "run"], [41, 3, 1, "", "run_gen_and_econ"], [41, 3, 1, "", "set_resource_data"], [41, 4, 1, "", "site"], [41, 3, 1, "", "tz_elev_check"], [41, 3, 1, "", "wind_resource_from_input"]], "reV.SAM.version_checker": [[43, 1, 1, "", "PySamVersionChecker"]], "reV.SAM.version_checker.PySamVersionChecker": [[43, 4, 1, "", "pysam_version"], [43, 3, 1, "", "run"]], "reV.SAM.windbos": [[45, 1, 1, "", "WindBos"]], "reV.SAM.windbos.WindBos": [[45, 4, 1, "", "bos_cost"], [45, 4, 1, "", "hub_height"], [45, 4, 1, "", "machine_rating"], [45, 4, 1, "", "number_of_turbines"], [45, 4, 1, "", "output"], [45, 3, 1, "", "reV_run"], [45, 4, 1, "", "rotor_diameter"], [45, 4, 1, "", "sales_tax_cost"], [45, 4, 1, "", "sales_tax_mult"], [45, 4, 1, "", "total_installed_cost"], [45, 4, 1, "", "turbine_capital_cost"], [45, 4, 1, "", "turbine_cost"]], "reV.bespoke": [[47, 0, 0, "-", "bespoke"], [52, 0, 0, "-", "cli_bespoke"], [53, 0, 0, "-", "gradient_free"], [55, 0, 0, "-", "pack_turbs"], [58, 0, 0, "-", "place_turbines"], [61, 0, 0, "-", "plotting_functions"]], "reV.bespoke.bespoke": [[48, 1, 1, "", "BespokeMultiPlantData"], [49, 1, 1, "", "BespokeSinglePlant"], [50, 1, 1, "", "BespokeSinglePlantData"], [51, 1, 1, "", "BespokeWindPlants"]], "reV.bespoke.bespoke.BespokeMultiPlantData": [[48, 3, 1, "", "get_preloaded_data_for_gid"]], "reV.bespoke.bespoke.BespokeSinglePlant": [[49, 3, 1, "", "agg_data_layers"], [49, 4, 1, "", "annual_time_indexes"], [49, 3, 1, "", "check_dependencies"], [49, 3, 1, "", "close"], 
[49, 3, 1, "", "get_lcoe_kwargs"], [49, 3, 1, "", "get_weighted_res_dir"], [49, 3, 1, "", "get_weighted_res_ts"], [49, 3, 1, "", "get_wind_handler"], [49, 4, 1, "", "gid"], [49, 4, 1, "", "hub_height"], [49, 4, 1, "", "include_mask"], [49, 3, 1, "", "initialize_wind_plant_ts"], [49, 4, 1, "", "meta"], [49, 4, 1, "", "original_sam_sys_inputs"], [49, 4, 1, "", "outputs"], [49, 4, 1, "", "pixel_side_length"], [49, 4, 1, "", "plant_optimizer"], [49, 3, 1, "", "recalc_lcoe"], [49, 4, 1, "", "res_df"], [49, 3, 1, "", "run"], [49, 3, 1, "", "run_plant_optimization"], [49, 3, 1, "", "run_wind_plant_ts"], [49, 4, 1, "", "sam_sys_inputs"], [49, 4, 1, "", "sc_point"], [49, 4, 1, "", "wind_dist"], [49, 4, 1, "", "wind_plant_pd"], [49, 4, 1, "", "wind_plant_ts"], [49, 4, 1, "", "years"]], "reV.bespoke.bespoke.BespokeWindPlants": [[51, 4, 1, "", "completed_gids"], [51, 4, 1, "", "gids"], [51, 4, 1, "", "meta"], [51, 4, 1, "", "outputs"], [51, 3, 1, "", "run"], [51, 3, 1, "", "run_parallel"], [51, 3, 1, "", "run_serial"], [51, 3, 1, "", "sam_sys_inputs_with_site_data"], [51, 3, 1, "", "save_outputs"], [51, 4, 1, "", "shape"], [51, 4, 1, "", "slice_lookup"]], "reV.bespoke.gradient_free": [[54, 1, 1, "", "GeneticAlgorithm"]], "reV.bespoke.gradient_free.GeneticAlgorithm": [[54, 3, 1, "", "chromosome_2_variables"], [54, 3, 1, "", "crossover"], [54, 3, 1, "", "initialize_bits"], [54, 3, 1, "", "initialize_design_variables"], [54, 3, 1, "", "initialize_fitness"], [54, 3, 1, "", "initialize_population"], [54, 3, 1, "", "mutate"], [54, 3, 1, "", "optimize_ga"]], "reV.bespoke.pack_turbs": [[56, 1, 1, "", "PackTurbines"], [57, 5, 1, "", "smallest_area_with_tiebreakers"]], "reV.bespoke.pack_turbs.PackTurbines": [[56, 3, 1, "", "clear"], [56, 3, 1, "", "pack_turbines_poly"]], "reV.bespoke.place_turbines": [[59, 1, 1, "", "PlaceTurbines"], [60, 5, 1, "", "none_until_optimized"]], "reV.bespoke.place_turbines.PlaceTurbines": [[59, 4, 1, "", "aep"], [59, 4, 1, "", "area"], [59, 4, 1, "", 
"capacity"], [59, 4, 1, "", "capacity_density"], [59, 4, 1, "", "capital_cost"], [59, 3, 1, "", "capital_cost_per_kw"], [59, 4, 1, "", "convex_hull"], [59, 4, 1, "", "convex_hull_area"], [59, 4, 1, "", "convex_hull_capacity_density"], [59, 3, 1, "", "define_exclusions"], [59, 4, 1, "", "fixed_charge_rate"], [59, 4, 1, "", "fixed_operating_cost"], [59, 4, 1, "", "full_cell_area"], [59, 4, 1, "", "full_cell_capacity_density"], [59, 3, 1, "", "initialize_packing"], [59, 4, 1, "", "nturbs"], [59, 4, 1, "", "objective"], [59, 3, 1, "", "optimization_objective"], [59, 3, 1, "", "optimize"], [59, 3, 1, "", "place_turbines"], [59, 4, 1, "", "turbine_x"], [59, 4, 1, "", "turbine_y"], [59, 4, 1, "", "variable_operating_cost"]], "reV.bespoke.plotting_functions": [[62, 5, 1, "", "get_xy"], [63, 5, 1, "", "plot_poly"], [64, 5, 1, "", "plot_turbines"], [65, 5, 1, "", "plot_windrose"]], "reV.config": [[68, 0, 0, "-", "base_analysis_config"], [70, 0, 0, "-", "base_config"], [72, 0, 0, "-", "cli_project_points"], [73, 0, 0, "-", "curtailment"], [75, 0, 0, "-", "execution"], [79, 0, 0, "-", "output_request"], [82, 0, 0, "-", "project_points"], [85, 0, 0, "-", "sam_config"]], "reV.config.base_analysis_config": [[69, 1, 1, "", "AnalysisConfig"]], "reV.config.base_analysis_config.AnalysisConfig": [[69, 2, 1, "", "REQUIREMENTS"], [69, 2, 1, "", "STR_REP"], [69, 4, 1, "", "analysis_years"], [69, 3, 1, "", "check_files"], [69, 3, 1, "", "check_overwrite_keys"], [69, 3, 1, "", "clear"], [69, 4, 1, "", "config_dir"], [69, 4, 1, "", "config_keys"], [69, 3, 1, "", "copy"], [69, 4, 1, "", "execution_control"], [69, 3, 1, "", "fromkeys"], [69, 3, 1, "", "get"], [69, 3, 1, "", "items"], [69, 3, 1, "", "keys"], [69, 4, 1, "", "log_directory"], [69, 4, 1, "", "log_level"], [69, 4, 1, "", "name"], [69, 3, 1, "", "pop"], [69, 3, 1, "", "popitem"], [69, 3, 1, "", "resolve_path"], [69, 3, 1, "", "set_self_dict"], [69, 3, 1, "", "setdefault"], [69, 3, 1, "", "str_replace_and_resolve"], [69, 3, 1, "", 
"update"], [69, 3, 1, "", "values"]], "reV.config.base_config": [[71, 1, 1, "", "BaseConfig"]], "reV.config.base_config.BaseConfig": [[71, 2, 1, "", "REQUIREMENTS"], [71, 2, 1, "", "STR_REP"], [71, 3, 1, "", "check_files"], [71, 3, 1, "", "check_overwrite_keys"], [71, 3, 1, "", "clear"], [71, 4, 1, "", "config_dir"], [71, 4, 1, "", "config_keys"], [71, 3, 1, "", "copy"], [71, 3, 1, "", "fromkeys"], [71, 3, 1, "", "get"], [71, 3, 1, "", "items"], [71, 3, 1, "", "keys"], [71, 4, 1, "", "log_level"], [71, 4, 1, "", "name"], [71, 3, 1, "", "pop"], [71, 3, 1, "", "popitem"], [71, 3, 1, "", "resolve_path"], [71, 3, 1, "", "set_self_dict"], [71, 3, 1, "", "setdefault"], [71, 3, 1, "", "str_replace_and_resolve"], [71, 3, 1, "", "update"], [71, 3, 1, "", "values"]], "reV.config.curtailment": [[74, 1, 1, "", "Curtailment"]], "reV.config.curtailment.Curtailment": [[74, 2, 1, "", "REQUIREMENTS"], [74, 2, 1, "", "STR_REP"], [74, 3, 1, "", "check_files"], [74, 3, 1, "", "check_overwrite_keys"], [74, 3, 1, "", "clear"], [74, 4, 1, "", "config_dir"], [74, 4, 1, "", "config_keys"], [74, 3, 1, "", "copy"], [74, 4, 1, "", "date_range"], [74, 4, 1, "", "dawn_dusk"], [74, 4, 1, "", "equation"], [74, 3, 1, "", "fromkeys"], [74, 3, 1, "", "get"], [74, 3, 1, "", "items"], [74, 3, 1, "", "keys"], [74, 4, 1, "", "log_level"], [74, 4, 1, "", "months"], [74, 4, 1, "", "name"], [74, 3, 1, "", "pop"], [74, 3, 1, "", "popitem"], [74, 4, 1, "", "precipitation"], [74, 4, 1, "", "probability"], [74, 4, 1, "", "random_seed"], [74, 3, 1, "", "resolve_path"], [74, 3, 1, "", "set_self_dict"], [74, 3, 1, "", "setdefault"], [74, 3, 1, "", "str_replace_and_resolve"], [74, 4, 1, "", "temperature"], [74, 3, 1, "", "update"], [74, 3, 1, "", "values"], [74, 4, 1, "", "wind_speed"]], "reV.config.execution": [[76, 1, 1, "", "BaseExecutionConfig"], [77, 1, 1, "", "HPCConfig"], [78, 1, 1, "", "SlurmConfig"]], "reV.config.execution.BaseExecutionConfig": [[76, 2, 1, "", "REQUIREMENTS"], [76, 2, 1, "", "STR_REP"], 
[76, 3, 1, "", "check_files"], [76, 3, 1, "", "check_overwrite_keys"], [76, 3, 1, "", "clear"], [76, 4, 1, "", "config_dir"], [76, 4, 1, "", "config_keys"], [76, 3, 1, "", "copy"], [76, 3, 1, "", "fromkeys"], [76, 3, 1, "", "get"], [76, 3, 1, "", "items"], [76, 3, 1, "", "keys"], [76, 4, 1, "", "log_level"], [76, 4, 1, "", "max_workers"], [76, 4, 1, "", "memory_utilization_limit"], [76, 4, 1, "", "name"], [76, 4, 1, "", "nodes"], [76, 4, 1, "", "option"], [76, 3, 1, "", "pop"], [76, 3, 1, "", "popitem"], [76, 3, 1, "", "resolve_path"], [76, 3, 1, "", "set_self_dict"], [76, 3, 1, "", "setdefault"], [76, 4, 1, "", "sh_script"], [76, 4, 1, "", "sites_per_worker"], [76, 3, 1, "", "str_replace_and_resolve"], [76, 3, 1, "", "update"], [76, 3, 1, "", "values"]], "reV.config.execution.HPCConfig": [[77, 2, 1, "", "REQUIREMENTS"], [77, 2, 1, "", "STR_REP"], [77, 4, 1, "", "allocation"], [77, 3, 1, "", "check_files"], [77, 3, 1, "", "check_overwrite_keys"], [77, 3, 1, "", "clear"], [77, 4, 1, "", "conda_env"], [77, 4, 1, "", "config_dir"], [77, 4, 1, "", "config_keys"], [77, 3, 1, "", "copy"], [77, 4, 1, "", "feature"], [77, 3, 1, "", "fromkeys"], [77, 3, 1, "", "get"], [77, 3, 1, "", "items"], [77, 3, 1, "", "keys"], [77, 4, 1, "", "log_level"], [77, 4, 1, "", "max_workers"], [77, 4, 1, "", "memory_utilization_limit"], [77, 4, 1, "", "module"], [77, 4, 1, "", "name"], [77, 4, 1, "", "nodes"], [77, 4, 1, "", "option"], [77, 3, 1, "", "pop"], [77, 3, 1, "", "popitem"], [77, 3, 1, "", "resolve_path"], [77, 3, 1, "", "set_self_dict"], [77, 3, 1, "", "setdefault"], [77, 4, 1, "", "sh_script"], [77, 4, 1, "", "sites_per_worker"], [77, 3, 1, "", "str_replace_and_resolve"], [77, 3, 1, "", "update"], [77, 3, 1, "", "values"]], "reV.config.execution.SlurmConfig": [[78, 2, 1, "", "REQUIREMENTS"], [78, 2, 1, "", "STR_REP"], [78, 4, 1, "", "allocation"], [78, 3, 1, "", "check_files"], [78, 3, 1, "", "check_overwrite_keys"], [78, 3, 1, "", "clear"], [78, 4, 1, "", "conda_env"], [78, 4, 1, 
"", "config_dir"], [78, 4, 1, "", "config_keys"], [78, 3, 1, "", "copy"], [78, 4, 1, "", "feature"], [78, 3, 1, "", "fromkeys"], [78, 3, 1, "", "get"], [78, 3, 1, "", "items"], [78, 3, 1, "", "keys"], [78, 4, 1, "", "log_level"], [78, 4, 1, "", "max_workers"], [78, 4, 1, "", "memory"], [78, 4, 1, "", "memory_utilization_limit"], [78, 4, 1, "", "module"], [78, 4, 1, "", "name"], [78, 4, 1, "", "nodes"], [78, 4, 1, "", "option"], [78, 3, 1, "", "pop"], [78, 3, 1, "", "popitem"], [78, 3, 1, "", "resolve_path"], [78, 3, 1, "", "set_self_dict"], [78, 3, 1, "", "setdefault"], [78, 4, 1, "", "sh_script"], [78, 4, 1, "", "sites_per_worker"], [78, 3, 1, "", "str_replace_and_resolve"], [78, 3, 1, "", "update"], [78, 3, 1, "", "values"], [78, 4, 1, "", "walltime"]], "reV.config.output_request": [[80, 1, 1, "", "OutputRequest"], [81, 1, 1, "", "SAMOutputRequest"]], "reV.config.output_request.OutputRequest": [[80, 3, 1, "", "__add__"], [80, 3, 1, "", "__mul__"], [80, 3, 1, "", "append"], [80, 3, 1, "", "clear"], [80, 3, 1, "", "copy"], [80, 3, 1, "", "count"], [80, 3, 1, "", "extend"], [80, 3, 1, "", "index"], [80, 3, 1, "", "insert"], [80, 3, 1, "", "pop"], [80, 3, 1, "", "remove"], [80, 3, 1, "", "reverse"], [80, 3, 1, "", "sort"]], "reV.config.output_request.SAMOutputRequest": [[81, 3, 1, "", "__add__"], [81, 3, 1, "", "__mul__"], [81, 3, 1, "", "append"], [81, 3, 1, "", "clear"], [81, 3, 1, "", "copy"], [81, 3, 1, "", "count"], [81, 3, 1, "", "extend"], [81, 3, 1, "", "index"], [81, 3, 1, "", "insert"], [81, 3, 1, "", "pop"], [81, 3, 1, "", "remove"], [81, 3, 1, "", "reverse"], [81, 3, 1, "", "sort"]], "reV.config.project_points": [[83, 1, 1, "", "PointsControl"], [84, 1, 1, "", "ProjectPoints"]], "reV.config.project_points.PointsControl": [[83, 4, 1, "", "N"], [83, 4, 1, "", "project_points"], [83, 4, 1, "", "sites"], [83, 4, 1, "", "sites_per_split"], [83, 3, 1, "", "split"], [83, 4, 1, "", "split_range"]], "reV.config.project_points.ProjectPoints": [[84, 4, 1, "", 
"all_sam_input_keys"], [84, 4, 1, "", "curtailment"], [84, 4, 1, "", "d"], [84, 4, 1, "", "df"], [84, 3, 1, "", "get_sites_from_config"], [84, 4, 1, "", "gids"], [84, 4, 1, "", "h"], [84, 3, 1, "", "index"], [84, 3, 1, "", "join_df"], [84, 3, 1, "", "lat_lon_coords"], [84, 3, 1, "", "regions"], [84, 4, 1, "", "sam_config_ids"], [84, 4, 1, "", "sam_config_obj"], [84, 4, 1, "", "sam_inputs"], [84, 4, 1, "", "sites"], [84, 4, 1, "", "sites_as_slice"], [84, 3, 1, "", "split"], [84, 4, 1, "", "tech"]], "reV.config.sam_config": [[86, 1, 1, "", "SAMConfig"], [87, 1, 1, "", "SAMInputsChecker"]], "reV.config.sam_config.SAMConfig": [[86, 2, 1, "", "REQUIREMENTS"], [86, 2, 1, "", "STR_REP"], [86, 4, 1, "", "bifacial"], [86, 3, 1, "", "check_files"], [86, 3, 1, "", "check_overwrite_keys"], [86, 3, 1, "", "clear"], [86, 4, 1, "", "clearsky"], [86, 4, 1, "", "config_dir"], [86, 4, 1, "", "config_keys"], [86, 3, 1, "", "copy"], [86, 4, 1, "", "downscale"], [86, 3, 1, "", "fromkeys"], [86, 3, 1, "", "get"], [86, 4, 1, "", "icing"], [86, 4, 1, "", "inputs"], [86, 3, 1, "", "items"], [86, 3, 1, "", "keys"], [86, 4, 1, "", "log_level"], [86, 4, 1, "", "name"], [86, 3, 1, "", "pop"], [86, 3, 1, "", "popitem"], [86, 3, 1, "", "resolve_path"], [86, 3, 1, "", "set_self_dict"], [86, 3, 1, "", "setdefault"], [86, 3, 1, "", "str_replace_and_resolve"], [86, 4, 1, "", "time_index_step"], [86, 3, 1, "", "update"], [86, 3, 1, "", "values"]], "reV.config.sam_config.SAMInputsChecker": [[87, 3, 1, "", "check"], [87, 3, 1, "", "check_pv"]], "reV.econ": [[89, 0, 0, "-", "cli_econ"], [90, 0, 0, "-", "econ"], [92, 0, 0, "-", "economies_of_scale"], [94, 0, 0, "-", "utilities"]], "reV.econ.econ": [[91, 1, 1, "", "Econ"]], "reV.econ.econ.Econ": [[91, 2, 1, "", "OPTIONS"], [91, 3, 1, "", "add_site_data_to_pp"], [91, 4, 1, "", "cf_file"], [91, 3, 1, "", "flush"], [91, 3, 1, "", "get_pc"], [91, 3, 1, "", "get_sites_per_worker"], [91, 3, 1, "", "handle_leap_ti"], [91, 4, 1, "", "meta"], [91, 4, 1, "", 
"out"], [91, 4, 1, "", "out_chunk"], [91, 4, 1, "", "output_request"], [91, 4, 1, "", "points_control"], [91, 4, 1, "", "project_points"], [91, 3, 1, "", "run"], [91, 4, 1, "", "run_attrs"], [91, 4, 1, "", "sam_configs"], [91, 4, 1, "", "sam_metas"], [91, 4, 1, "", "sam_module"], [91, 4, 1, "", "site_data"], [91, 3, 1, "", "site_index"], [91, 4, 1, "", "site_limit"], [91, 4, 1, "", "site_mem"], [91, 4, 1, "", "tech"], [91, 4, 1, "", "time_index"], [91, 3, 1, "", "unpack_futures"], [91, 3, 1, "", "unpack_output"], [91, 4, 1, "", "year"]], "reV.econ.economies_of_scale": [[93, 1, 1, "", "EconomiesOfScale"]], "reV.econ.economies_of_scale.EconomiesOfScale": [[93, 4, 1, "", "aep"], [93, 4, 1, "", "capital_cost_scalar"], [93, 4, 1, "", "fcr"], [93, 4, 1, "", "foc"], [93, 3, 1, "", "is_method"], [93, 3, 1, "", "is_num"], [93, 4, 1, "", "raw_capital_cost"], [93, 4, 1, "", "raw_lcoe"], [93, 4, 1, "", "scaled_capital_cost"], [93, 4, 1, "", "scaled_lcoe"], [93, 4, 1, "", "system_capacity"], [93, 4, 1, "", "vars"], [93, 4, 1, "", "voc"]], "reV.econ.utilities": [[95, 5, 1, "", "lcoe_fcr"]], "reV.generation": [[97, 0, 0, "-", "base"], [99, 0, 0, "-", "cli_gen"], [100, 0, 0, "-", "generation"]], "reV.generation.base": [[98, 1, 1, "", "BaseGen"]], "reV.generation.base.BaseGen": [[98, 3, 1, "", "add_site_data_to_pp"], [98, 3, 1, "", "flush"], [98, 3, 1, "", "get_pc"], [98, 3, 1, "", "get_sites_per_worker"], [98, 3, 1, "", "handle_leap_ti"], [98, 4, 1, "", "meta"], [98, 4, 1, "", "out"], [98, 4, 1, "", "out_chunk"], [98, 4, 1, "", "output_request"], [98, 4, 1, "", "points_control"], [98, 4, 1, "", "project_points"], [98, 4, 1, "", "run_attrs"], [98, 4, 1, "", "sam_configs"], [98, 4, 1, "", "sam_metas"], [98, 4, 1, "", "sam_module"], [98, 4, 1, "", "site_data"], [98, 3, 1, "", "site_index"], [98, 4, 1, "", "site_limit"], [98, 4, 1, "", "site_mem"], [98, 4, 1, "", "tech"], [98, 4, 1, "", "time_index"], [98, 3, 1, "", "unpack_futures"], [98, 3, 1, "", "unpack_output"], [98, 4, 1, "", 
"year"]], "reV.generation.generation": [[101, 1, 1, "", "Gen"]], "reV.generation.generation.Gen": [[101, 2, 1, "", "OPTIONS"], [101, 3, 1, "", "add_site_data_to_pp"], [101, 3, 1, "", "flush"], [101, 3, 1, "", "get_pc"], [101, 3, 1, "", "get_sites_per_worker"], [101, 3, 1, "", "handle_leap_ti"], [101, 4, 1, "", "lr_res_file"], [101, 4, 1, "", "meta"], [101, 4, 1, "", "out"], [101, 4, 1, "", "out_chunk"], [101, 4, 1, "", "output_request"], [101, 4, 1, "", "points_control"], [101, 4, 1, "", "project_points"], [101, 4, 1, "", "res_file"], [101, 3, 1, "", "run"], [101, 4, 1, "", "run_attrs"], [101, 4, 1, "", "sam_configs"], [101, 4, 1, "", "sam_metas"], [101, 4, 1, "", "sam_module"], [101, 4, 1, "", "site_data"], [101, 3, 1, "", "site_index"], [101, 4, 1, "", "site_limit"], [101, 4, 1, "", "site_mem"], [101, 4, 1, "", "tech"], [101, 4, 1, "", "time_index"], [101, 3, 1, "", "unpack_futures"], [101, 3, 1, "", "unpack_output"], [101, 4, 1, "", "year"]], "reV.handlers": [[103, 0, 0, "-", "cli_collect"], [104, 0, 0, "-", "cli_multi_year"], [105, 0, 0, "-", "exclusions"], [107, 0, 0, "-", "multi_year"], [111, 0, 0, "-", "outputs"], [113, 0, 0, "-", "transmission"]], "reV.handlers.exclusions": [[106, 1, 1, "", "ExclusionLayers"]], "reV.handlers.exclusions.ExclusionLayers": [[106, 4, 1, "", "chunks"], [106, 3, 1, "", "close"], [106, 4, 1, "", "crs"], [106, 3, 1, "", "get_layer_crs"], [106, 3, 1, "", "get_layer_description"], [106, 3, 1, "", "get_layer_profile"], [106, 3, 1, "", "get_layer_values"], [106, 3, 1, "", "get_nodata_value"], [106, 4, 1, "", "h5"], [106, 4, 1, "", "iarr"], [106, 4, 1, "", "latitude"], [106, 4, 1, "", "layers"], [106, 4, 1, "", "longitude"], [106, 4, 1, "", "pixel_area"], [106, 4, 1, "", "profile"], [106, 4, 1, "", "shape"]], "reV.handlers.multi_year": [[108, 1, 1, "", "MultiYear"], [109, 1, 1, "", "MultiYearGroup"], [110, 5, 1, "", "my_collect_groups"]], "reV.handlers.multi_year.MultiYear": [[108, 3, 1, "", "CV"], [108, 4, 1, "", "SAM_configs"], [108, 
3, 1, "", "add_dataset"], [108, 4, 1, "", "adders"], [108, 4, 1, "", "attrs"], [108, 4, 1, "", "chunks"], [108, 3, 1, "", "close"], [108, 3, 1, "", "collect"], [108, 3, 1, "", "collect_means"], [108, 3, 1, "", "collect_profiles"], [108, 4, 1, "", "coordinates"], [108, 4, 1, "", "data_version"], [108, 4, 1, "", "datasets"], [108, 3, 1, "", "df_str_decode"], [108, 4, 1, "", "dsets"], [108, 4, 1, "", "dtypes"], [108, 4, 1, "", "full_version_record"], [108, 3, 1, "", "get_SAM_df"], [108, 3, 1, "", "get_attrs"], [108, 3, 1, "", "get_config"], [108, 3, 1, "", "get_dset_properties"], [108, 3, 1, "", "get_meta_arr"], [108, 3, 1, "", "get_scale_factor"], [108, 3, 1, "", "get_units"], [108, 4, 1, "", "global_attrs"], [108, 4, 1, "", "groups"], [108, 4, 1, "", "h5"], [108, 3, 1, "", "init_h5"], [108, 3, 1, "", "is_profile"], [108, 4, 1, "", "lat_lon"], [108, 3, 1, "", "means"], [108, 4, 1, "", "meta"], [108, 3, 1, "", "open_dataset"], [108, 4, 1, "", "package"], [108, 3, 1, "", "parse_source_files_pattern"], [108, 3, 1, "", "pass_through"], [108, 3, 1, "", "preload_SAM"], [108, 4, 1, "", "res_dsets"], [108, 4, 1, "", "resource_datasets"], [108, 4, 1, "", "run_attrs"], [108, 4, 1, "", "scale_factors"], [108, 3, 1, "", "set_configs"], [108, 3, 1, "", "set_version_attr"], [108, 4, 1, "", "shape"], [108, 4, 1, "", "shapes"], [108, 4, 1, "", "source"], [108, 3, 1, "", "stdev"], [108, 4, 1, "", "time_index"], [108, 4, 1, "", "units"], [108, 3, 1, "", "update_dset"], [108, 4, 1, "", "version"], [108, 4, 1, "", "writable"], [108, 3, 1, "", "write_dataset"], [108, 3, 1, "", "write_means"], [108, 3, 1, "", "write_profiles"]], "reV.handlers.multi_year.MultiYearGroup": [[109, 4, 1, "", "dsets"], [109, 4, 1, "", "name"], [109, 4, 1, "", "pass_through_dsets"], [109, 4, 1, "", "source_files"]], "reV.handlers.outputs": [[112, 1, 1, "", "Outputs"]], "reV.handlers.outputs.Outputs": [[112, 4, 1, "", "SAM_configs"], [112, 3, 1, "", "add_dataset"], [112, 4, 1, "", "adders"], [112, 4, 1, "", 
"attrs"], [112, 4, 1, "", "chunks"], [112, 3, 1, "", "close"], [112, 4, 1, "", "coordinates"], [112, 4, 1, "", "data_version"], [112, 4, 1, "", "datasets"], [112, 3, 1, "", "df_str_decode"], [112, 4, 1, "", "dsets"], [112, 4, 1, "", "dtypes"], [112, 4, 1, "", "full_version_record"], [112, 3, 1, "", "get_SAM_df"], [112, 3, 1, "", "get_attrs"], [112, 3, 1, "", "get_config"], [112, 3, 1, "", "get_dset_properties"], [112, 3, 1, "", "get_meta_arr"], [112, 3, 1, "", "get_scale_factor"], [112, 3, 1, "", "get_units"], [112, 4, 1, "", "global_attrs"], [112, 4, 1, "", "groups"], [112, 4, 1, "", "h5"], [112, 3, 1, "", "init_h5"], [112, 4, 1, "", "lat_lon"], [112, 4, 1, "", "meta"], [112, 3, 1, "", "open_dataset"], [112, 4, 1, "", "package"], [112, 3, 1, "", "preload_SAM"], [112, 4, 1, "", "res_dsets"], [112, 4, 1, "", "resource_datasets"], [112, 4, 1, "", "run_attrs"], [112, 4, 1, "", "scale_factors"], [112, 3, 1, "", "set_configs"], [112, 3, 1, "", "set_version_attr"], [112, 4, 1, "", "shape"], [112, 4, 1, "", "shapes"], [112, 4, 1, "", "source"], [112, 4, 1, "", "time_index"], [112, 4, 1, "", "units"], [112, 3, 1, "", "update_dset"], [112, 4, 1, "", "version"], [112, 4, 1, "", "writable"], [112, 3, 1, "", "write_dataset"], [112, 3, 1, "", "write_means"], [112, 3, 1, "", "write_profiles"]], "reV.handlers.transmission": [[114, 1, 1, "", "TransmissionCosts"], [115, 1, 1, "", "TransmissionFeatures"]], "reV.handlers.transmission.TransmissionCosts": [[114, 3, 1, "", "available_capacity"], [114, 3, 1, "", "check_availability"], [114, 3, 1, "", "check_feature_dependencies"], [114, 3, 1, "", "connect"], [114, 3, 1, "", "cost"], [114, 3, 1, "", "feature_capacity"], [114, 3, 1, "", "feature_costs"]], "reV.handlers.transmission.TransmissionFeatures": [[115, 3, 1, "", "available_capacity"], [115, 3, 1, "", "check_availability"], [115, 3, 1, "", "check_feature_dependencies"], [115, 3, 1, "", "connect"], [115, 3, 1, "", "cost"], [115, 3, 1, "", "feature_capacity"]], "reV.hybrids": [[117, 
0, 0, "-", "cli_hybrids"], [118, 0, 0, "-", "hybrid_methods"], [123, 0, 0, "-", "hybrids"]], "reV.hybrids.hybrid_methods": [[119, 5, 1, "", "aggregate_capacity"], [120, 5, 1, "", "aggregate_capacity_factor"], [121, 5, 1, "", "aggregate_solar_capacity"], [122, 5, 1, "", "aggregate_wind_capacity"]], "reV.hybrids.hybrids": [[124, 1, 1, "", "ColNameFormatter"], [125, 1, 1, "", "Hybridization"], [126, 1, 1, "", "HybridsData"], [127, 1, 1, "", "MetaHybridizer"], [128, 1, 1, "", "RatioColumns"]], "reV.hybrids.hybrids.ColNameFormatter": [[124, 3, 1, "", "fmt"]], "reV.hybrids.hybrids.Hybridization": [[125, 4, 1, "", "hybrid_meta"], [125, 4, 1, "", "hybrid_time_index"], [125, 4, 1, "", "profiles"], [125, 3, 1, "", "run"], [125, 3, 1, "", "run_meta"], [125, 3, 1, "", "run_profiles"], [125, 3, 1, "", "save_profiles"], [125, 4, 1, "", "solar_meta"], [125, 4, 1, "", "solar_time_index"], [125, 4, 1, "", "wind_meta"], [125, 4, 1, "", "wind_time_index"]], "reV.hybrids.hybrids.HybridsData": [[126, 3, 1, "", "contains_col"], [126, 4, 1, "", "hybrid_time_index"], [126, 4, 1, "", "solar_meta"], [126, 4, 1, "", "solar_time_index"], [126, 3, 1, "", "validate"], [126, 4, 1, "", "wind_meta"], [126, 4, 1, "", "wind_time_index"]], "reV.hybrids.hybrids.MetaHybridizer": [[127, 4, 1, "", "hybrid_meta"], [127, 3, 1, "", "hybridize"], [127, 4, 1, "", "solar_profile_indices_map"], [127, 3, 1, "", "validate_input"], [127, 4, 1, "", "wind_profile_indices_map"]], "reV.hybrids.hybrids.RatioColumns": [[128, 3, 1, "", "__add__"], [128, 3, 1, "", "__mul__"], [128, 3, 1, "", "count"], [128, 2, 1, "", "denom"], [128, 2, 1, "", "fixed"], [128, 3, 1, "", "index"], [128, 2, 1, "", "num"]], "reV.losses": [[130, 0, 0, "-", "power_curve"], [142, 0, 0, "-", "scheduled"], [147, 0, 0, "-", "utils"]], "reV.losses.power_curve": [[131, 1, 1, "", "AbstractPowerCurveTransformation"], [132, 1, 1, "", "ExponentialStretching"], [133, 1, 1, "", "HorizontalTranslation"], [134, 1, 1, "", "LinearStretching"], [135, 1, 1, "", 
"PowerCurve"], [136, 1, 1, "", "PowerCurveLosses"], [137, 1, 1, "", "PowerCurveLossesInput"], [138, 1, 1, "", "PowerCurveLossesMixin"], [139, 1, 1, "", "PowerCurveWindResource"], [140, 6, 1, "", "TRANSFORMATIONS"], [141, 5, 1, "", "adjust_power_curve"]], "reV.losses.power_curve.AbstractPowerCurveTransformation": [[131, 3, 1, "", "apply"], [131, 4, 1, "", "bounds"], [131, 4, 1, "", "optm_bounds"], [131, 2, 1, "", "power_curve"]], "reV.losses.power_curve.ExponentialStretching": [[132, 3, 1, "", "apply"], [132, 4, 1, "", "bounds"], [132, 4, 1, "", "optm_bounds"], [132, 2, 1, "", "power_curve"]], "reV.losses.power_curve.HorizontalTranslation": [[133, 3, 1, "", "apply"], [133, 4, 1, "", "bounds"], [133, 4, 1, "", "optm_bounds"], [133, 2, 1, "", "power_curve"]], "reV.losses.power_curve.LinearStretching": [[134, 3, 1, "", "apply"], [134, 4, 1, "", "bounds"], [134, 4, 1, "", "optm_bounds"], [134, 2, 1, "", "power_curve"]], "reV.losses.power_curve.PowerCurve": [[135, 3, 1, "", "__call__"], [135, 4, 1, "", "cutin_wind_speed"], [135, 4, 1, "", "cutoff_wind_speed"], [135, 2, 1, "", "generation"], [135, 4, 1, "", "rated_power"], [135, 2, 1, "", "wind_speed"]], "reV.losses.power_curve.PowerCurveLosses": [[136, 3, 1, "", "annual_losses_with_transformed_power_curve"], [136, 3, 1, "", "fit"], [136, 2, 1, "", "power_curve"], [136, 4, 1, "", "power_gen_no_losses"], [136, 2, 1, "", "weights"], [136, 2, 1, "", "wind_resource"]], "reV.losses.power_curve.PowerCurveLossesInput": [[137, 2, 1, "", "REQUIRED_KEYS"], [137, 4, 1, "", "target"], [137, 4, 1, "", "transformation"]], "reV.losses.power_curve.PowerCurveLossesMixin": [[138, 2, 1, "", "POWER_CURVE_CONFIG_KEY"], [138, 3, 1, "", "add_power_curve_losses"], [138, 4, 1, "", "input_power_curve"], [138, 3, 1, "", "wind_resource_from_input"]], "reV.losses.power_curve.PowerCurveWindResource": [[139, 3, 1, "", "wind_resource_for_site"], [139, 4, 1, "", "wind_speeds"]], "reV.losses.scheduled": [[143, 1, 1, "", "Outage"], [144, 1, 1, "", 
"OutageScheduler"], [145, 1, 1, "", "ScheduledLossesMixin"], [146, 1, 1, "", "SingleOutageScheduler"]], "reV.losses.scheduled.Outage": [[143, 2, 1, "", "REQUIRED_KEYS"], [143, 4, 1, "", "allow_outage_overlap"], [143, 4, 1, "", "allowed_months"], [143, 4, 1, "", "count"], [143, 4, 1, "", "duration"], [143, 4, 1, "", "name"], [143, 4, 1, "", "percentage_of_capacity_lost"], [143, 4, 1, "", "total_available_hours"]], "reV.losses.scheduled.OutageScheduler": [[144, 3, 1, "", "calculate"], [144, 2, 1, "", "can_schedule_more"], [144, 2, 1, "", "outages"], [144, 2, 1, "", "seed"], [144, 2, 1, "", "total_losses"]], "reV.losses.scheduled.ScheduledLossesMixin": [[145, 2, 1, "", "OUTAGE_CONFIG_KEY"], [145, 2, 1, "", "OUTAGE_SEED_CONFIG_KEY"], [145, 3, 1, "", "add_scheduled_losses"], [145, 4, 1, "", "outage_seed"]], "reV.losses.scheduled.SingleOutageScheduler": [[146, 2, 1, "", "MAX_ITER"], [146, 3, 1, "", "calculate"], [146, 2, 1, "", "can_schedule_more"], [146, 3, 1, "", "find_random_outage_slice"], [146, 2, 1, "", "outage"], [146, 3, 1, "", "schedule_losses"], [146, 2, 1, "", "scheduler"], [146, 3, 1, "", "update_when_can_schedule"], [146, 3, 1, "", "update_when_can_schedule_from_months"]], "reV.losses.utils": [[148, 5, 1, "", "convert_to_full_month_names"], [149, 5, 1, "", "filter_unknown_month_names"], [150, 5, 1, "", "format_month_name"], [151, 5, 1, "", "full_month_name_from_abbr"], [152, 5, 1, "", "hourly_indices_for_months"], [153, 5, 1, "", "month_index"], [154, 5, 1, "", "month_indices"]], "reV.nrwal": [[156, 0, 0, "-", "cli_nrwal"], [157, 0, 0, "-", "nrwal"]], "reV.nrwal.nrwal": [[158, 1, 1, "", "RevNrwal"]], "reV.nrwal.nrwal.RevNrwal": [[158, 2, 1, "", "DEFAULT_META_COLS"], [158, 4, 1, "", "analysis_gids"], [158, 4, 1, "", "analysis_mask"], [158, 3, 1, "", "check_outputs"], [158, 4, 1, "", "gen_dsets"], [158, 4, 1, "", "meta_out"], [158, 4, 1, "", "meta_source"], [158, 4, 1, "", "outputs"], [158, 3, 1, "", "run"], [158, 3, 1, "", "run_nrwal"], [158, 3, 1, "", 
"save_raw_dsets"], [158, 4, 1, "", "time_index"], [158, 3, 1, "", "write_meta_to_csv"], [158, 3, 1, "", "write_to_gen_fpath"]], "reV.qa_qc": [[160, 0, 0, "-", "cli_qa_qc"], [162, 0, 0, "-", "qa_qc"], [165, 0, 0, "-", "summary"]], "reV.qa_qc.cli_qa_qc": [[161, 5, 1, "", "cli_qa_qc"]], "reV.qa_qc.qa_qc": [[163, 1, 1, "", "QaQc"], [164, 1, 1, "", "QaQcModule"]], "reV.qa_qc.qa_qc.QaQc": [[163, 3, 1, "", "create_scatter_plots"], [163, 3, 1, "", "exclusions_mask"], [163, 3, 1, "", "h5"], [163, 4, 1, "", "out_dir"], [163, 3, 1, "", "supply_curve"]], "reV.qa_qc.qa_qc.QaQcModule": [[164, 4, 1, "", "area_filter_kernel"], [164, 4, 1, "", "cmap"], [164, 4, 1, "", "columns"], [164, 4, 1, "", "dsets"], [164, 4, 1, "", "excl_dict"], [164, 4, 1, "", "excl_fpath"], [164, 4, 1, "", "fpath"], [164, 4, 1, "", "group"], [164, 4, 1, "", "lcoe"], [164, 4, 1, "", "min_area"], [164, 4, 1, "", "plot_step"], [164, 4, 1, "", "plot_type"], [164, 4, 1, "", "process_size"], [164, 4, 1, "", "sub_dir"]], "reV.qa_qc.summary": [[166, 1, 1, "", "ExclusionsMask"], [167, 1, 1, "", "PlotBase"], [168, 1, 1, "", "SummarizeH5"], [169, 1, 1, "", "SummarizeSupplyCurve"], [170, 1, 1, "", "SummaryPlots"], [171, 1, 1, "", "SupplyCurvePlot"]], "reV.qa_qc.summary.ExclusionsMask": [[166, 4, 1, "", "data"], [166, 3, 1, "", "exclusions_plot"], [166, 3, 1, "", "exclusions_plotly"], [166, 4, 1, "", "mask"], [166, 3, 1, "", "plot"]], "reV.qa_qc.summary.PlotBase": [[167, 4, 1, "", "data"]], "reV.qa_qc.summary.SummarizeH5": [[168, 4, 1, "", "h5_file"], [168, 3, 1, "", "run"], [168, 3, 1, "", "summarize_dset"], [168, 3, 1, "", "summarize_means"]], "reV.qa_qc.summary.SummarizeSupplyCurve": [[169, 3, 1, "", "run"], [169, 4, 1, "", "sc_table"], [169, 3, 1, "", "supply_curve_summary"]], "reV.qa_qc.summary.SummaryPlots": [[170, 4, 1, "", "columns"], [170, 4, 1, "", "data"], [170, 3, 1, "", "dist_plot"], [170, 3, 1, "", "dist_plotly"], [170, 3, 1, "", "scatter"], [170, 3, 1, "", "scatter_all"], [170, 3, 1, "", "scatter_plot"], 
[170, 3, 1, "", "scatter_plotly"], [170, 4, 1, "", "summary"]], "reV.qa_qc.summary.SupplyCurvePlot": [[171, 4, 1, "", "columns"], [171, 4, 1, "", "data"], [171, 3, 1, "", "plot"], [171, 4, 1, "", "sc_table"], [171, 3, 1, "", "supply_curve_plot"], [171, 3, 1, "", "supply_curve_plotly"]], "reV.rep_profiles": [[173, 0, 0, "-", "cli_rep_profiles"], [174, 0, 0, "-", "rep_profiles"]], "reV.rep_profiles.rep_profiles": [[175, 1, 1, "", "RegionRepProfile"], [176, 1, 1, "", "RepProfiles"], [177, 1, 1, "", "RepProfilesBase"], [178, 1, 1, "", "RepresentativeMethods"]], "reV.rep_profiles.rep_profiles.RegionRepProfile": [[175, 3, 1, "", "get_region_rep_profile"], [175, 4, 1, "", "i_reps"], [175, 4, 1, "", "rep_gen_gids"], [175, 4, 1, "", "rep_profiles"], [175, 4, 1, "", "rep_res_gids"], [175, 4, 1, "", "source_profiles"], [175, 4, 1, "", "weights"]], "reV.rep_profiles.rep_profiles.RepProfiles": [[176, 4, 1, "", "meta"], [176, 4, 1, "", "profiles"], [176, 3, 1, "", "run"], [176, 3, 1, "", "save_profiles"], [176, 4, 1, "", "time_index"]], "reV.rep_profiles.rep_profiles.RepProfilesBase": [[177, 4, 1, "", "meta"], [177, 4, 1, "", "profiles"], [177, 3, 1, "", "run"], [177, 3, 1, "", "save_profiles"], [177, 4, 1, "", "time_index"]], "reV.rep_profiles.rep_profiles.RepresentativeMethods": [[178, 4, 1, "", "err_methods"], [178, 3, 1, "", "mae"], [178, 3, 1, "", "mbe"], [178, 3, 1, "", "meanoid"], [178, 3, 1, "", "medianoid"], [178, 3, 1, "", "nargmin"], [178, 4, 1, "", "rep_methods"], [178, 3, 1, "", "rmse"], [178, 3, 1, "", "run"]], "reV.supply_curve": [[180, 0, 0, "-", "aggregation"], [185, 0, 0, "-", "cli_sc_aggregation"], [186, 0, 0, "-", "cli_supply_curve"], [187, 0, 0, "-", "competitive_wind_farms"], [189, 0, 0, "-", "exclusions"], [194, 0, 0, "-", "extent"], [196, 0, 0, "-", "points"], [201, 0, 0, "-", "sc_aggregation"], [204, 0, 0, "-", "supply_curve"], [206, 0, 0, "-", "tech_mapping"]], "reV.supply_curve.aggregation": [[181, 1, 1, "", "AbstractAggFileHandler"], [182, 1, 1, "", 
"AggFileHandler"], [183, 1, 1, "", "Aggregation"], [184, 1, 1, "", "BaseAggregation"]], "reV.supply_curve.aggregation.AbstractAggFileHandler": [[181, 3, 1, "", "close"], [181, 4, 1, "", "exclusions"], [181, 4, 1, "", "h5"]], "reV.supply_curve.aggregation.AggFileHandler": [[182, 2, 1, "", "DEFAULT_H5_HANDLER"], [182, 3, 1, "", "close"], [182, 4, 1, "", "exclusions"], [182, 4, 1, "", "h5"]], "reV.supply_curve.aggregation.Aggregation": [[183, 3, 1, "", "aggregate"], [183, 4, 1, "", "gids"], [183, 3, 1, "", "run"], [183, 3, 1, "", "run_parallel"], [183, 3, 1, "", "run_serial"], [183, 3, 1, "", "save_agg_to_h5"], [183, 4, 1, "", "shape"]], "reV.supply_curve.aggregation.BaseAggregation": [[184, 4, 1, "", "gids"], [184, 4, 1, "", "shape"]], "reV.supply_curve.competitive_wind_farms": [[188, 1, 1, "", "CompetitiveWindFarms"]], "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms": [[188, 3, 1, "", "check_sc_gid"], [188, 3, 1, "", "exclude_sc_point_gid"], [188, 3, 1, "", "map_downwind"], [188, 3, 1, "", "map_sc_gid_to_sc_point_gid"], [188, 3, 1, "", "map_sc_point_gid_to_sc_gid"], [188, 3, 1, "", "map_upwind"], [188, 4, 1, "", "mask"], [188, 3, 1, "", "remove_noncompetitive_farm"], [188, 3, 1, "", "run"], [188, 4, 1, "", "sc_gids"], [188, 4, 1, "", "sc_point_gids"]], "reV.supply_curve.exclusions": [[190, 1, 1, "", "ExclusionMask"], [191, 1, 1, "", "ExclusionMaskFromDict"], [192, 1, 1, "", "FrictionMask"], [193, 1, 1, "", "LayerMask"]], "reV.supply_curve.exclusions.ExclusionMask": [[190, 3, 1, "", "add_layer"], [190, 3, 1, "", "close"], [190, 4, 1, "", "excl_h5"], [190, 4, 1, "", "excl_layers"], [190, 4, 1, "", "latitude"], [190, 4, 1, "", "layer_names"], [190, 4, 1, "", "layers"], [190, 4, 1, "", "longitude"], [190, 4, 1, "", "mask"], [190, 4, 1, "", "nodata_lookup"], [190, 3, 1, "", "run"], [190, 4, 1, "", "shape"]], "reV.supply_curve.exclusions.ExclusionMaskFromDict": [[191, 3, 1, "", "add_layer"], [191, 3, 1, "", "close"], [191, 4, 1, "", "excl_h5"], [191, 4, 1, 
"", "excl_layers"], [191, 3, 1, "", "extract_inclusion_mask"], [191, 4, 1, "", "latitude"], [191, 4, 1, "", "layer_names"], [191, 4, 1, "", "layers"], [191, 4, 1, "", "longitude"], [191, 4, 1, "", "mask"], [191, 4, 1, "", "nodata_lookup"], [191, 3, 1, "", "run"], [191, 4, 1, "", "shape"]], "reV.supply_curve.exclusions.FrictionMask": [[192, 3, 1, "", "add_layer"], [192, 3, 1, "", "close"], [192, 4, 1, "", "excl_h5"], [192, 4, 1, "", "excl_layers"], [192, 4, 1, "", "latitude"], [192, 4, 1, "", "layer_names"], [192, 4, 1, "", "layers"], [192, 4, 1, "", "longitude"], [192, 4, 1, "", "mask"], [192, 4, 1, "", "nodata_lookup"], [192, 3, 1, "", "run"], [192, 4, 1, "", "shape"]], "reV.supply_curve.exclusions.LayerMask": [[193, 4, 1, "", "exclude_values"], [193, 4, 1, "", "force_include"], [193, 4, 1, "", "include_values"], [193, 4, 1, "", "include_weights"], [193, 4, 1, "", "mask_type"], [193, 4, 1, "", "max_value"], [193, 4, 1, "", "min_value"], [193, 4, 1, "", "name"]], "reV.supply_curve.extent": [[195, 1, 1, "", "SupplyCurveExtent"]], "reV.supply_curve.extent.SupplyCurveExtent": [[195, 3, 1, "", "close"], [195, 4, 1, "", "col_indices"], [195, 4, 1, "", "cols_of_excl"], [195, 4, 1, "", "excl_col_slices"], [195, 4, 1, "", "excl_cols"], [195, 4, 1, "", "excl_row_slices"], [195, 4, 1, "", "excl_rows"], [195, 4, 1, "", "excl_shape"], [195, 4, 1, "", "exclusions"], [195, 3, 1, "", "get_coord"], [195, 3, 1, "", "get_excl_points"], [195, 3, 1, "", "get_excl_slices"], [195, 3, 1, "", "get_flat_excl_ind"], [195, 3, 1, "", "get_sc_row_col_ind"], [195, 3, 1, "", "get_slice_lookup"], [195, 4, 1, "", "lat_lon"], [195, 4, 1, "", "latitude"], [195, 4, 1, "", "longitude"], [195, 4, 1, "", "n_cols"], [195, 4, 1, "", "n_rows"], [195, 4, 1, "", "points"], [195, 4, 1, "", "resolution"], [195, 4, 1, "", "row_indices"], [195, 4, 1, "", "rows_of_excl"], [195, 4, 1, "", "shape"], [195, 3, 1, "", "valid_sc_points"]], "reV.supply_curve.points": [[197, 1, 1, "", "AbstractSupplyCurvePoint"], [198, 
1, 1, "", "AggregationSupplyCurvePoint"], [199, 1, 1, "", "GenerationSupplyCurvePoint"], [200, 1, 1, "", "SupplyCurvePoint"]], "reV.supply_curve.points.AbstractSupplyCurvePoint": [[197, 4, 1, "", "cols"], [197, 3, 1, "", "get_agg_slices"], [197, 4, 1, "", "gid"], [197, 4, 1, "", "resolution"], [197, 4, 1, "", "rows"], [197, 4, 1, "", "sc_point_gid"]], "reV.supply_curve.points.AggregationSupplyCurvePoint": [[198, 3, 1, "", "agg_data_layers"], [198, 3, 1, "", "aggregate"], [198, 4, 1, "", "area"], [198, 4, 1, "", "bool_mask"], [198, 4, 1, "", "centroid"], [198, 3, 1, "", "close"], [198, 4, 1, "", "cols"], [198, 4, 1, "", "country"], [198, 4, 1, "", "county"], [198, 4, 1, "", "elevation"], [198, 3, 1, "", "exclusion_weighted_mean"], [198, 4, 1, "", "exclusions"], [198, 3, 1, "", "get_agg_slices"], [198, 4, 1, "", "gid"], [198, 4, 1, "", "gid_counts"], [198, 4, 1, "", "h5"], [198, 4, 1, "", "h5_gid_set"], [198, 4, 1, "", "include_mask"], [198, 4, 1, "", "include_mask_flat"], [198, 4, 1, "", "latitude"], [198, 4, 1, "", "longitude"], [198, 3, 1, "", "mean_wind_dirs"], [198, 4, 1, "", "n_gids"], [198, 4, 1, "", "offshore"], [198, 4, 1, "", "pixel_area"], [198, 4, 1, "", "resolution"], [198, 4, 1, "", "rows"], [198, 3, 1, "", "run"], [198, 3, 1, "", "sc_mean"], [198, 4, 1, "", "sc_point_gid"], [198, 3, 1, "", "sc_sum"], [198, 4, 1, "", "state"], [198, 4, 1, "", "summary"], [198, 4, 1, "", "timezone"]], "reV.supply_curve.points.GenerationSupplyCurvePoint": [[199, 3, 1, "", "agg_data_layers"], [199, 3, 1, "", "aggregate"], [199, 4, 1, "", "area"], [199, 4, 1, "", "bool_mask"], [199, 4, 1, "", "capacity"], [199, 4, 1, "", "capacity_ac"], [199, 4, 1, "", "centroid"], [199, 3, 1, "", "close"], [199, 4, 1, "", "cols"], [199, 4, 1, "", "country"], [199, 4, 1, "", "county"], [199, 3, 1, "", "economies_of_scale"], [199, 4, 1, "", "elevation"], [199, 3, 1, "", "exclusion_weighted_mean"], [199, 4, 1, "", "exclusions"], [199, 4, 1, "", "friction_data"], [199, 4, 1, "", "gen"], [199, 
4, 1, "", "gen_data"], [199, 4, 1, "", "gen_gid_set"], [199, 3, 1, "", "get_agg_slices"], [199, 4, 1, "", "gid"], [199, 4, 1, "", "gid_counts"], [199, 4, 1, "", "h5"], [199, 4, 1, "", "h5_dsets_data"], [199, 4, 1, "", "h5_gid_set"], [199, 4, 1, "", "include_mask"], [199, 4, 1, "", "include_mask_flat"], [199, 4, 1, "", "latitude"], [199, 4, 1, "", "lcoe_data"], [199, 4, 1, "", "longitude"], [199, 4, 1, "", "mean_cf"], [199, 4, 1, "", "mean_friction"], [199, 4, 1, "", "mean_h5_dsets_data"], [199, 4, 1, "", "mean_lcoe"], [199, 4, 1, "", "mean_lcoe_friction"], [199, 4, 1, "", "mean_res"], [199, 3, 1, "", "mean_wind_dirs"], [199, 4, 1, "", "n_gids"], [199, 4, 1, "", "offshore"], [199, 4, 1, "", "pixel_area"], [199, 3, 1, "", "point_summary"], [199, 4, 1, "", "power_density"], [199, 4, 1, "", "power_density_ac"], [199, 4, 1, "", "res_data"], [199, 4, 1, "", "res_gid_set"], [199, 4, 1, "", "resolution"], [199, 4, 1, "", "rows"], [199, 3, 1, "", "run"], [199, 3, 1, "", "sc_mean"], [199, 4, 1, "", "sc_point_gid"], [199, 3, 1, "", "sc_sum"], [199, 4, 1, "", "state"], [199, 3, 1, "", "summarize"], [199, 4, 1, "", "summary"], [199, 4, 1, "", "timezone"]], "reV.supply_curve.points.SupplyCurvePoint": [[200, 3, 1, "", "agg_data_layers"], [200, 3, 1, "", "aggregate"], [200, 4, 1, "", "area"], [200, 4, 1, "", "bool_mask"], [200, 4, 1, "", "centroid"], [200, 3, 1, "", "close"], [200, 4, 1, "", "cols"], [200, 3, 1, "", "exclusion_weighted_mean"], [200, 4, 1, "", "exclusions"], [200, 3, 1, "", "get_agg_slices"], [200, 4, 1, "", "gid"], [200, 4, 1, "", "h5"], [200, 4, 1, "", "include_mask"], [200, 4, 1, "", "include_mask_flat"], [200, 4, 1, "", "latitude"], [200, 4, 1, "", "longitude"], [200, 3, 1, "", "mean_wind_dirs"], [200, 4, 1, "", "n_gids"], [200, 4, 1, "", "pixel_area"], [200, 4, 1, "", "resolution"], [200, 4, 1, "", "rows"], [200, 3, 1, "", "sc_mean"], [200, 4, 1, "", "sc_point_gid"], [200, 3, 1, "", "sc_sum"], [200, 4, 1, "", "summary"]], "reV.supply_curve.sc_aggregation": 
[[202, 1, 1, "", "SupplyCurveAggFileHandler"], [203, 1, 1, "", "SupplyCurveAggregation"]], "reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler": [[202, 3, 1, "", "close"], [202, 4, 1, "", "data_layers"], [202, 4, 1, "", "exclusions"], [202, 4, 1, "", "friction_layer"], [202, 4, 1, "", "gen"], [202, 4, 1, "", "h5"], [202, 4, 1, "", "power_density"]], "reV.supply_curve.sc_aggregation.SupplyCurveAggregation": [[203, 4, 1, "", "gids"], [203, 3, 1, "", "run"], [203, 3, 1, "", "run_parallel"], [203, 3, 1, "", "run_serial"], [203, 4, 1, "", "shape"], [203, 3, 1, "", "summarize"]], "reV.supply_curve.supply_curve": [[205, 1, 1, "", "SupplyCurve"]], "reV.supply_curve.supply_curve.SupplyCurve": [[205, 3, 1, "", "add_sum_cols"], [205, 3, 1, "", "compute_total_lcoe"], [205, 3, 1, "", "full_sort"], [205, 3, 1, "", "run"], [205, 3, 1, "", "simple_sort"]], "reV.supply_curve.tech_mapping": [[207, 1, 1, "", "TechMapping"]], "reV.supply_curve.tech_mapping.TechMapping": [[207, 4, 1, "", "distance_threshold"], [207, 3, 1, "", "map_resource"], [207, 3, 1, "", "map_resource_gids"], [207, 3, 1, "", "run"], [207, 3, 1, "", "save_tech_map"]], "reV.utilities": [[209, 1, 1, "", "ModuleName"], [210, 0, 0, "-", "cli_functions"], [214, 0, 0, "-", "curtailment"], [216, 0, 0, "-", "exceptions"], [259, 5, 1, "", "log_versions"], [260, 0, 0, "-", "pytest_utils"], [264, 0, 0, "-", "slots"]], "reV.utilities.ModuleName": [[209, 3, 1, "", "all_names"]], "reV.utilities.cli_functions": [[211, 5, 1, "", "format_analysis_years"], [212, 5, 1, "", "init_cli_logging"], [213, 5, 1, "", "parse_from_pipeline"]], "reV.utilities.curtailment": [[215, 5, 1, "", "curtail"]], "reV.utilities.exceptions": [[217, 7, 1, "", "CollectionRuntimeError"], [218, 7, 1, "", "CollectionValueError"], [219, 7, 1, "", "CollectionWarning"], [220, 7, 1, "", "ConfigError"], [221, 7, 1, "", "ConfigWarning"], [222, 7, 1, "", "DataShapeError"], [223, 7, 1, "", "EmptySupplyCurvePointError"], [224, 7, 1, "", "ExclusionLayerError"], 
[225, 7, 1, "", "ExecutionError"], [226, 7, 1, "", "ExtrapolationWarning"], [227, 7, 1, "", "FileInputError"], [228, 7, 1, "", "FileInputWarning"], [229, 7, 1, "", "HandlerKeyError"], [230, 7, 1, "", "HandlerRuntimeError"], [231, 7, 1, "", "HandlerValueError"], [232, 7, 1, "", "HandlerWarning"], [233, 7, 1, "", "InputError"], [234, 7, 1, "", "InputWarning"], [235, 7, 1, "", "JSONError"], [236, 7, 1, "", "MultiFileExclusionError"], [237, 7, 1, "", "NearestNeighborError"], [238, 7, 1, "", "OffshoreWindInputError"], [239, 7, 1, "", "OffshoreWindInputWarning"], [240, 7, 1, "", "OutputWarning"], [241, 7, 1, "", "ParallelExecutionWarning"], [242, 7, 1, "", "PipelineError"], [243, 7, 1, "", "ProjectPointsValueError"], [244, 7, 1, "", "PySAMVersionError"], [245, 7, 1, "", "PySAMVersionWarning"], [246, 7, 1, "", "ResourceError"], [247, 7, 1, "", "SAMExecutionError"], [248, 7, 1, "", "SAMExecutionWarning"], [249, 7, 1, "", "SAMInputError"], [250, 7, 1, "", "SAMInputWarning"], [251, 7, 1, "", "SlurmWarning"], [252, 7, 1, "", "SupplyCurveError"], [253, 7, 1, "", "SupplyCurveInputError"], [254, 7, 1, "", "WhileLoopPackingError"], [255, 7, 1, "", "reVDeprecationWarning"], [256, 7, 1, "", "reVError"], [257, 7, 1, "", "reVLossesValueError"], [258, 7, 1, "", "reVLossesWarning"]], "reV.utilities.pytest_utils": [[261, 5, 1, "", "make_fake_h5_chunks"], [262, 5, 1, "", "pd_date_range"], [263, 5, 1, "", "write_chunk"]], "reV.utilities.slots": [[265, 1, 1, "", "SlottedDict"]], "reV.utilities.slots.SlottedDict": [[265, 3, 1, "", "items"], [265, 3, 1, "", "keys"], [265, 3, 1, "", "update"], [265, 3, 1, "", "values"]], "reV-batch": [[269, 8, 1, "cmdoption-reV-batch-cancel", "--cancel"], [269, 8, 1, "cmdoption-reV-batch-c", "--config_file"], [269, 8, 1, "cmdoption-reV-batch-delete", "--delete"], [269, 8, 1, "cmdoption-reV-batch-dry", "--dry"], [269, 8, 1, "cmdoption-reV-batch-monitor-background", "--monitor-background"], [269, 8, 1, "cmdoption-reV-batch-c", "-c"]], "reV-bespoke": [[270, 8, 
1, "cmdoption-reV-bespoke-c", "--config_file"], [270, 8, 1, "cmdoption-reV-bespoke-c", "-c"]], "reV-collect": [[271, 8, 1, "cmdoption-reV-collect-c", "--config_file"], [271, 8, 1, "cmdoption-reV-collect-c", "-c"]], "reV-econ": [[272, 8, 1, "cmdoption-reV-econ-c", "--config_file"], [272, 8, 1, "cmdoption-reV-econ-c", "-c"]], "reV-generation": [[273, 8, 1, "cmdoption-reV-generation-c", "--config_file"], [273, 8, 1, "cmdoption-reV-generation-c", "-c"]], "reV-hybrids": [[274, 8, 1, "cmdoption-reV-hybrids-c", "--config_file"], [274, 8, 1, "cmdoption-reV-hybrids-c", "-c"]], "reV-multiyear": [[275, 8, 1, "cmdoption-reV-multiyear-c", "--config_file"], [275, 8, 1, "cmdoption-reV-multiyear-c", "-c"]], "reV-nrwal": [[276, 8, 1, "cmdoption-reV-nrwal-c", "--config_file"], [276, 8, 1, "cmdoption-reV-nrwal-c", "-c"]], "reV-pipeline": [[277, 8, 1, "cmdoption-reV-pipeline-background", "--background"], [277, 8, 1, "cmdoption-reV-pipeline-cancel", "--cancel"], [277, 8, 1, "cmdoption-reV-pipeline-c", "--config_file"], [277, 8, 1, "cmdoption-reV-pipeline-monitor", "--monitor"], [277, 8, 1, "cmdoption-reV-pipeline-r", "--recursive"], [277, 8, 1, "cmdoption-reV-pipeline-c", "-c"], [277, 8, 1, "cmdoption-reV-pipeline-r", "-r"]], "reV-project-points-from-lat-lons": [[278, 8, 1, "cmdoption-reV-project-points-from-lat-lons-lat_lon_coords", "--lat_lon_coords"], [278, 8, 1, "cmdoption-reV-project-points-from-lat-lons-llf", "--lat_lon_fpath"], [278, 8, 1, "cmdoption-reV-project-points-from-lat-lons-lat_lon_coords", "--llc"], [278, 8, 1, "cmdoption-reV-project-points-from-lat-lons-llf", "-llf"]], "reV-project-points-from-regions": [[278, 8, 1, "cmdoption-reV-project-points-from-regions-r", "--region"], [278, 8, 1, "cmdoption-reV-project-points-from-regions-col", "--region_col"], [278, 8, 1, "cmdoption-reV-project-points-from-regions-regs", "--regions"], [278, 8, 1, "cmdoption-reV-project-points-from-regions-col", "-col"], [278, 8, 1, "cmdoption-reV-project-points-from-regions-r", "-r"], [278, 8, 
1, "cmdoption-reV-project-points-from-regions-regs", "-regs"]], "reV-project-points": [[278, 8, 1, "cmdoption-reV-project-points-f", "--fpath"], [278, 8, 1, "cmdoption-reV-project-points-rf", "--res_file"], [278, 8, 1, "cmdoption-reV-project-points-sf", "--sam_file"], [278, 8, 1, "cmdoption-reV-project-points-v", "--verbose"], [278, 8, 1, "cmdoption-reV-project-points-version", "--version"], [278, 8, 1, "cmdoption-reV-project-points-f", "-f"], [278, 8, 1, "cmdoption-reV-project-points-rf", "-rf"], [278, 8, 1, "cmdoption-reV-project-points-sf", "-sf"], [278, 8, 1, "cmdoption-reV-project-points-v", "-v"]], "reV-qa-qc": [[279, 8, 1, "cmdoption-reV-qa-qc-c", "--config_file"], [279, 8, 1, "cmdoption-reV-qa-qc-c", "-c"]], "reV-rep-profiles": [[280, 8, 1, "cmdoption-reV-rep-profiles-c", "--config_file"], [280, 8, 1, "cmdoption-reV-rep-profiles-c", "-c"]], "reV-reset-status": [[281, 8, 1, "cmdoption-reV-reset-status-a", "--after-step"], [281, 8, 1, "cmdoption-reV-reset-status-f", "--force"], [281, 8, 1, "cmdoption-reV-reset-status-a", "-a"], [281, 8, 1, "cmdoption-reV-reset-status-f", "-f"], [281, 8, 1, "cmdoption-reV-reset-status-arg-DIRECTORY", "DIRECTORY"]], "reV-script": [[282, 8, 1, "cmdoption-reV-script-c", "--config_file"], [282, 8, 1, "cmdoption-reV-script-c", "-c"]], "reV-status": [[283, 8, 1, "cmdoption-reV-status-i", "--include"], [283, 8, 1, "cmdoption-reV-status-ps", "--pipe_steps"], [283, 8, 1, "cmdoption-reV-status-r", "--recursive"], [283, 8, 1, "cmdoption-reV-status-s", "--status"], [283, 8, 1, "cmdoption-reV-status-i", "-i"], [283, 8, 1, "cmdoption-reV-status-ps", "-ps"], [283, 8, 1, "cmdoption-reV-status-r", "-r"], [283, 8, 1, "cmdoption-reV-status-s", "-s"], [283, 8, 1, "cmdoption-reV-status-arg-FOLDER", "FOLDER"]], "reV-supply-curve-aggregation": [[285, 8, 1, "cmdoption-reV-supply-curve-aggregation-c", "--config_file"], [285, 8, 1, "cmdoption-reV-supply-curve-aggregation-c", "-c"]], "reV-supply-curve": [[284, 8, 1, "cmdoption-reV-supply-curve-c", 
"--config_file"], [284, 8, 1, "cmdoption-reV-supply-curve-c", "-c"]], "reV-template-configs": [[286, 8, 1, "cmdoption-reV-template-configs-t", "--type"], [286, 8, 1, "cmdoption-reV-template-configs-t", "-t"], [286, 8, 1, "cmdoption-reV-template-configs-arg-COMMANDS", "COMMANDS"]]}, "objtypes": {"0": "py:module", "1": "py:class", "2": "py:attribute", "3": "py:method", "4": "py:property", "5": "py:function", "6": "py:data", "7": "py:exception", "8": "std:cmdoption"}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "attribute", "Python attribute"], "3": ["py", "method", "Python method"], "4": ["py", "property", "Python property"], "5": ["py", "function", "Python function"], "6": ["py", "data", "Python data"], "7": ["py", "exception", "Python exception"], "8": ["std", "cmdoption", "program option"]}, "titleterms": {"rev": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 
244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265, 266, 268, 269, 270, 271, 272, 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, 288, 291, 295, 296, 297, 298, 299, 300, 302], "sam": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 290], "revpysam": 3, "samresourceretriev": 5, "default": [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19], "abstractdefaultfromconfigfil": 7, "defaultgeotherm": 8, "defaultlco": 9, "defaultlinearfresneldsgiph": 10, "defaultmhkwav": 11, "defaultpvsamv1": 12, "defaultpvwattsv5": 13, "defaultpvwattsv8": 14, "defaultsingleown": 15, "defaultswh": 16, "defaulttcsmoltensalt": 17, "defaulttroughphysicalprocessheat": 18, "defaultwindpow": 19, "econ": [20, 21, 22, 23, 88, 89, 90, 91, 92, 93, 94, 95, 272], "econom": 21, "lcoe": 22, "singleown": 23, "gener": [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 96, 97, 98, 99, 100, 101, 273, 288, 302], "abstractsamgener": 25, "abstractsamgenerationfromweatherfil": 26, "abstractsampv": 27, "abstractsamsolar": 28, "abstractsamwind": 29, "geotherm": 30, "lineardirectsteam": 31, "mhkwave": 32, "pvsamv1": 33, "pvwattsv5": 34, "pvwattsv7": 35, "pvwattsv8": 36, "solarwaterheat": 37, "tcsmoltensalt": 38, "troughphysicalheat": 39, "windpow": [40, 299, 300], "windpowerpd": 41, "version_check": [42, 43], "pysamversioncheck": 43, "windbo": [44, 45], "bespok": [46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 270], "bespokemultiplantdata": 48, "bespokesinglepl": 49, "bespokesingleplantdata": 50, "bespokewindpl": 51, "cli_bespok": 52, "gradient_fre": [53, 54], "geneticalgorithm": 54, "pack_turb": [55, 56, 57], "packturbin": 56, "smallest_area_with_tiebreak": 57, "place_turbin": [58, 59, 60], "placeturbin": 59, "none_until_optim": 60, "plotting_funct": [61, 62, 
63, 64, 65], "get_xi": 62, "plot_poli": 63, "plot_turbin": 64, "plot_windros": 65, "cli": [66, 267, 297, 299, 300], "config": [67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 286, 292], "base_analysis_config": [68, 69], "analysisconfig": 69, "base_config": [70, 71], "baseconfig": 71, "cli_project_point": 72, "curtail": [73, 74, 214, 215], "execut": [75, 76, 77, 78, 292, 294, 301], "baseexecutionconfig": 76, "hpcconfig": 77, "slurmconfig": 78, "output_request": [79, 80, 81], "outputrequest": 80, "samoutputrequest": 81, "project_point": [82, 83, 84], "pointscontrol": 83, "projectpoint": 84, "sam_config": [85, 86, 87], "samconfig": 86, "saminputscheck": 87, "cli_econ": 89, "economies_of_scal": [92, 93], "economiesofscal": 93, "unit": 93, "util": [94, 95, 147, 148, 149, 150, 151, 152, 153, 154, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, 259, 260, 261, 262, 263, 264, 265], "lcoe_fcr": 95, "base": [97, 98], "basegen": 98, "cli_gen": 99, "gen": [101, 299, 300], "handler": [102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115], "cli_collect": 103, "cli_multi_year": 104, "exclus": [105, 106, 189, 190, 191, 192, 193], "exclusionlay": 106, "multi_year": [107, 108, 109, 110], "multiyear": [108, 275], "multiyeargroup": 109, "my_collect_group": 110, "output": [111, 112, 295, 296], "transmiss": [113, 114, 115], "transmissioncost": 114, "transmissionfeatur": 115, "hybrid": [116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 274], "cli_hybrid": 117, "hybrid_method": [118, 119, 120, 121, 122], "aggregate_capac": 119, "aggregate_capacity_factor": 120, "aggregate_solar_capac": 121, "aggregate_wind_capac": 122, "colnameformatt": 124, "hybridsdata": 126, "metahybrid": 127, "ratiocolumn": 128, "loss": [129, 130, 
131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 298], "power_curv": [130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141], "abstractpowercurvetransform": 131, "exponentialstretch": 132, "horizontaltransl": 133, "linearstretch": 134, "powercurv": 135, "powercurveloss": 136, "powercurvelossesinput": 137, "powercurvelossesmixin": 138, "powercurvewindresourc": 139, "transform": 140, "adjust_power_curv": 141, "schedul": [142, 143, 144, 145, 146], "outag": [143, 298], "outageschedul": 144, "scheduledlossesmixin": 145, "singleoutageschedul": 146, "convert_to_full_month_nam": 148, "filter_unknown_month_nam": 149, "format_month_nam": 150, "full_month_name_from_abbr": 151, "hourly_indices_for_month": 152, "month_index": 153, "month_indic": 154, "nrwal": [155, 156, 157, 158, 276], "cli_nrwal": 156, "revnrwal": 158, "qa_qc": [159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171], "cli_qa_qc": [160, 161], "qaqc": 163, "qaqcmodul": 164, "summari": [165, 166, 167, 168, 169, 170, 171], "exclusionsmask": 166, "plotbas": 167, "summarizeh5": 168, "summarizesupplycurv": 169, "summaryplot": 170, "supplycurveplot": 171, "rep_profil": [172, 173, 174, 175, 176, 177, 178], "cli_rep_profil": 173, "regionrepprofil": 175, "repprofil": 176, "repprofilesbas": 177, "representativemethod": 178, "supply_curv": [179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207], "aggreg": [180, 181, 182, 183, 184, 285], "abstractaggfilehandl": 181, "aggfilehandl": 182, "baseaggreg": 184, "cli_sc_aggreg": 185, "cli_supply_curv": 186, "competitive_wind_farm": [187, 188], "competitivewindfarm": 188, "exclusionmask": 190, "exclusionmaskfromdict": 191, "frictionmask": 192, "layermask": 193, "extent": [194, 195], "supplycurveext": 195, "point": [196, 197, 198, 199, 200, 278, 296, 297], "abstractsupplycurvepoint": 197, 
"aggregationsupplycurvepoint": 198, "generationsupplycurvepoint": 199, "supplycurvepoint": 200, "sc_aggreg": [201, 202, 203], "supplycurveaggfilehandl": 202, "supplycurveaggreg": 203, "supplycurv": 205, "tech_map": [206, 207], "techmap": 207, "modulenam": 209, "refer": 209, "cli_funct": [210, 211, 212, 213], "format_analysis_year": 211, "init_cli_log": 212, "parse_from_pipelin": 213, "except": [216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258], "collectionruntimeerror": 217, "collectionvalueerror": 218, "collectionwarn": 219, "configerror": 220, "configwarn": 221, "datashapeerror": 222, "emptysupplycurvepointerror": 223, "exclusionlayererror": 224, "executionerror": 225, "extrapolationwarn": 226, "fileinputerror": 227, "fileinputwarn": 228, "handlerkeyerror": 229, "handlerruntimeerror": 230, "handlervalueerror": 231, "handlerwarn": 232, "inputerror": 233, "inputwarn": 234, "jsonerror": 235, "multifileexclusionerror": 236, "nearestneighborerror": 237, "offshorewindinputerror": 238, "offshorewindinputwarn": 239, "outputwarn": 240, "parallelexecutionwarn": 241, "pipelineerror": 242, "projectpointsvalueerror": 243, "pysamversionerror": 244, "pysamversionwarn": 245, "resourceerror": 246, "samexecutionerror": 247, "samexecutionwarn": 248, "saminputerror": 249, "saminputwarn": 250, "slurmwarn": 251, "supplycurveerror": 252, "supplycurveinputerror": 253, "whilelooppackingerror": 254, "revdeprecationwarn": 255, "reverror": 256, "revlossesvalueerror": 257, "revlosseswarn": 258, "log_vers": 259, "pytest_util": [260, 261, 262, 263], "make_fake_h5_chunk": 261, "pd_date_rang": 262, "write_chunk": 263, "slot": [264, 265], "slotteddict": 265, "version": 266, "command": [267, 288, 297, 299, 300, 302], "line": [267, 288, 297, 299, 300, 302], "interfac": [267, 297, 299, 300], "batch": [269, 292], "paramet": [269, 270, 
271, 272, 273, 274, 275, 276, 277, 279, 280, 282, 284, 285], "collect": 271, "pipelin": [277, 294], "project": [278, 297], "from": [278, 288, 302], "lat": 278, "lon": 278, "region": 278, "qa": 279, "qc": 279, "rep": 280, "profil": 280, "reset": 281, "statu": [281, 283], "script": 282, "suppli": [284, 285, 296], "curv": [284, 285, 296, 298], "templat": 286, "document": 288, "what": 288, "i": 288, "how": [288, 292], "doe": 288, "work": 288, "instal": [288, 302, 303], "option": [288, 302], "1": [288, 302], "pip": [288, 302], "recommend": [288, 302], "analyst": [288, 302], "2": [288, 302], "clone": [288, 302], "repo": [288, 302], "develop": [288, 302], "tool": [288, 302], "launch": [288, 302], "run": [288, 291, 292, 299, 300, 302], "time": [288, 302], "node": [288, 293, 302], "configur": [288, 302], "eagl": [288, 293, 302], "citat": [288, 302], "exampl": [289, 295, 296], "singl": [290, 298, 301], "owner": 290, "model": [290, 296], "workflow": 290, "descript": [290, 292, 296], "an": 291, "aw": 291, "parallel": 291, "cluster": 291, "set": [291, 300], "up": [291, 300], "note": 291, "hsd": [291, 300], "local": [291, 299], "server": 291, "your": 291, "comput": 291, "kubernet": 291, "servic": 291, "lambda": 291, "simpl": 291, "h5pyd": 291, "test": 291, "cost": 291, "estim": 291, "pcluster": 291, "empir": 291, "conu": 291, "request": 293, "full": 294, "input": 294, "requir": 294, "fail": 294, "job": 294, "file": 294, "marin": 295, "energi": 295, "plot": [295, 296], "offshor": 296, "wind": [296, 298], "modul": [296, 301], "treatment": 296, "power": 298, "onli": 298, "site": 298, "solar": 298, "pvwatt": [299, 300], "us": 300, "usag": 303}, "envversion": {"sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2, "sphinx.ext.intersphinx": 1, "sphinx.ext.viewcode": 1, 
"sphinx": 58}, "alltitles": {"reV": [[0, "module-reV"], [268, "rev"]], "reV.SAM": [[1, "module-reV.SAM"]], "reV.SAM.SAM": [[2, "module-reV.SAM.SAM"]], "reV.SAM.SAM.RevPySam": [[3, "rev-sam-sam-revpysam"]], "reV.SAM.SAM.Sam": [[4, "rev-sam-sam-sam"]], "reV.SAM.SAM.SamResourceRetriever": [[5, "rev-sam-sam-samresourceretriever"]], "reV.SAM.defaults": [[6, "module-reV.SAM.defaults"]], "reV.SAM.defaults.AbstractDefaultFromConfigFile": [[7, "rev-sam-defaults-abstractdefaultfromconfigfile"]], "reV.SAM.defaults.DefaultGeothermal": [[8, "rev-sam-defaults-defaultgeothermal"]], "reV.SAM.defaults.DefaultLCOE": [[9, "rev-sam-defaults-defaultlcoe"]], "reV.SAM.defaults.DefaultLinearFresnelDsgIph": [[10, "rev-sam-defaults-defaultlinearfresneldsgiph"]], "reV.SAM.defaults.DefaultMhkWave": [[11, "rev-sam-defaults-defaultmhkwave"]], "reV.SAM.defaults.DefaultPvSamv1": [[12, "rev-sam-defaults-defaultpvsamv1"]], "reV.SAM.defaults.DefaultPvWattsv5": [[13, "rev-sam-defaults-defaultpvwattsv5"]], "reV.SAM.defaults.DefaultPvWattsv8": [[14, "rev-sam-defaults-defaultpvwattsv8"]], "reV.SAM.defaults.DefaultSingleOwner": [[15, "rev-sam-defaults-defaultsingleowner"]], "reV.SAM.defaults.DefaultSwh": [[16, "rev-sam-defaults-defaultswh"]], "reV.SAM.defaults.DefaultTcsMoltenSalt": [[17, "rev-sam-defaults-defaulttcsmoltensalt"]], "reV.SAM.defaults.DefaultTroughPhysicalProcessHeat": [[18, "rev-sam-defaults-defaulttroughphysicalprocessheat"]], "reV.SAM.defaults.DefaultWindPower": [[19, "rev-sam-defaults-defaultwindpower"]], "reV.SAM.econ": [[20, "module-reV.SAM.econ"]], "reV.SAM.econ.Economic": [[21, "rev-sam-econ-economic"]], "reV.SAM.econ.LCOE": [[22, "rev-sam-econ-lcoe"]], "reV.SAM.econ.SingleOwner": [[23, "rev-sam-econ-singleowner"]], "reV.SAM.generation": [[24, "module-reV.SAM.generation"]], "reV.SAM.generation.AbstractSamGeneration": [[25, "rev-sam-generation-abstractsamgeneration"]], "reV.SAM.generation.AbstractSamGenerationFromWeatherFile": [[26, 
"rev-sam-generation-abstractsamgenerationfromweatherfile"]], "reV.SAM.generation.AbstractSamPv": [[27, "rev-sam-generation-abstractsampv"]], "reV.SAM.generation.AbstractSamSolar": [[28, "rev-sam-generation-abstractsamsolar"]], "reV.SAM.generation.AbstractSamWind": [[29, "rev-sam-generation-abstractsamwind"]], "reV.SAM.generation.Geothermal": [[30, "rev-sam-generation-geothermal"]], "reV.SAM.generation.LinearDirectSteam": [[31, "rev-sam-generation-lineardirectsteam"]], "reV.SAM.generation.MhkWave": [[32, "rev-sam-generation-mhkwave"]], "reV.SAM.generation.PvSamv1": [[33, "rev-sam-generation-pvsamv1"]], "reV.SAM.generation.PvWattsv5": [[34, "rev-sam-generation-pvwattsv5"]], "reV.SAM.generation.PvWattsv7": [[35, "rev-sam-generation-pvwattsv7"]], "reV.SAM.generation.PvWattsv8": [[36, "rev-sam-generation-pvwattsv8"]], "reV.SAM.generation.SolarWaterHeat": [[37, "rev-sam-generation-solarwaterheat"]], "reV.SAM.generation.TcsMoltenSalt": [[38, "rev-sam-generation-tcsmoltensalt"]], "reV.SAM.generation.TroughPhysicalHeat": [[39, "rev-sam-generation-troughphysicalheat"]], "reV.SAM.generation.WindPower": [[40, "rev-sam-generation-windpower"]], "reV.SAM.generation.WindPowerPD": [[41, "rev-sam-generation-windpowerpd"]], "reV.SAM.version_checker": [[42, "module-reV.SAM.version_checker"]], "reV.SAM.version_checker.PySamVersionChecker": [[43, "rev-sam-version-checker-pysamversionchecker"]], "reV.SAM.windbos": [[44, "module-reV.SAM.windbos"]], "reV.SAM.windbos.WindBos": [[45, "rev-sam-windbos-windbos"]], "reV.bespoke": [[46, "module-reV.bespoke"]], "reV.bespoke.bespoke": [[47, "module-reV.bespoke.bespoke"]], "reV.bespoke.bespoke.BespokeMultiPlantData": [[48, "rev-bespoke-bespoke-bespokemultiplantdata"]], "reV.bespoke.bespoke.BespokeSinglePlant": [[49, "rev-bespoke-bespoke-bespokesingleplant"]], "reV.bespoke.bespoke.BespokeSinglePlantData": [[50, "rev-bespoke-bespoke-bespokesingleplantdata"]], "reV.bespoke.bespoke.BespokeWindPlants": [[51, "rev-bespoke-bespoke-bespokewindplants"]], 
"reV.bespoke.cli_bespoke": [[52, "module-reV.bespoke.cli_bespoke"]], "reV.bespoke.gradient_free": [[53, "module-reV.bespoke.gradient_free"]], "reV.bespoke.gradient_free.GeneticAlgorithm": [[54, "rev-bespoke-gradient-free-geneticalgorithm"]], "reV.bespoke.pack_turbs": [[55, "module-reV.bespoke.pack_turbs"]], "reV.bespoke.pack_turbs.PackTurbines": [[56, "rev-bespoke-pack-turbs-packturbines"]], "reV.bespoke.pack_turbs.smallest_area_with_tiebreakers": [[57, "rev-bespoke-pack-turbs-smallest-area-with-tiebreakers"]], "reV.bespoke.place_turbines": [[58, "module-reV.bespoke.place_turbines"]], "reV.bespoke.place_turbines.PlaceTurbines": [[59, "rev-bespoke-place-turbines-placeturbines"]], "reV.bespoke.place_turbines.none_until_optimized": [[60, "rev-bespoke-place-turbines-none-until-optimized"]], "reV.bespoke.plotting_functions": [[61, "module-reV.bespoke.plotting_functions"]], "reV.bespoke.plotting_functions.get_xy": [[62, "rev-bespoke-plotting-functions-get-xy"]], "reV.bespoke.plotting_functions.plot_poly": [[63, "rev-bespoke-plotting-functions-plot-poly"]], "reV.bespoke.plotting_functions.plot_turbines": [[64, "rev-bespoke-plotting-functions-plot-turbines"]], "reV.bespoke.plotting_functions.plot_windrose": [[65, "rev-bespoke-plotting-functions-plot-windrose"]], "reV.cli": [[66, "module-reV.cli"]], "reV.config": [[67, "module-reV.config"]], "reV.config.base_analysis_config": [[68, "module-reV.config.base_analysis_config"]], "reV.config.base_analysis_config.AnalysisConfig": [[69, "rev-config-base-analysis-config-analysisconfig"]], "reV.config.base_config": [[70, "module-reV.config.base_config"]], "reV.config.base_config.BaseConfig": [[71, "rev-config-base-config-baseconfig"]], "reV.config.cli_project_points": [[72, "module-reV.config.cli_project_points"]], "reV.config.curtailment": [[73, "module-reV.config.curtailment"]], "reV.config.curtailment.Curtailment": [[74, "rev-config-curtailment-curtailment"]], "reV.config.execution": [[75, "module-reV.config.execution"]], 
"reV.config.execution.BaseExecutionConfig": [[76, "rev-config-execution-baseexecutionconfig"]], "reV.config.execution.HPCConfig": [[77, "rev-config-execution-hpcconfig"]], "reV.config.execution.SlurmConfig": [[78, "rev-config-execution-slurmconfig"]], "reV.config.output_request": [[79, "module-reV.config.output_request"]], "reV.config.output_request.OutputRequest": [[80, "rev-config-output-request-outputrequest"]], "reV.config.output_request.SAMOutputRequest": [[81, "rev-config-output-request-samoutputrequest"]], "reV.config.project_points": [[82, "module-reV.config.project_points"]], "reV.config.project_points.PointsControl": [[83, "rev-config-project-points-pointscontrol"]], "reV.config.project_points.ProjectPoints": [[84, "rev-config-project-points-projectpoints"]], "reV.config.sam_config": [[85, "module-reV.config.sam_config"]], "reV.config.sam_config.SAMConfig": [[86, "rev-config-sam-config-samconfig"]], "reV.config.sam_config.SAMInputsChecker": [[87, "rev-config-sam-config-saminputschecker"]], "reV.econ": [[88, "module-reV.econ"]], "reV.econ.cli_econ": [[89, "module-reV.econ.cli_econ"]], "reV.econ.econ": [[90, "module-reV.econ.econ"]], "reV.econ.econ.Econ": [[91, "rev-econ-econ-econ"]], "reV.econ.economies_of_scale": [[92, "module-reV.econ.economies_of_scale"]], "reV.econ.economies_of_scale.EconomiesOfScale": [[93, "rev-econ-economies-of-scale-economiesofscale"]], "Units": [[93, "units"]], "reV.econ.utilities": [[94, "module-reV.econ.utilities"]], "reV.econ.utilities.lcoe_fcr": [[95, "rev-econ-utilities-lcoe-fcr"]], "reV.generation": [[96, "module-reV.generation"]], "reV.generation.base": [[97, "module-reV.generation.base"]], "reV.generation.base.BaseGen": [[98, "rev-generation-base-basegen"]], "reV.generation.cli_gen": [[99, "module-reV.generation.cli_gen"]], "reV.generation.generation": [[100, "module-reV.generation.generation"]], "reV.generation.generation.Gen": [[101, "rev-generation-generation-gen"]], "reV.handlers": [[102, "module-reV.handlers"]], 
"reV.handlers.cli_collect": [[103, "module-reV.handlers.cli_collect"]], "reV.handlers.cli_multi_year": [[104, "module-reV.handlers.cli_multi_year"]], "reV.handlers.exclusions": [[105, "module-reV.handlers.exclusions"]], "reV.handlers.exclusions.ExclusionLayers": [[106, "rev-handlers-exclusions-exclusionlayers"]], "reV.handlers.multi_year": [[107, "module-reV.handlers.multi_year"]], "reV.handlers.multi_year.MultiYear": [[108, "rev-handlers-multi-year-multiyear"]], "reV.handlers.multi_year.MultiYearGroup": [[109, "rev-handlers-multi-year-multiyeargroup"]], "reV.handlers.multi_year.my_collect_groups": [[110, "rev-handlers-multi-year-my-collect-groups"]], "reV.handlers.outputs": [[111, "module-reV.handlers.outputs"]], "reV.handlers.outputs.Outputs": [[112, "rev-handlers-outputs-outputs"]], "reV.handlers.transmission": [[113, "module-reV.handlers.transmission"]], "reV.handlers.transmission.TransmissionCosts": [[114, "rev-handlers-transmission-transmissioncosts"]], "reV.handlers.transmission.TransmissionFeatures": [[115, "rev-handlers-transmission-transmissionfeatures"]], "reV.hybrids": [[116, "module-reV.hybrids"]], "reV.hybrids.cli_hybrids": [[117, "module-reV.hybrids.cli_hybrids"]], "reV.hybrids.hybrid_methods": [[118, "module-reV.hybrids.hybrid_methods"]], "reV.hybrids.hybrid_methods.aggregate_capacity": [[119, "rev-hybrids-hybrid-methods-aggregate-capacity"]], "reV.hybrids.hybrid_methods.aggregate_capacity_factor": [[120, "rev-hybrids-hybrid-methods-aggregate-capacity-factor"]], "reV.hybrids.hybrid_methods.aggregate_solar_capacity": [[121, "rev-hybrids-hybrid-methods-aggregate-solar-capacity"]], "reV.hybrids.hybrid_methods.aggregate_wind_capacity": [[122, "rev-hybrids-hybrid-methods-aggregate-wind-capacity"]], "reV.hybrids.hybrids": [[123, "module-reV.hybrids.hybrids"]], "reV.hybrids.hybrids.ColNameFormatter": [[124, "rev-hybrids-hybrids-colnameformatter"]], "reV.hybrids.hybrids.Hybridization": [[125, "rev-hybrids-hybrids-hybridization"]], 
"reV.hybrids.hybrids.HybridsData": [[126, "rev-hybrids-hybrids-hybridsdata"]], "reV.hybrids.hybrids.MetaHybridizer": [[127, "rev-hybrids-hybrids-metahybridizer"]], "reV.hybrids.hybrids.RatioColumns": [[128, "rev-hybrids-hybrids-ratiocolumns"]], "reV.losses": [[129, "module-reV.losses"]], "reV.losses.power_curve": [[130, "module-reV.losses.power_curve"]], "reV.losses.power_curve.AbstractPowerCurveTransformation": [[131, "rev-losses-power-curve-abstractpowercurvetransformation"]], "reV.losses.power_curve.ExponentialStretching": [[132, "rev-losses-power-curve-exponentialstretching"]], "reV.losses.power_curve.HorizontalTranslation": [[133, "rev-losses-power-curve-horizontaltranslation"]], "reV.losses.power_curve.LinearStretching": [[134, "rev-losses-power-curve-linearstretching"]], "reV.losses.power_curve.PowerCurve": [[135, "rev-losses-power-curve-powercurve"]], "reV.losses.power_curve.PowerCurveLosses": [[136, "rev-losses-power-curve-powercurvelosses"]], "reV.losses.power_curve.PowerCurveLossesInput": [[137, "rev-losses-power-curve-powercurvelossesinput"]], "reV.losses.power_curve.PowerCurveLossesMixin": [[138, "rev-losses-power-curve-powercurvelossesmixin"]], "reV.losses.power_curve.PowerCurveWindResource": [[139, "rev-losses-power-curve-powercurvewindresource"]], "reV.losses.power_curve.TRANSFORMATIONS": [[140, "rev-losses-power-curve-transformations"]], "reV.losses.power_curve.adjust_power_curve": [[141, "rev-losses-power-curve-adjust-power-curve"]], "reV.losses.scheduled": [[142, "module-reV.losses.scheduled"]], "reV.losses.scheduled.Outage": [[143, "rev-losses-scheduled-outage"]], "reV.losses.scheduled.OutageScheduler": [[144, "rev-losses-scheduled-outagescheduler"]], "reV.losses.scheduled.ScheduledLossesMixin": [[145, "rev-losses-scheduled-scheduledlossesmixin"]], "reV.losses.scheduled.SingleOutageScheduler": [[146, "rev-losses-scheduled-singleoutagescheduler"]], "reV.losses.utils": [[147, "module-reV.losses.utils"]], 
"reV.losses.utils.convert_to_full_month_names": [[148, "rev-losses-utils-convert-to-full-month-names"]], "reV.losses.utils.filter_unknown_month_names": [[149, "rev-losses-utils-filter-unknown-month-names"]], "reV.losses.utils.format_month_name": [[150, "rev-losses-utils-format-month-name"]], "reV.losses.utils.full_month_name_from_abbr": [[151, "rev-losses-utils-full-month-name-from-abbr"]], "reV.losses.utils.hourly_indices_for_months": [[152, "rev-losses-utils-hourly-indices-for-months"]], "reV.losses.utils.month_index": [[153, "rev-losses-utils-month-index"]], "reV.losses.utils.month_indices": [[154, "rev-losses-utils-month-indices"]], "reV.nrwal": [[155, "module-reV.nrwal"]], "reV.nrwal.cli_nrwal": [[156, "module-reV.nrwal.cli_nrwal"]], "reV.nrwal.nrwal": [[157, "module-reV.nrwal.nrwal"]], "reV.nrwal.nrwal.RevNrwal": [[158, "rev-nrwal-nrwal-revnrwal"]], "reV.qa_qc": [[159, "module-reV.qa_qc"]], "reV.qa_qc.cli_qa_qc": [[160, "module-reV.qa_qc.cli_qa_qc"]], "reV.qa_qc.cli_qa_qc.cli_qa_qc": [[161, "rev-qa-qc-cli-qa-qc-cli-qa-qc"]], "reV.qa_qc.qa_qc": [[162, "module-reV.qa_qc.qa_qc"]], "reV.qa_qc.qa_qc.QaQc": [[163, "rev-qa-qc-qa-qc-qaqc"]], "reV.qa_qc.qa_qc.QaQcModule": [[164, "rev-qa-qc-qa-qc-qaqcmodule"]], "reV.qa_qc.summary": [[165, "module-reV.qa_qc.summary"]], "reV.qa_qc.summary.ExclusionsMask": [[166, "rev-qa-qc-summary-exclusionsmask"]], "reV.qa_qc.summary.PlotBase": [[167, "rev-qa-qc-summary-plotbase"]], "reV.qa_qc.summary.SummarizeH5": [[168, "rev-qa-qc-summary-summarizeh5"]], "reV.qa_qc.summary.SummarizeSupplyCurve": [[169, "rev-qa-qc-summary-summarizesupplycurve"]], "reV.qa_qc.summary.SummaryPlots": [[170, "rev-qa-qc-summary-summaryplots"]], "reV.qa_qc.summary.SupplyCurvePlot": [[171, "rev-qa-qc-summary-supplycurveplot"]], "reV.rep_profiles": [[172, "module-reV.rep_profiles"]], "reV.rep_profiles.cli_rep_profiles": [[173, "module-reV.rep_profiles.cli_rep_profiles"]], "reV.rep_profiles.rep_profiles": [[174, "module-reV.rep_profiles.rep_profiles"]], 
"reV.rep_profiles.rep_profiles.RegionRepProfile": [[175, "rev-rep-profiles-rep-profiles-regionrepprofile"]], "reV.rep_profiles.rep_profiles.RepProfiles": [[176, "rev-rep-profiles-rep-profiles-repprofiles"]], "reV.rep_profiles.rep_profiles.RepProfilesBase": [[177, "rev-rep-profiles-rep-profiles-repprofilesbase"]], "reV.rep_profiles.rep_profiles.RepresentativeMethods": [[178, "rev-rep-profiles-rep-profiles-representativemethods"]], "reV.supply_curve": [[179, "module-reV.supply_curve"]], "reV.supply_curve.aggregation": [[180, "module-reV.supply_curve.aggregation"]], "reV.supply_curve.aggregation.AbstractAggFileHandler": [[181, "rev-supply-curve-aggregation-abstractaggfilehandler"]], "reV.supply_curve.aggregation.AggFileHandler": [[182, "rev-supply-curve-aggregation-aggfilehandler"]], "reV.supply_curve.aggregation.Aggregation": [[183, "rev-supply-curve-aggregation-aggregation"]], "reV.supply_curve.aggregation.BaseAggregation": [[184, "rev-supply-curve-aggregation-baseaggregation"]], "reV.supply_curve.cli_sc_aggregation": [[185, "module-reV.supply_curve.cli_sc_aggregation"]], "reV.supply_curve.cli_supply_curve": [[186, "module-reV.supply_curve.cli_supply_curve"]], "reV.supply_curve.competitive_wind_farms": [[187, "module-reV.supply_curve.competitive_wind_farms"]], "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms": [[188, "rev-supply-curve-competitive-wind-farms-competitivewindfarms"]], "reV.supply_curve.exclusions": [[189, "module-reV.supply_curve.exclusions"]], "reV.supply_curve.exclusions.ExclusionMask": [[190, "rev-supply-curve-exclusions-exclusionmask"]], "reV.supply_curve.exclusions.ExclusionMaskFromDict": [[191, "rev-supply-curve-exclusions-exclusionmaskfromdict"]], "reV.supply_curve.exclusions.FrictionMask": [[192, "rev-supply-curve-exclusions-frictionmask"]], "reV.supply_curve.exclusions.LayerMask": [[193, "rev-supply-curve-exclusions-layermask"]], "reV.supply_curve.extent": [[194, "module-reV.supply_curve.extent"]], 
"reV.supply_curve.extent.SupplyCurveExtent": [[195, "rev-supply-curve-extent-supplycurveextent"]], "reV.supply_curve.points": [[196, "module-reV.supply_curve.points"]], "reV.supply_curve.points.AbstractSupplyCurvePoint": [[197, "rev-supply-curve-points-abstractsupplycurvepoint"]], "reV.supply_curve.points.AggregationSupplyCurvePoint": [[198, "rev-supply-curve-points-aggregationsupplycurvepoint"]], "reV.supply_curve.points.GenerationSupplyCurvePoint": [[199, "rev-supply-curve-points-generationsupplycurvepoint"]], "reV.supply_curve.points.SupplyCurvePoint": [[200, "rev-supply-curve-points-supplycurvepoint"]], "reV.supply_curve.sc_aggregation": [[201, "module-reV.supply_curve.sc_aggregation"]], "reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler": [[202, "rev-supply-curve-sc-aggregation-supplycurveaggfilehandler"]], "reV.supply_curve.sc_aggregation.SupplyCurveAggregation": [[203, "rev-supply-curve-sc-aggregation-supplycurveaggregation"]], "reV.supply_curve.supply_curve": [[204, "module-reV.supply_curve.supply_curve"]], "reV.supply_curve.supply_curve.SupplyCurve": [[205, "rev-supply-curve-supply-curve-supplycurve"]], "reV.supply_curve.tech_mapping": [[206, "module-reV.supply_curve.tech_mapping"]], "reV.supply_curve.tech_mapping.TechMapping": [[207, "rev-supply-curve-tech-mapping-techmapping"]], "reV.utilities": [[208, "module-reV.utilities"]], "reV.utilities.ModuleName": [[209, "rev-utilities-modulename"]], "Reference": [[209, "reference"]], "reV.utilities.cli_functions": [[210, "module-reV.utilities.cli_functions"]], "reV.utilities.cli_functions.format_analysis_years": [[211, "rev-utilities-cli-functions-format-analysis-years"]], "reV.utilities.cli_functions.init_cli_logging": [[212, "rev-utilities-cli-functions-init-cli-logging"]], "reV.utilities.cli_functions.parse_from_pipeline": [[213, "rev-utilities-cli-functions-parse-from-pipeline"]], "reV.utilities.curtailment": [[214, "module-reV.utilities.curtailment"]], "reV.utilities.curtailment.curtail": [[215, 
"rev-utilities-curtailment-curtail"]], "reV.utilities.exceptions": [[216, "module-reV.utilities.exceptions"]], "reV.utilities.exceptions.CollectionRuntimeError": [[217, "rev-utilities-exceptions-collectionruntimeerror"]], "reV.utilities.exceptions.CollectionValueError": [[218, "rev-utilities-exceptions-collectionvalueerror"]], "reV.utilities.exceptions.CollectionWarning": [[219, "rev-utilities-exceptions-collectionwarning"]], "reV.utilities.exceptions.ConfigError": [[220, "rev-utilities-exceptions-configerror"]], "reV.utilities.exceptions.ConfigWarning": [[221, "rev-utilities-exceptions-configwarning"]], "reV.utilities.exceptions.DataShapeError": [[222, "rev-utilities-exceptions-datashapeerror"]], "reV.utilities.exceptions.EmptySupplyCurvePointError": [[223, "rev-utilities-exceptions-emptysupplycurvepointerror"]], "reV.utilities.exceptions.ExclusionLayerError": [[224, "rev-utilities-exceptions-exclusionlayererror"]], "reV.utilities.exceptions.ExecutionError": [[225, "rev-utilities-exceptions-executionerror"]], "reV.utilities.exceptions.ExtrapolationWarning": [[226, "rev-utilities-exceptions-extrapolationwarning"]], "reV.utilities.exceptions.FileInputError": [[227, "rev-utilities-exceptions-fileinputerror"]], "reV.utilities.exceptions.FileInputWarning": [[228, "rev-utilities-exceptions-fileinputwarning"]], "reV.utilities.exceptions.HandlerKeyError": [[229, "rev-utilities-exceptions-handlerkeyerror"]], "reV.utilities.exceptions.HandlerRuntimeError": [[230, "rev-utilities-exceptions-handlerruntimeerror"]], "reV.utilities.exceptions.HandlerValueError": [[231, "rev-utilities-exceptions-handlervalueerror"]], "reV.utilities.exceptions.HandlerWarning": [[232, "rev-utilities-exceptions-handlerwarning"]], "reV.utilities.exceptions.InputError": [[233, "rev-utilities-exceptions-inputerror"]], "reV.utilities.exceptions.InputWarning": [[234, "rev-utilities-exceptions-inputwarning"]], "reV.utilities.exceptions.JSONError": [[235, "rev-utilities-exceptions-jsonerror"]], 
"reV.utilities.exceptions.MultiFileExclusionError": [[236, "rev-utilities-exceptions-multifileexclusionerror"]], "reV.utilities.exceptions.NearestNeighborError": [[237, "rev-utilities-exceptions-nearestneighborerror"]], "reV.utilities.exceptions.OffshoreWindInputError": [[238, "rev-utilities-exceptions-offshorewindinputerror"]], "reV.utilities.exceptions.OffshoreWindInputWarning": [[239, "rev-utilities-exceptions-offshorewindinputwarning"]], "reV.utilities.exceptions.OutputWarning": [[240, "rev-utilities-exceptions-outputwarning"]], "reV.utilities.exceptions.ParallelExecutionWarning": [[241, "rev-utilities-exceptions-parallelexecutionwarning"]], "reV.utilities.exceptions.PipelineError": [[242, "rev-utilities-exceptions-pipelineerror"]], "reV.utilities.exceptions.ProjectPointsValueError": [[243, "rev-utilities-exceptions-projectpointsvalueerror"]], "reV.utilities.exceptions.PySAMVersionError": [[244, "rev-utilities-exceptions-pysamversionerror"]], "reV.utilities.exceptions.PySAMVersionWarning": [[245, "rev-utilities-exceptions-pysamversionwarning"]], "reV.utilities.exceptions.ResourceError": [[246, "rev-utilities-exceptions-resourceerror"]], "reV.utilities.exceptions.SAMExecutionError": [[247, "rev-utilities-exceptions-samexecutionerror"]], "reV.utilities.exceptions.SAMExecutionWarning": [[248, "rev-utilities-exceptions-samexecutionwarning"]], "reV.utilities.exceptions.SAMInputError": [[249, "rev-utilities-exceptions-saminputerror"]], "reV.utilities.exceptions.SAMInputWarning": [[250, "rev-utilities-exceptions-saminputwarning"]], "reV.utilities.exceptions.SlurmWarning": [[251, "rev-utilities-exceptions-slurmwarning"]], "reV.utilities.exceptions.SupplyCurveError": [[252, "rev-utilities-exceptions-supplycurveerror"]], "reV.utilities.exceptions.SupplyCurveInputError": [[253, "rev-utilities-exceptions-supplycurveinputerror"]], "reV.utilities.exceptions.WhileLoopPackingError": [[254, "rev-utilities-exceptions-whilelooppackingerror"]], 
"reV.utilities.exceptions.reVDeprecationWarning": [[255, "rev-utilities-exceptions-revdeprecationwarning"]], "reV.utilities.exceptions.reVError": [[256, "rev-utilities-exceptions-reverror"]], "reV.utilities.exceptions.reVLossesValueError": [[257, "rev-utilities-exceptions-revlossesvalueerror"]], "reV.utilities.exceptions.reVLossesWarning": [[258, "rev-utilities-exceptions-revlosseswarning"]], "reV.utilities.log_versions": [[259, "rev-utilities-log-versions"]], "reV.utilities.pytest_utils": [[260, "module-reV.utilities.pytest_utils"]], "reV.utilities.pytest_utils.make_fake_h5_chunks": [[261, "rev-utilities-pytest-utils-make-fake-h5-chunks"]], "reV.utilities.pytest_utils.pd_date_range": [[262, "rev-utilities-pytest-utils-pd-date-range"]], "reV.utilities.pytest_utils.write_chunk": [[263, "rev-utilities-pytest-utils-write-chunk"]], "reV.utilities.slots": [[264, "module-reV.utilities.slots"]], "reV.utilities.slots.SlottedDict": [[265, "rev-utilities-slots-slotteddict"]], "reV.version": [[266, "module-reV.version"]], "Command Line Interfaces (CLIs)": [[267, "command-line-interfaces-clis"]], "reV batch": [[269, "rev-batch"]], "Parameters": [[269, "parameters"], [270, "parameters"], [271, "parameters"], [272, "parameters"], [273, "parameters"], [274, "parameters"], [275, "parameters"], [276, "parameters"], [277, "parameters"], [279, "parameters"], [280, "parameters"], [282, "parameters"], [284, "parameters"], [285, "parameters"]], "reV bespoke": [[270, "rev-bespoke"]], "reV collect": [[271, "rev-collect"]], "reV econ": [[272, "rev-econ"]], "reV generation": [[273, "rev-generation"]], "reV hybrids": [[274, "rev-hybrids"]], "reV multiyear": [[275, "rev-multiyear"]], "reV nrwal": [[276, "rev-nrwal"]], "reV pipeline": [[277, "rev-pipeline"]], "reV project-points": [[278, "rev-project-points"]], "from-lat-lons": [[278, "rev-project-points-from-lat-lons"]], "from-regions": [[278, "rev-project-points-from-regions"]], "reV qa-qc": [[279, "rev-qa-qc"]], "reV rep-profiles": [[280, 
"rev-rep-profiles"]], "reV reset-status": [[281, "rev-reset-status"]], "reV script": [[282, "rev-script"]], "reV status": [[283, "rev-status"]], "reV supply-curve": [[284, "rev-supply-curve"]], "reV supply-curve-aggregation": [[285, "rev-supply-curve-aggregation"]], "reV template-configs": [[286, "rev-template-configs"]], "reV documentation": [[288, "rev-documentation"]], "What is reV?": [[288, "what-is-rev"]], "How does reV work?": [[288, "how-does-rev-work"]], "Installing reV": [[288, "installing-rev"]], "Option 1: Install from PIP (recommended for analysts):": [[288, "option-1-install-from-pip-recommended-for-analysts"], [302, "option-1-install-from-pip-recommended-for-analysts"]], "Option 2: Clone repo (recommended for developers)": [[288, "option-2-clone-repo-recommended-for-developers"], [302, "option-2-clone-repo-recommended-for-developers"]], "reV command line tools": [[288, "rev-command-line-tools"], [302, "rev-command-line-tools"]], "Launching a run": [[288, "launching-a-run"], [302, "launching-a-run"]], "General Run times and Node configuration on Eagle": [[288, "general-run-times-and-node-configuration-on-eagle"], [302, "general-run-times-and-node-configuration-on-eagle"]], "Recommended Citation": [[288, "recommended-citation"], [302, "recommended-citation"]], "Examples": [[289, "examples"]], "SAM Single Owner Modeling": [[290, "sam-single-owner-modeling"]], "Workflow Description": [[290, "workflow-description"]], "Running reV on an AWS Parallel Cluster": [[291, "running-rev-on-an-aws-parallel-cluster"]], "Setting up an AWS Parallel Cluster": [[291, "setting-up-an-aws-parallel-cluster"]], "Notes on Running reV in the AWS Parallel Cluster": [[291, "notes-on-running-rev-in-the-aws-parallel-cluster"]], "Setting up HSDS Local Servers on your Compute Cluster": [[291, "setting-up-hsds-local-servers-on-your-compute-cluster"]], "Setting up an HSDS Kubernetes Service": [[291, "setting-up-an-hsds-kubernetes-service"]], "Setting up an HSDS Lambda Service": [[291, 
"setting-up-an-hsds-lambda-service"]], "Simple H5PYD Test": [[291, "simple-h5pyd-test"]], "Compute Cost Estimates": [[291, "compute-cost-estimates"]], "reV PCluster Compute Costs (Empirical)": [[291, "id2"]], "CONUS Compute Costs (Estimated)": [[291, "id3"]], "Batched Execution": [[292, "batched-execution"]], "Batching Config Description": [[292, "batching-config-description"]], "How to Run": [[292, "how-to-run"]], "Eagle Node Requests": [[293, "eagle-node-requests"]], "Full Pipeline Execution": [[294, "full-pipeline-execution"]], "Pipeline Input Requirements": [[294, "pipeline-input-requirements"]], "Failed Jobs": [[294, "failed-jobs"]], "File Inputs": [[294, "file-inputs"]], "reV Marine Energy": [[295, "rev-marine-energy"]], "Plots of the Example Marine Energy Output": [[295, "plots-of-the-example-marine-energy-output"]], "Offshore Wind Modeling": [[296, "offshore-wind-modeling"]], "reV Offshore Module Description": [[296, "rev-offshore-module-description"]], "Treatment of Offshore Points in Supply Curve": [[296, "treatment-of-offshore-points-in-supply-curve"]], "Plots of the Example Offshore Output": [[296, "plots-of-the-example-offshore-output"]], "reV Project Points": [[297, "rev-project-points"]], "Command Line Interface (CLI)": [[297, "command-line-interface-cli"], [299, "command-line-interface-cli"], [300, "command-line-interface-cli"]], "reV Losses": [[298, "rev-losses"]], "Power Curve Losses (Wind only)": [[298, "power-curve-losses-wind-only"]], "Power Curve Losses for a single site": [[298, "power-curve-losses-for-a-single-site"]], "Outage Losses (Wind and Solar)": [[298, "outage-losses-wind-and-solar"]], "Run reV locally": [[299, "run-rev-locally"]], "reV Gen": [[299, "id1"], [300, "rev-gen"]], "windpower": [[299, "windpower"], [300, "windpower"]], "pvwatts": [[299, "pvwatts"], [300, "pvwatts"]], "Running with HSDS": [[300, "running-with-hsds"]], "Setting up HSDS": [[300, "setting-up-hsds"]], "Using HSDS with reV": [[300, "using-hsds-with-rev"]], 
"Single Module Execution": [[301, "single-module-execution"]], "Installation": [[302, "installation"]], "Command Line Tools": [[302, "command-line-tools"]], "Installation and Usage": [[303, "installation-and-usage"]]}, "indexentries": {"module": [[0, "module-reV"], [1, "module-reV.SAM"], [2, "module-reV.SAM.SAM"], [6, "module-reV.SAM.defaults"], [20, "module-reV.SAM.econ"], [24, "module-reV.SAM.generation"], [42, "module-reV.SAM.version_checker"], [44, "module-reV.SAM.windbos"], [46, "module-reV.bespoke"], [47, "module-reV.bespoke.bespoke"], [52, "module-reV.bespoke.cli_bespoke"], [53, "module-reV.bespoke.gradient_free"], [55, "module-reV.bespoke.pack_turbs"], [58, "module-reV.bespoke.place_turbines"], [61, "module-reV.bespoke.plotting_functions"], [66, "module-reV.cli"], [67, "module-reV.config"], [68, "module-reV.config.base_analysis_config"], [70, "module-reV.config.base_config"], [72, "module-reV.config.cli_project_points"], [73, "module-reV.config.curtailment"], [75, "module-reV.config.execution"], [79, "module-reV.config.output_request"], [82, "module-reV.config.project_points"], [85, "module-reV.config.sam_config"], [88, "module-reV.econ"], [89, "module-reV.econ.cli_econ"], [90, "module-reV.econ.econ"], [92, "module-reV.econ.economies_of_scale"], [94, "module-reV.econ.utilities"], [96, "module-reV.generation"], [97, "module-reV.generation.base"], [99, "module-reV.generation.cli_gen"], [100, "module-reV.generation.generation"], [102, "module-reV.handlers"], [103, "module-reV.handlers.cli_collect"], [104, "module-reV.handlers.cli_multi_year"], [105, "module-reV.handlers.exclusions"], [107, "module-reV.handlers.multi_year"], [111, "module-reV.handlers.outputs"], [113, "module-reV.handlers.transmission"], [116, "module-reV.hybrids"], [117, "module-reV.hybrids.cli_hybrids"], [118, "module-reV.hybrids.hybrid_methods"], [123, "module-reV.hybrids.hybrids"], [129, "module-reV.losses"], [130, "module-reV.losses.power_curve"], [142, "module-reV.losses.scheduled"], 
[147, "module-reV.losses.utils"], [155, "module-reV.nrwal"], [156, "module-reV.nrwal.cli_nrwal"], [157, "module-reV.nrwal.nrwal"], [159, "module-reV.qa_qc"], [160, "module-reV.qa_qc.cli_qa_qc"], [162, "module-reV.qa_qc.qa_qc"], [165, "module-reV.qa_qc.summary"], [172, "module-reV.rep_profiles"], [173, "module-reV.rep_profiles.cli_rep_profiles"], [174, "module-reV.rep_profiles.rep_profiles"], [179, "module-reV.supply_curve"], [180, "module-reV.supply_curve.aggregation"], [185, "module-reV.supply_curve.cli_sc_aggregation"], [186, "module-reV.supply_curve.cli_supply_curve"], [187, "module-reV.supply_curve.competitive_wind_farms"], [189, "module-reV.supply_curve.exclusions"], [194, "module-reV.supply_curve.extent"], [196, "module-reV.supply_curve.points"], [201, "module-reV.supply_curve.sc_aggregation"], [204, "module-reV.supply_curve.supply_curve"], [206, "module-reV.supply_curve.tech_mapping"], [208, "module-reV.utilities"], [210, "module-reV.utilities.cli_functions"], [214, "module-reV.utilities.curtailment"], [216, "module-reV.utilities.exceptions"], [260, "module-reV.utilities.pytest_utils"], [264, "module-reV.utilities.slots"], [266, "module-reV.version"]], "rev": [[0, "module-reV"]], "rev.sam": [[1, "module-reV.SAM"]], "rev.sam.sam": [[2, "module-reV.SAM.SAM"]], "pysam (revpysam attribute)": [[3, "reV.SAM.SAM.RevPySam.PYSAM"]], "revpysam (class in rev.sam.sam)": [[3, "reV.SAM.SAM.RevPySam"]], "assign_inputs() (revpysam method)": [[3, "reV.SAM.SAM.RevPySam.assign_inputs"]], "attr_dict (revpysam property)": [[3, "reV.SAM.SAM.RevPySam.attr_dict"]], "collect_outputs() (revpysam method)": [[3, "reV.SAM.SAM.RevPySam.collect_outputs"]], "default() (revpysam class method)": [[3, "reV.SAM.SAM.RevPySam.default"]], "drop_leap() (revpysam static method)": [[3, "reV.SAM.SAM.RevPySam.drop_leap"]], "ensure_res_len() (revpysam static method)": [[3, "reV.SAM.SAM.RevPySam.ensure_res_len"]], "execute() (revpysam method)": [[3, "reV.SAM.SAM.RevPySam.execute"]], "get_sam_res() 
(revpysam static method)": [[3, "reV.SAM.SAM.RevPySam.get_sam_res"]], "get_time_interval() (revpysam class method)": [[3, "reV.SAM.SAM.RevPySam.get_time_interval"]], "input_list (revpysam property)": [[3, "reV.SAM.SAM.RevPySam.input_list"]], "make_datetime() (revpysam static method)": [[3, "reV.SAM.SAM.RevPySam.make_datetime"]], "meta (revpysam property)": [[3, "reV.SAM.SAM.RevPySam.meta"]], "module (revpysam property)": [[3, "reV.SAM.SAM.RevPySam.module"]], "outputs_to_utc_arr() (revpysam method)": [[3, "reV.SAM.SAM.RevPySam.outputs_to_utc_arr"]], "pysam (revpysam property)": [[3, "reV.SAM.SAM.RevPySam.pysam"]], "site (revpysam property)": [[3, "reV.SAM.SAM.RevPySam.site"]], "pysam (sam attribute)": [[4, "reV.SAM.SAM.Sam.PYSAM"]], "sam (class in rev.sam.sam)": [[4, "reV.SAM.SAM.Sam"]], "assign_inputs() (sam method)": [[4, "reV.SAM.SAM.Sam.assign_inputs"]], "attr_dict (sam property)": [[4, "reV.SAM.SAM.Sam.attr_dict"]], "default() (sam class method)": [[4, "reV.SAM.SAM.Sam.default"]], "execute() (sam method)": [[4, "reV.SAM.SAM.Sam.execute"]], "input_list (sam property)": [[4, "reV.SAM.SAM.Sam.input_list"]], "pysam (sam property)": [[4, "reV.SAM.SAM.Sam.pysam"]], "samresourceretriever (class in rev.sam.sam)": [[5, "reV.SAM.SAM.SamResourceRetriever"]], "get() (samresourceretriever class method)": [[5, "reV.SAM.SAM.SamResourceRetriever.get"]], "rev.sam.defaults": [[6, "module-reV.SAM.defaults"]], "abstractdefaultfromconfigfile (class in rev.sam.defaults)": [[7, "reV.SAM.defaults.AbstractDefaultFromConfigFile"]], "config_file_name (abstractdefaultfromconfigfile property)": [[7, "reV.SAM.defaults.AbstractDefaultFromConfigFile.CONFIG_FILE_NAME"]], "pysam_module (abstractdefaultfromconfigfile property)": [[7, "reV.SAM.defaults.AbstractDefaultFromConfigFile.PYSAM_MODULE"]], "init_default_pysam_obj() (abstractdefaultfromconfigfile class method)": [[7, "reV.SAM.defaults.AbstractDefaultFromConfigFile.init_default_pysam_obj"]], "defaultgeothermal (class in rev.sam.defaults)": 
[[8, "reV.SAM.defaults.DefaultGeothermal"]], "pysam_module (defaultgeothermal attribute)": [[8, "reV.SAM.defaults.DefaultGeothermal.PYSAM_MODULE"]], "default() (defaultgeothermal static method)": [[8, "reV.SAM.defaults.DefaultGeothermal.default"]], "init_default_pysam_obj() (defaultgeothermal class method)": [[8, "reV.SAM.defaults.DefaultGeothermal.init_default_pysam_obj"]], "defaultlcoe (class in rev.sam.defaults)": [[9, "reV.SAM.defaults.DefaultLCOE"]], "default() (defaultlcoe static method)": [[9, "reV.SAM.defaults.DefaultLCOE.default"]], "defaultlinearfresneldsgiph (class in rev.sam.defaults)": [[10, "reV.SAM.defaults.DefaultLinearFresnelDsgIph"]], "default() (defaultlinearfresneldsgiph static method)": [[10, "reV.SAM.defaults.DefaultLinearFresnelDsgIph.default"]], "defaultmhkwave (class in rev.sam.defaults)": [[11, "reV.SAM.defaults.DefaultMhkWave"]], "default() (defaultmhkwave static method)": [[11, "reV.SAM.defaults.DefaultMhkWave.default"]], "defaultpvsamv1 (class in rev.sam.defaults)": [[12, "reV.SAM.defaults.DefaultPvSamv1"]], "default() (defaultpvsamv1 static method)": [[12, "reV.SAM.defaults.DefaultPvSamv1.default"]], "defaultpvwattsv5 (class in rev.sam.defaults)": [[13, "reV.SAM.defaults.DefaultPvWattsv5"]], "pysam_module (defaultpvwattsv5 attribute)": [[13, "reV.SAM.defaults.DefaultPvWattsv5.PYSAM_MODULE"]], "default() (defaultpvwattsv5 static method)": [[13, "reV.SAM.defaults.DefaultPvWattsv5.default"]], "init_default_pysam_obj() (defaultpvwattsv5 class method)": [[13, "reV.SAM.defaults.DefaultPvWattsv5.init_default_pysam_obj"]], "defaultpvwattsv8 (class in rev.sam.defaults)": [[14, "reV.SAM.defaults.DefaultPvWattsv8"]], "default() (defaultpvwattsv8 static method)": [[14, "reV.SAM.defaults.DefaultPvWattsv8.default"]], "defaultsingleowner (class in rev.sam.defaults)": [[15, "reV.SAM.defaults.DefaultSingleOwner"]], "default() (defaultsingleowner static method)": [[15, "reV.SAM.defaults.DefaultSingleOwner.default"]], "defaultswh (class in 
rev.sam.defaults)": [[16, "reV.SAM.defaults.DefaultSwh"]], "default() (defaultswh static method)": [[16, "reV.SAM.defaults.DefaultSwh.default"]], "defaulttcsmoltensalt (class in rev.sam.defaults)": [[17, "reV.SAM.defaults.DefaultTcsMoltenSalt"]], "default() (defaulttcsmoltensalt static method)": [[17, "reV.SAM.defaults.DefaultTcsMoltenSalt.default"]], "defaulttroughphysicalprocessheat (class in rev.sam.defaults)": [[18, "reV.SAM.defaults.DefaultTroughPhysicalProcessHeat"]], "default() (defaulttroughphysicalprocessheat static method)": [[18, "reV.SAM.defaults.DefaultTroughPhysicalProcessHeat.default"]], "defaultwindpower (class in rev.sam.defaults)": [[19, "reV.SAM.defaults.DefaultWindPower"]], "default() (defaultwindpower static method)": [[19, "reV.SAM.defaults.DefaultWindPower.default"]], "rev.sam.econ": [[20, "module-reV.SAM.econ"]], "economic (class in rev.sam.econ)": [[21, "reV.SAM.econ.Economic"]], "pysam (economic attribute)": [[21, "reV.SAM.econ.Economic.PYSAM"]], "assign_inputs() (economic method)": [[21, "reV.SAM.econ.Economic.assign_inputs"]], "attr_dict (economic property)": [[21, "reV.SAM.econ.Economic.attr_dict"]], "collect_outputs() (economic method)": [[21, "reV.SAM.econ.Economic.collect_outputs"]], "default() (economic class method)": [[21, "reV.SAM.econ.Economic.default"]], "drop_leap() (economic static method)": [[21, "reV.SAM.econ.Economic.drop_leap"]], "ensure_res_len() (economic static method)": [[21, "reV.SAM.econ.Economic.ensure_res_len"]], "execute() (economic method)": [[21, "reV.SAM.econ.Economic.execute"]], "flip_actual_irr() (economic method)": [[21, "reV.SAM.econ.Economic.flip_actual_irr"]], "get_sam_res() (economic static method)": [[21, "reV.SAM.econ.Economic.get_sam_res"]], "get_time_interval() (economic class method)": [[21, "reV.SAM.econ.Economic.get_time_interval"]], "gross_revenue() (economic method)": [[21, "reV.SAM.econ.Economic.gross_revenue"]], "input_list (economic property)": [[21, "reV.SAM.econ.Economic.input_list"]], 
"lcoe_fcr() (economic method)": [[21, "reV.SAM.econ.Economic.lcoe_fcr"]], "lcoe_nom() (economic method)": [[21, "reV.SAM.econ.Economic.lcoe_nom"]], "lcoe_real() (economic method)": [[21, "reV.SAM.econ.Economic.lcoe_real"]], "make_datetime() (economic static method)": [[21, "reV.SAM.econ.Economic.make_datetime"]], "meta (economic property)": [[21, "reV.SAM.econ.Economic.meta"]], "module (economic property)": [[21, "reV.SAM.econ.Economic.module"]], "npv() (economic method)": [[21, "reV.SAM.econ.Economic.npv"]], "outputs_to_utc_arr() (economic method)": [[21, "reV.SAM.econ.Economic.outputs_to_utc_arr"]], "ppa_price() (economic method)": [[21, "reV.SAM.econ.Economic.ppa_price"]], "pysam (economic property)": [[21, "reV.SAM.econ.Economic.pysam"]], "rev_run() (economic class method)": [[21, "reV.SAM.econ.Economic.reV_run"]], "site (economic property)": [[21, "reV.SAM.econ.Economic.site"]], "lcoe (class in rev.sam.econ)": [[22, "reV.SAM.econ.LCOE"]], "pysam (lcoe attribute)": [[22, "reV.SAM.econ.LCOE.PYSAM"]], "assign_inputs() (lcoe method)": [[22, "reV.SAM.econ.LCOE.assign_inputs"]], "attr_dict (lcoe property)": [[22, "reV.SAM.econ.LCOE.attr_dict"]], "collect_outputs() (lcoe method)": [[22, "reV.SAM.econ.LCOE.collect_outputs"]], "default() (lcoe static method)": [[22, "reV.SAM.econ.LCOE.default"]], "drop_leap() (lcoe static method)": [[22, "reV.SAM.econ.LCOE.drop_leap"]], "ensure_res_len() (lcoe static method)": [[22, "reV.SAM.econ.LCOE.ensure_res_len"]], "execute() (lcoe method)": [[22, "reV.SAM.econ.LCOE.execute"]], "flip_actual_irr() (lcoe method)": [[22, "reV.SAM.econ.LCOE.flip_actual_irr"]], "get_sam_res() (lcoe static method)": [[22, "reV.SAM.econ.LCOE.get_sam_res"]], "get_time_interval() (lcoe class method)": [[22, "reV.SAM.econ.LCOE.get_time_interval"]], "gross_revenue() (lcoe method)": [[22, "reV.SAM.econ.LCOE.gross_revenue"]], "input_list (lcoe property)": [[22, "reV.SAM.econ.LCOE.input_list"]], "lcoe_fcr() (lcoe method)": [[22, "reV.SAM.econ.LCOE.lcoe_fcr"]], 
"lcoe_nom() (lcoe method)": [[22, "reV.SAM.econ.LCOE.lcoe_nom"]], "lcoe_real() (lcoe method)": [[22, "reV.SAM.econ.LCOE.lcoe_real"]], "make_datetime() (lcoe static method)": [[22, "reV.SAM.econ.LCOE.make_datetime"]], "meta (lcoe property)": [[22, "reV.SAM.econ.LCOE.meta"]], "module (lcoe property)": [[22, "reV.SAM.econ.LCOE.module"]], "npv() (lcoe method)": [[22, "reV.SAM.econ.LCOE.npv"]], "outputs_to_utc_arr() (lcoe method)": [[22, "reV.SAM.econ.LCOE.outputs_to_utc_arr"]], "ppa_price() (lcoe method)": [[22, "reV.SAM.econ.LCOE.ppa_price"]], "pysam (lcoe property)": [[22, "reV.SAM.econ.LCOE.pysam"]], "rev_run() (lcoe class method)": [[22, "reV.SAM.econ.LCOE.reV_run"]], "site (lcoe property)": [[22, "reV.SAM.econ.LCOE.site"]], "pysam (singleowner attribute)": [[23, "reV.SAM.econ.SingleOwner.PYSAM"]], "singleowner (class in rev.sam.econ)": [[23, "reV.SAM.econ.SingleOwner"]], "assign_inputs() (singleowner method)": [[23, "reV.SAM.econ.SingleOwner.assign_inputs"]], "attr_dict (singleowner property)": [[23, "reV.SAM.econ.SingleOwner.attr_dict"]], "collect_outputs() (singleowner method)": [[23, "reV.SAM.econ.SingleOwner.collect_outputs"]], "default() (singleowner static method)": [[23, "reV.SAM.econ.SingleOwner.default"]], "drop_leap() (singleowner static method)": [[23, "reV.SAM.econ.SingleOwner.drop_leap"]], "ensure_res_len() (singleowner static method)": [[23, "reV.SAM.econ.SingleOwner.ensure_res_len"]], "execute() (singleowner method)": [[23, "reV.SAM.econ.SingleOwner.execute"]], "flip_actual_irr() (singleowner method)": [[23, "reV.SAM.econ.SingleOwner.flip_actual_irr"]], "get_sam_res() (singleowner static method)": [[23, "reV.SAM.econ.SingleOwner.get_sam_res"]], "get_time_interval() (singleowner class method)": [[23, "reV.SAM.econ.SingleOwner.get_time_interval"]], "gross_revenue() (singleowner method)": [[23, "reV.SAM.econ.SingleOwner.gross_revenue"]], "input_list (singleowner property)": [[23, "reV.SAM.econ.SingleOwner.input_list"]], "lcoe_fcr() (singleowner 
method)": [[23, "reV.SAM.econ.SingleOwner.lcoe_fcr"]], "lcoe_nom() (singleowner method)": [[23, "reV.SAM.econ.SingleOwner.lcoe_nom"]], "lcoe_real() (singleowner method)": [[23, "reV.SAM.econ.SingleOwner.lcoe_real"]], "make_datetime() (singleowner static method)": [[23, "reV.SAM.econ.SingleOwner.make_datetime"]], "meta (singleowner property)": [[23, "reV.SAM.econ.SingleOwner.meta"]], "module (singleowner property)": [[23, "reV.SAM.econ.SingleOwner.module"]], "npv() (singleowner method)": [[23, "reV.SAM.econ.SingleOwner.npv"]], "outputs_to_utc_arr() (singleowner method)": [[23, "reV.SAM.econ.SingleOwner.outputs_to_utc_arr"]], "ppa_price() (singleowner method)": [[23, "reV.SAM.econ.SingleOwner.ppa_price"]], "pysam (singleowner property)": [[23, "reV.SAM.econ.SingleOwner.pysam"]], "rev_run() (singleowner class method)": [[23, "reV.SAM.econ.SingleOwner.reV_run"]], "site (singleowner property)": [[23, "reV.SAM.econ.SingleOwner.site"]], "rev.sam.generation": [[24, "module-reV.SAM.generation"]], "abstractsamgeneration (class in rev.sam.generation)": [[25, "reV.SAM.generation.AbstractSamGeneration"]], "outage_config_key (abstractsamgeneration attribute)": [[25, "reV.SAM.generation.AbstractSamGeneration.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (abstractsamgeneration attribute)": [[25, "reV.SAM.generation.AbstractSamGeneration.OUTAGE_SEED_CONFIG_KEY"]], "pysam (abstractsamgeneration attribute)": [[25, "reV.SAM.generation.AbstractSamGeneration.PYSAM"]], "add_scheduled_losses() (abstractsamgeneration method)": [[25, "reV.SAM.generation.AbstractSamGeneration.add_scheduled_losses"]], "annual_energy() (abstractsamgeneration method)": [[25, "reV.SAM.generation.AbstractSamGeneration.annual_energy"]], "assign_inputs() (abstractsamgeneration method)": [[25, "reV.SAM.generation.AbstractSamGeneration.assign_inputs"]], "attr_dict (abstractsamgeneration property)": [[25, "reV.SAM.generation.AbstractSamGeneration.attr_dict"]], "cf_mean() (abstractsamgeneration method)": [[25, 
"reV.SAM.generation.AbstractSamGeneration.cf_mean"]], "cf_profile() (abstractsamgeneration method)": [[25, "reV.SAM.generation.AbstractSamGeneration.cf_profile"]], "check_resource_data() (abstractsamgeneration method)": [[25, "reV.SAM.generation.AbstractSamGeneration.check_resource_data"]], "collect_outputs() (abstractsamgeneration method)": [[25, "reV.SAM.generation.AbstractSamGeneration.collect_outputs"]], "default() (abstractsamgeneration class method)": [[25, "reV.SAM.generation.AbstractSamGeneration.default"]], "drop_leap() (abstractsamgeneration static method)": [[25, "reV.SAM.generation.AbstractSamGeneration.drop_leap"]], "energy_yield() (abstractsamgeneration method)": [[25, "reV.SAM.generation.AbstractSamGeneration.energy_yield"]], "ensure_res_len() (abstractsamgeneration static method)": [[25, "reV.SAM.generation.AbstractSamGeneration.ensure_res_len"]], "execute() (abstractsamgeneration method)": [[25, "reV.SAM.generation.AbstractSamGeneration.execute"]], "gen_profile() (abstractsamgeneration method)": [[25, "reV.SAM.generation.AbstractSamGeneration.gen_profile"]], "get_sam_res() (abstractsamgeneration static method)": [[25, "reV.SAM.generation.AbstractSamGeneration.get_sam_res"]], "get_time_interval() (abstractsamgeneration class method)": [[25, "reV.SAM.generation.AbstractSamGeneration.get_time_interval"]], "has_timezone (abstractsamgeneration property)": [[25, "reV.SAM.generation.AbstractSamGeneration.has_timezone"]], "input_list (abstractsamgeneration property)": [[25, "reV.SAM.generation.AbstractSamGeneration.input_list"]], "make_datetime() (abstractsamgeneration static method)": [[25, "reV.SAM.generation.AbstractSamGeneration.make_datetime"]], "meta (abstractsamgeneration property)": [[25, "reV.SAM.generation.AbstractSamGeneration.meta"]], "module (abstractsamgeneration property)": [[25, "reV.SAM.generation.AbstractSamGeneration.module"]], "outage_seed (abstractsamgeneration property)": [[25, "reV.SAM.generation.AbstractSamGeneration.outage_seed"]], 
"outputs_to_utc_arr() (abstractsamgeneration method)": [[25, "reV.SAM.generation.AbstractSamGeneration.outputs_to_utc_arr"]], "pysam (abstractsamgeneration property)": [[25, "reV.SAM.generation.AbstractSamGeneration.pysam"]], "rev_run() (abstractsamgeneration class method)": [[25, "reV.SAM.generation.AbstractSamGeneration.reV_run"]], "run() (abstractsamgeneration method)": [[25, "reV.SAM.generation.AbstractSamGeneration.run"]], "run_gen_and_econ() (abstractsamgeneration method)": [[25, "reV.SAM.generation.AbstractSamGeneration.run_gen_and_econ"]], "set_resource_data() (abstractsamgeneration method)": [[25, "reV.SAM.generation.AbstractSamGeneration.set_resource_data"]], "site (abstractsamgeneration property)": [[25, "reV.SAM.generation.AbstractSamGeneration.site"]], "tz_elev_check() (abstractsamgeneration static method)": [[25, "reV.SAM.generation.AbstractSamGeneration.tz_elev_check"]], "abstractsamgenerationfromweatherfile (class in rev.sam.generation)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile"]], "outage_config_key (abstractsamgenerationfromweatherfile attribute)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (abstractsamgenerationfromweatherfile attribute)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.OUTAGE_SEED_CONFIG_KEY"]], "pysam (abstractsamgenerationfromweatherfile attribute)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.PYSAM"]], "pysam_weather_tag (abstractsamgenerationfromweatherfile property)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.PYSAM_WEATHER_TAG"]], "add_scheduled_losses() (abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.add_scheduled_losses"]], "annual_energy() (abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.annual_energy"]], "assign_inputs() 
(abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.assign_inputs"]], "attr_dict (abstractsamgenerationfromweatherfile property)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.attr_dict"]], "cf_mean() (abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.cf_mean"]], "cf_profile() (abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.cf_profile"]], "check_resource_data() (abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.check_resource_data"]], "collect_outputs() (abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.collect_outputs"]], "default() (abstractsamgenerationfromweatherfile class method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.default"]], "drop_leap() (abstractsamgenerationfromweatherfile static method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.drop_leap"]], "energy_yield() (abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.energy_yield"]], "ensure_res_len() (abstractsamgenerationfromweatherfile static method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.ensure_res_len"]], "execute() (abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.execute"]], "gen_profile() (abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.gen_profile"]], "get_sam_res() (abstractsamgenerationfromweatherfile static method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.get_sam_res"]], "get_time_interval() (abstractsamgenerationfromweatherfile class method)": [[26, 
"reV.SAM.generation.AbstractSamGenerationFromWeatherFile.get_time_interval"]], "has_timezone (abstractsamgenerationfromweatherfile property)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.has_timezone"]], "input_list (abstractsamgenerationfromweatherfile property)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.input_list"]], "make_datetime() (abstractsamgenerationfromweatherfile static method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.make_datetime"]], "meta (abstractsamgenerationfromweatherfile property)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.meta"]], "module (abstractsamgenerationfromweatherfile property)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.module"]], "outage_seed (abstractsamgenerationfromweatherfile property)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.outage_seed"]], "outputs_to_utc_arr() (abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.outputs_to_utc_arr"]], "pysam (abstractsamgenerationfromweatherfile property)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.pysam"]], "rev_run() (abstractsamgenerationfromweatherfile class method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.reV_run"]], "run() (abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.run"]], "run_gen_and_econ() (abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.run_gen_and_econ"]], "set_resource_data() (abstractsamgenerationfromweatherfile method)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.set_resource_data"]], "site (abstractsamgenerationfromweatherfile property)": [[26, "reV.SAM.generation.AbstractSamGenerationFromWeatherFile.site"]], "tz_elev_check() (abstractsamgenerationfromweatherfile static method)": [[26, 
"reV.SAM.generation.AbstractSamGenerationFromWeatherFile.tz_elev_check"]], "abstractsampv (class in rev.sam.generation)": [[27, "reV.SAM.generation.AbstractSamPv"]], "outage_config_key (abstractsampv attribute)": [[27, "reV.SAM.generation.AbstractSamPv.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (abstractsampv attribute)": [[27, "reV.SAM.generation.AbstractSamPv.OUTAGE_SEED_CONFIG_KEY"]], "pysam (abstractsampv attribute)": [[27, "reV.SAM.generation.AbstractSamPv.PYSAM"]], "ac() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.ac"]], "add_scheduled_losses() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.add_scheduled_losses"]], "agg_albedo() (abstractsampv static method)": [[27, "reV.SAM.generation.AbstractSamPv.agg_albedo"]], "annual_energy() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.annual_energy"]], "assign_inputs() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.assign_inputs"]], "attr_dict (abstractsampv property)": [[27, "reV.SAM.generation.AbstractSamPv.attr_dict"]], "cf_mean() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.cf_mean"]], "cf_mean_ac() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.cf_mean_ac"]], "cf_profile() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.cf_profile"]], "cf_profile_ac() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.cf_profile_ac"]], "check_resource_data() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.check_resource_data"]], "clipped_power() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.clipped_power"]], "collect_outputs() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.collect_outputs"]], "dc() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.dc"]], "default() (abstractsampv static method)": [[27, "reV.SAM.generation.AbstractSamPv.default"]], "drop_leap() (abstractsampv static method)": [[27, 
"reV.SAM.generation.AbstractSamPv.drop_leap"]], "energy_yield() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.energy_yield"]], "ensure_res_len() (abstractsampv static method)": [[27, "reV.SAM.generation.AbstractSamPv.ensure_res_len"]], "execute() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.execute"]], "gen_profile() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.gen_profile"]], "get_sam_res() (abstractsampv static method)": [[27, "reV.SAM.generation.AbstractSamPv.get_sam_res"]], "get_time_interval() (abstractsampv class method)": [[27, "reV.SAM.generation.AbstractSamPv.get_time_interval"]], "has_timezone (abstractsampv property)": [[27, "reV.SAM.generation.AbstractSamPv.has_timezone"]], "input_list (abstractsampv property)": [[27, "reV.SAM.generation.AbstractSamPv.input_list"]], "make_datetime() (abstractsampv static method)": [[27, "reV.SAM.generation.AbstractSamPv.make_datetime"]], "meta (abstractsampv property)": [[27, "reV.SAM.generation.AbstractSamPv.meta"]], "module (abstractsampv property)": [[27, "reV.SAM.generation.AbstractSamPv.module"]], "outage_seed (abstractsampv property)": [[27, "reV.SAM.generation.AbstractSamPv.outage_seed"]], "outputs_to_utc_arr() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.outputs_to_utc_arr"]], "pysam (abstractsampv property)": [[27, "reV.SAM.generation.AbstractSamPv.pysam"]], "rev_run() (abstractsampv class method)": [[27, "reV.SAM.generation.AbstractSamPv.reV_run"]], "run() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.run"]], "run_gen_and_econ() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.run_gen_and_econ"]], "set_latitude_tilt_az() (abstractsampv static method)": [[27, "reV.SAM.generation.AbstractSamPv.set_latitude_tilt_az"]], "set_resource_data() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.set_resource_data"]], "site (abstractsampv property)": [[27, 
"reV.SAM.generation.AbstractSamPv.site"]], "system_capacity_ac() (abstractsampv method)": [[27, "reV.SAM.generation.AbstractSamPv.system_capacity_ac"]], "tz_elev_check() (abstractsampv static method)": [[27, "reV.SAM.generation.AbstractSamPv.tz_elev_check"]], "abstractsamsolar (class in rev.sam.generation)": [[28, "reV.SAM.generation.AbstractSamSolar"]], "outage_config_key (abstractsamsolar attribute)": [[28, "reV.SAM.generation.AbstractSamSolar.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (abstractsamsolar attribute)": [[28, "reV.SAM.generation.AbstractSamSolar.OUTAGE_SEED_CONFIG_KEY"]], "pysam (abstractsamsolar attribute)": [[28, "reV.SAM.generation.AbstractSamSolar.PYSAM"]], "add_scheduled_losses() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.add_scheduled_losses"]], "agg_albedo() (abstractsamsolar static method)": [[28, "reV.SAM.generation.AbstractSamSolar.agg_albedo"]], "annual_energy() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.annual_energy"]], "assign_inputs() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.assign_inputs"]], "attr_dict (abstractsamsolar property)": [[28, "reV.SAM.generation.AbstractSamSolar.attr_dict"]], "cf_mean() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.cf_mean"]], "cf_profile() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.cf_profile"]], "check_resource_data() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.check_resource_data"]], "collect_outputs() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.collect_outputs"]], "default() (abstractsamsolar class method)": [[28, "reV.SAM.generation.AbstractSamSolar.default"]], "drop_leap() (abstractsamsolar static method)": [[28, "reV.SAM.generation.AbstractSamSolar.drop_leap"]], "energy_yield() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.energy_yield"]], "ensure_res_len() (abstractsamsolar static 
method)": [[28, "reV.SAM.generation.AbstractSamSolar.ensure_res_len"]], "execute() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.execute"]], "gen_profile() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.gen_profile"]], "get_sam_res() (abstractsamsolar static method)": [[28, "reV.SAM.generation.AbstractSamSolar.get_sam_res"]], "get_time_interval() (abstractsamsolar class method)": [[28, "reV.SAM.generation.AbstractSamSolar.get_time_interval"]], "has_timezone (abstractsamsolar property)": [[28, "reV.SAM.generation.AbstractSamSolar.has_timezone"]], "input_list (abstractsamsolar property)": [[28, "reV.SAM.generation.AbstractSamSolar.input_list"]], "make_datetime() (abstractsamsolar static method)": [[28, "reV.SAM.generation.AbstractSamSolar.make_datetime"]], "meta (abstractsamsolar property)": [[28, "reV.SAM.generation.AbstractSamSolar.meta"]], "module (abstractsamsolar property)": [[28, "reV.SAM.generation.AbstractSamSolar.module"]], "outage_seed (abstractsamsolar property)": [[28, "reV.SAM.generation.AbstractSamSolar.outage_seed"]], "outputs_to_utc_arr() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.outputs_to_utc_arr"]], "pysam (abstractsamsolar property)": [[28, "reV.SAM.generation.AbstractSamSolar.pysam"]], "rev_run() (abstractsamsolar class method)": [[28, "reV.SAM.generation.AbstractSamSolar.reV_run"]], "run() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.run"]], "run_gen_and_econ() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.run_gen_and_econ"]], "set_resource_data() (abstractsamsolar method)": [[28, "reV.SAM.generation.AbstractSamSolar.set_resource_data"]], "site (abstractsamsolar property)": [[28, "reV.SAM.generation.AbstractSamSolar.site"]], "tz_elev_check() (abstractsamsolar static method)": [[28, "reV.SAM.generation.AbstractSamSolar.tz_elev_check"]], "abstractsamwind (class in rev.sam.generation)": [[29, 
"reV.SAM.generation.AbstractSamWind"]], "outage_config_key (abstractsamwind attribute)": [[29, "reV.SAM.generation.AbstractSamWind.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (abstractsamwind attribute)": [[29, "reV.SAM.generation.AbstractSamWind.OUTAGE_SEED_CONFIG_KEY"]], "power_curve_config_key (abstractsamwind attribute)": [[29, "reV.SAM.generation.AbstractSamWind.POWER_CURVE_CONFIG_KEY"]], "pysam (abstractsamwind attribute)": [[29, "reV.SAM.generation.AbstractSamWind.PYSAM"]], "add_power_curve_losses() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.add_power_curve_losses"]], "add_scheduled_losses() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.add_scheduled_losses"]], "annual_energy() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.annual_energy"]], "assign_inputs() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.assign_inputs"]], "attr_dict (abstractsamwind property)": [[29, "reV.SAM.generation.AbstractSamWind.attr_dict"]], "cf_mean() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.cf_mean"]], "cf_profile() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.cf_profile"]], "check_resource_data() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.check_resource_data"]], "collect_outputs() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.collect_outputs"]], "default() (abstractsamwind class method)": [[29, "reV.SAM.generation.AbstractSamWind.default"]], "drop_leap() (abstractsamwind static method)": [[29, "reV.SAM.generation.AbstractSamWind.drop_leap"]], "energy_yield() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.energy_yield"]], "ensure_res_len() (abstractsamwind static method)": [[29, "reV.SAM.generation.AbstractSamWind.ensure_res_len"]], "execute() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.execute"]], "gen_profile() (abstractsamwind method)": 
[[29, "reV.SAM.generation.AbstractSamWind.gen_profile"]], "get_sam_res() (abstractsamwind static method)": [[29, "reV.SAM.generation.AbstractSamWind.get_sam_res"]], "get_time_interval() (abstractsamwind class method)": [[29, "reV.SAM.generation.AbstractSamWind.get_time_interval"]], "has_timezone (abstractsamwind property)": [[29, "reV.SAM.generation.AbstractSamWind.has_timezone"]], "input_list (abstractsamwind property)": [[29, "reV.SAM.generation.AbstractSamWind.input_list"]], "input_power_curve (abstractsamwind property)": [[29, "reV.SAM.generation.AbstractSamWind.input_power_curve"]], "make_datetime() (abstractsamwind static method)": [[29, "reV.SAM.generation.AbstractSamWind.make_datetime"]], "meta (abstractsamwind property)": [[29, "reV.SAM.generation.AbstractSamWind.meta"]], "module (abstractsamwind property)": [[29, "reV.SAM.generation.AbstractSamWind.module"]], "outage_seed (abstractsamwind property)": [[29, "reV.SAM.generation.AbstractSamWind.outage_seed"]], "outputs_to_utc_arr() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.outputs_to_utc_arr"]], "pysam (abstractsamwind property)": [[29, "reV.SAM.generation.AbstractSamWind.pysam"]], "rev_run() (abstractsamwind class method)": [[29, "reV.SAM.generation.AbstractSamWind.reV_run"]], "run() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.run"]], "run_gen_and_econ() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.run_gen_and_econ"]], "set_resource_data() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.set_resource_data"]], "site (abstractsamwind property)": [[29, "reV.SAM.generation.AbstractSamWind.site"]], "tz_elev_check() (abstractsamwind static method)": [[29, "reV.SAM.generation.AbstractSamWind.tz_elev_check"]], "wind_resource_from_input() (abstractsamwind method)": [[29, "reV.SAM.generation.AbstractSamWind.wind_resource_from_input"]], "geothermal (class in rev.sam.generation)": [[30, "reV.SAM.generation.Geothermal"]], 
"outage_config_key (geothermal attribute)": [[30, "reV.SAM.generation.Geothermal.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (geothermal attribute)": [[30, "reV.SAM.generation.Geothermal.OUTAGE_SEED_CONFIG_KEY"]], "pysam (geothermal attribute)": [[30, "reV.SAM.generation.Geothermal.PYSAM"]], "add_scheduled_losses() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.add_scheduled_losses"]], "annual_energy() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.annual_energy"]], "assign_inputs() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.assign_inputs"]], "attr_dict (geothermal property)": [[30, "reV.SAM.generation.Geothermal.attr_dict"]], "cf_mean() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.cf_mean"]], "cf_profile() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.cf_profile"]], "check_resource_data() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.check_resource_data"]], "collect_outputs() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.collect_outputs"]], "default() (geothermal static method)": [[30, "reV.SAM.generation.Geothermal.default"]], "drop_leap() (geothermal static method)": [[30, "reV.SAM.generation.Geothermal.drop_leap"]], "energy_yield() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.energy_yield"]], "ensure_res_len() (geothermal static method)": [[30, "reV.SAM.generation.Geothermal.ensure_res_len"]], "execute() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.execute"]], "gen_profile() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.gen_profile"]], "get_sam_res() (geothermal static method)": [[30, "reV.SAM.generation.Geothermal.get_sam_res"]], "get_time_interval() (geothermal class method)": [[30, "reV.SAM.generation.Geothermal.get_time_interval"]], "has_timezone (geothermal property)": [[30, "reV.SAM.generation.Geothermal.has_timezone"]], "input_list (geothermal property)": [[30, "reV.SAM.generation.Geothermal.input_list"]], 
"make_datetime() (geothermal static method)": [[30, "reV.SAM.generation.Geothermal.make_datetime"]], "meta (geothermal property)": [[30, "reV.SAM.generation.Geothermal.meta"]], "module (geothermal property)": [[30, "reV.SAM.generation.Geothermal.module"]], "outage_seed (geothermal property)": [[30, "reV.SAM.generation.Geothermal.outage_seed"]], "outputs_to_utc_arr() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.outputs_to_utc_arr"]], "pysam (geothermal property)": [[30, "reV.SAM.generation.Geothermal.pysam"]], "rev_run() (geothermal class method)": [[30, "reV.SAM.generation.Geothermal.reV_run"]], "run() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.run"]], "run_gen_and_econ() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.run_gen_and_econ"]], "set_resource_data() (geothermal method)": [[30, "reV.SAM.generation.Geothermal.set_resource_data"]], "site (geothermal property)": [[30, "reV.SAM.generation.Geothermal.site"]], "tz_elev_check() (geothermal static method)": [[30, "reV.SAM.generation.Geothermal.tz_elev_check"]], "lineardirectsteam (class in rev.sam.generation)": [[31, "reV.SAM.generation.LinearDirectSteam"]], "outage_config_key (lineardirectsteam attribute)": [[31, "reV.SAM.generation.LinearDirectSteam.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (lineardirectsteam attribute)": [[31, "reV.SAM.generation.LinearDirectSteam.OUTAGE_SEED_CONFIG_KEY"]], "pysam (lineardirectsteam attribute)": [[31, "reV.SAM.generation.LinearDirectSteam.PYSAM"]], "add_scheduled_losses() (lineardirectsteam method)": [[31, "reV.SAM.generation.LinearDirectSteam.add_scheduled_losses"]], "annual_energy() (lineardirectsteam method)": [[31, "reV.SAM.generation.LinearDirectSteam.annual_energy"]], "assign_inputs() (lineardirectsteam method)": [[31, "reV.SAM.generation.LinearDirectSteam.assign_inputs"]], "attr_dict (lineardirectsteam property)": [[31, "reV.SAM.generation.LinearDirectSteam.attr_dict"]], "cf_mean() (lineardirectsteam method)": [[31, 
"reV.SAM.generation.LinearDirectSteam.cf_mean"]], "cf_profile() (lineardirectsteam method)": [[31, "reV.SAM.generation.LinearDirectSteam.cf_profile"]], "check_resource_data() (lineardirectsteam method)": [[31, "reV.SAM.generation.LinearDirectSteam.check_resource_data"]], "collect_outputs() (lineardirectsteam method)": [[31, "reV.SAM.generation.LinearDirectSteam.collect_outputs"]], "default() (lineardirectsteam static method)": [[31, "reV.SAM.generation.LinearDirectSteam.default"]], "drop_leap() (lineardirectsteam static method)": [[31, "reV.SAM.generation.LinearDirectSteam.drop_leap"]], "energy_yield() (lineardirectsteam method)": [[31, "reV.SAM.generation.LinearDirectSteam.energy_yield"]], "ensure_res_len() (lineardirectsteam static method)": [[31, "reV.SAM.generation.LinearDirectSteam.ensure_res_len"]], "execute() (lineardirectsteam method)": [[31, "reV.SAM.generation.LinearDirectSteam.execute"]], "gen_profile() (lineardirectsteam method)": [[31, "reV.SAM.generation.LinearDirectSteam.gen_profile"]], "get_sam_res() (lineardirectsteam static method)": [[31, "reV.SAM.generation.LinearDirectSteam.get_sam_res"]], "get_time_interval() (lineardirectsteam class method)": [[31, "reV.SAM.generation.LinearDirectSteam.get_time_interval"]], "has_timezone (lineardirectsteam property)": [[31, "reV.SAM.generation.LinearDirectSteam.has_timezone"]], "input_list (lineardirectsteam property)": [[31, "reV.SAM.generation.LinearDirectSteam.input_list"]], "make_datetime() (lineardirectsteam static method)": [[31, "reV.SAM.generation.LinearDirectSteam.make_datetime"]], "meta (lineardirectsteam property)": [[31, "reV.SAM.generation.LinearDirectSteam.meta"]], "module (lineardirectsteam property)": [[31, "reV.SAM.generation.LinearDirectSteam.module"]], "outage_seed (lineardirectsteam property)": [[31, "reV.SAM.generation.LinearDirectSteam.outage_seed"]], "outputs_to_utc_arr() (lineardirectsteam method)": [[31, "reV.SAM.generation.LinearDirectSteam.outputs_to_utc_arr"]], "pysam 
(lineardirectsteam property)": [[31, "reV.SAM.generation.LinearDirectSteam.pysam"]], "rev_run() (lineardirectsteam class method)": [[31, "reV.SAM.generation.LinearDirectSteam.reV_run"]], "run() (lineardirectsteam method)": [[31, "reV.SAM.generation.LinearDirectSteam.run"]], "run_gen_and_econ() (lineardirectsteam method)": [[31, "reV.SAM.generation.LinearDirectSteam.run_gen_and_econ"]], "set_resource_data() (lineardirectsteam method)": [[31, "reV.SAM.generation.LinearDirectSteam.set_resource_data"]], "site (lineardirectsteam property)": [[31, "reV.SAM.generation.LinearDirectSteam.site"]], "tz_elev_check() (lineardirectsteam static method)": [[31, "reV.SAM.generation.LinearDirectSteam.tz_elev_check"]], "mhkwave (class in rev.sam.generation)": [[32, "reV.SAM.generation.MhkWave"]], "outage_config_key (mhkwave attribute)": [[32, "reV.SAM.generation.MhkWave.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (mhkwave attribute)": [[32, "reV.SAM.generation.MhkWave.OUTAGE_SEED_CONFIG_KEY"]], "pysam (mhkwave attribute)": [[32, "reV.SAM.generation.MhkWave.PYSAM"]], "add_scheduled_losses() (mhkwave method)": [[32, "reV.SAM.generation.MhkWave.add_scheduled_losses"]], "annual_energy() (mhkwave method)": [[32, "reV.SAM.generation.MhkWave.annual_energy"]], "assign_inputs() (mhkwave method)": [[32, "reV.SAM.generation.MhkWave.assign_inputs"]], "attr_dict (mhkwave property)": [[32, "reV.SAM.generation.MhkWave.attr_dict"]], "cf_mean() (mhkwave method)": [[32, "reV.SAM.generation.MhkWave.cf_mean"]], "cf_profile() (mhkwave method)": [[32, "reV.SAM.generation.MhkWave.cf_profile"]], "check_resource_data() (mhkwave method)": [[32, "reV.SAM.generation.MhkWave.check_resource_data"]], "collect_outputs() (mhkwave method)": [[32, "reV.SAM.generation.MhkWave.collect_outputs"]], "default() (mhkwave static method)": [[32, "reV.SAM.generation.MhkWave.default"]], "drop_leap() (mhkwave static method)": [[32, "reV.SAM.generation.MhkWave.drop_leap"]], "energy_yield() (mhkwave method)": [[32, 
"reV.SAM.generation.MhkWave.energy_yield"]], "ensure_res_len() (mhkwave static method)": [[32, "reV.SAM.generation.MhkWave.ensure_res_len"]], "execute() (mhkwave method)": [[32, "reV.SAM.generation.MhkWave.execute"]], "gen_profile() (mhkwave method)": [[32, "reV.SAM.generation.MhkWave.gen_profile"]], "get_sam_res() (mhkwave static method)": [[32, "reV.SAM.generation.MhkWave.get_sam_res"]], "get_time_interval() (mhkwave class method)": [[32, "reV.SAM.generation.MhkWave.get_time_interval"]], "has_timezone (mhkwave property)": [[32, "reV.SAM.generation.MhkWave.has_timezone"]], "input_list (mhkwave property)": [[32, "reV.SAM.generation.MhkWave.input_list"]], "make_datetime() (mhkwave static method)": [[32, "reV.SAM.generation.MhkWave.make_datetime"]], "meta (mhkwave property)": [[32, "reV.SAM.generation.MhkWave.meta"]], "module (mhkwave property)": [[32, "reV.SAM.generation.MhkWave.module"]], "outage_seed (mhkwave property)": [[32, "reV.SAM.generation.MhkWave.outage_seed"]], "outputs_to_utc_arr() (mhkwave method)": [[32, "reV.SAM.generation.MhkWave.outputs_to_utc_arr"]], "pysam (mhkwave property)": [[32, "reV.SAM.generation.MhkWave.pysam"]], "rev_run() (mhkwave class method)": [[32, "reV.SAM.generation.MhkWave.reV_run"]], "run() (mhkwave method)": [[32, "reV.SAM.generation.MhkWave.run"]], "run_gen_and_econ() (mhkwave method)": [[32, "reV.SAM.generation.MhkWave.run_gen_and_econ"]], "set_resource_data() (mhkwave method)": [[32, "reV.SAM.generation.MhkWave.set_resource_data"]], "site (mhkwave property)": [[32, "reV.SAM.generation.MhkWave.site"]], "tz_elev_check() (mhkwave static method)": [[32, "reV.SAM.generation.MhkWave.tz_elev_check"]], "outage_config_key (pvsamv1 attribute)": [[33, "reV.SAM.generation.PvSamv1.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (pvsamv1 attribute)": [[33, "reV.SAM.generation.PvSamv1.OUTAGE_SEED_CONFIG_KEY"]], "pysam (pvsamv1 attribute)": [[33, "reV.SAM.generation.PvSamv1.PYSAM"]], "pvsamv1 (class in rev.sam.generation)": [[33, 
"reV.SAM.generation.PvSamv1"]], "ac() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.ac"]], "add_scheduled_losses() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.add_scheduled_losses"]], "agg_albedo() (pvsamv1 static method)": [[33, "reV.SAM.generation.PvSamv1.agg_albedo"]], "annual_energy() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.annual_energy"]], "assign_inputs() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.assign_inputs"]], "attr_dict (pvsamv1 property)": [[33, "reV.SAM.generation.PvSamv1.attr_dict"]], "cf_mean() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.cf_mean"]], "cf_mean_ac() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.cf_mean_ac"]], "cf_profile() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.cf_profile"]], "cf_profile_ac() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.cf_profile_ac"]], "check_resource_data() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.check_resource_data"]], "clipped_power() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.clipped_power"]], "collect_outputs() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.collect_outputs"]], "dc() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.dc"]], "default() (pvsamv1 static method)": [[33, "reV.SAM.generation.PvSamv1.default"]], "drop_leap() (pvsamv1 static method)": [[33, "reV.SAM.generation.PvSamv1.drop_leap"]], "energy_yield() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.energy_yield"]], "ensure_res_len() (pvsamv1 static method)": [[33, "reV.SAM.generation.PvSamv1.ensure_res_len"]], "execute() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.execute"]], "gen_profile() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.gen_profile"]], "get_sam_res() (pvsamv1 static method)": [[33, "reV.SAM.generation.PvSamv1.get_sam_res"]], "get_time_interval() (pvsamv1 class method)": [[33, "reV.SAM.generation.PvSamv1.get_time_interval"]], "has_timezone (pvsamv1 property)": [[33, 
"reV.SAM.generation.PvSamv1.has_timezone"]], "input_list (pvsamv1 property)": [[33, "reV.SAM.generation.PvSamv1.input_list"]], "make_datetime() (pvsamv1 static method)": [[33, "reV.SAM.generation.PvSamv1.make_datetime"]], "meta (pvsamv1 property)": [[33, "reV.SAM.generation.PvSamv1.meta"]], "module (pvsamv1 property)": [[33, "reV.SAM.generation.PvSamv1.module"]], "outage_seed (pvsamv1 property)": [[33, "reV.SAM.generation.PvSamv1.outage_seed"]], "outputs_to_utc_arr() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.outputs_to_utc_arr"]], "pysam (pvsamv1 property)": [[33, "reV.SAM.generation.PvSamv1.pysam"]], "rev_run() (pvsamv1 class method)": [[33, "reV.SAM.generation.PvSamv1.reV_run"]], "run() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.run"]], "run_gen_and_econ() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.run_gen_and_econ"]], "set_latitude_tilt_az() (pvsamv1 static method)": [[33, "reV.SAM.generation.PvSamv1.set_latitude_tilt_az"]], "set_resource_data() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.set_resource_data"]], "site (pvsamv1 property)": [[33, "reV.SAM.generation.PvSamv1.site"]], "system_capacity_ac() (pvsamv1 method)": [[33, "reV.SAM.generation.PvSamv1.system_capacity_ac"]], "tz_elev_check() (pvsamv1 static method)": [[33, "reV.SAM.generation.PvSamv1.tz_elev_check"]], "outage_config_key (pvwattsv5 attribute)": [[34, "reV.SAM.generation.PvWattsv5.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (pvwattsv5 attribute)": [[34, "reV.SAM.generation.PvWattsv5.OUTAGE_SEED_CONFIG_KEY"]], "pysam (pvwattsv5 attribute)": [[34, "reV.SAM.generation.PvWattsv5.PYSAM"]], "pvwattsv5 (class in rev.sam.generation)": [[34, "reV.SAM.generation.PvWattsv5"]], "ac() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.ac"]], "add_scheduled_losses() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.add_scheduled_losses"]], "agg_albedo() (pvwattsv5 static method)": [[34, "reV.SAM.generation.PvWattsv5.agg_albedo"]], "annual_energy() 
(pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.annual_energy"]], "assign_inputs() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.assign_inputs"]], "attr_dict (pvwattsv5 property)": [[34, "reV.SAM.generation.PvWattsv5.attr_dict"]], "cf_mean() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.cf_mean"]], "cf_mean_ac() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.cf_mean_ac"]], "cf_profile() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.cf_profile"]], "cf_profile_ac() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.cf_profile_ac"]], "check_resource_data() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.check_resource_data"]], "clipped_power() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.clipped_power"]], "collect_outputs() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.collect_outputs"]], "dc() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.dc"]], "default() (pvwattsv5 static method)": [[34, "reV.SAM.generation.PvWattsv5.default"]], "drop_leap() (pvwattsv5 static method)": [[34, "reV.SAM.generation.PvWattsv5.drop_leap"]], "energy_yield() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.energy_yield"]], "ensure_res_len() (pvwattsv5 static method)": [[34, "reV.SAM.generation.PvWattsv5.ensure_res_len"]], "execute() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.execute"]], "gen_profile() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.gen_profile"]], "get_sam_res() (pvwattsv5 static method)": [[34, "reV.SAM.generation.PvWattsv5.get_sam_res"]], "get_time_interval() (pvwattsv5 class method)": [[34, "reV.SAM.generation.PvWattsv5.get_time_interval"]], "has_timezone (pvwattsv5 property)": [[34, "reV.SAM.generation.PvWattsv5.has_timezone"]], "input_list (pvwattsv5 property)": [[34, "reV.SAM.generation.PvWattsv5.input_list"]], "make_datetime() (pvwattsv5 static method)": [[34, "reV.SAM.generation.PvWattsv5.make_datetime"]], "meta (pvwattsv5 property)": 
[[34, "reV.SAM.generation.PvWattsv5.meta"]], "module (pvwattsv5 property)": [[34, "reV.SAM.generation.PvWattsv5.module"]], "outage_seed (pvwattsv5 property)": [[34, "reV.SAM.generation.PvWattsv5.outage_seed"]], "outputs_to_utc_arr() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.outputs_to_utc_arr"]], "pysam (pvwattsv5 property)": [[34, "reV.SAM.generation.PvWattsv5.pysam"]], "rev_run() (pvwattsv5 class method)": [[34, "reV.SAM.generation.PvWattsv5.reV_run"]], "run() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.run"]], "run_gen_and_econ() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.run_gen_and_econ"]], "set_latitude_tilt_az() (pvwattsv5 static method)": [[34, "reV.SAM.generation.PvWattsv5.set_latitude_tilt_az"]], "set_resource_data() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.set_resource_data"]], "site (pvwattsv5 property)": [[34, "reV.SAM.generation.PvWattsv5.site"]], "system_capacity_ac() (pvwattsv5 method)": [[34, "reV.SAM.generation.PvWattsv5.system_capacity_ac"]], "tz_elev_check() (pvwattsv5 static method)": [[34, "reV.SAM.generation.PvWattsv5.tz_elev_check"]], "outage_config_key (pvwattsv7 attribute)": [[35, "reV.SAM.generation.PvWattsv7.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (pvwattsv7 attribute)": [[35, "reV.SAM.generation.PvWattsv7.OUTAGE_SEED_CONFIG_KEY"]], "pysam (pvwattsv7 attribute)": [[35, "reV.SAM.generation.PvWattsv7.PYSAM"]], "pvwattsv7 (class in rev.sam.generation)": [[35, "reV.SAM.generation.PvWattsv7"]], "ac() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.ac"]], "add_scheduled_losses() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.add_scheduled_losses"]], "agg_albedo() (pvwattsv7 static method)": [[35, "reV.SAM.generation.PvWattsv7.agg_albedo"]], "annual_energy() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.annual_energy"]], "assign_inputs() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.assign_inputs"]], "attr_dict (pvwattsv7 property)": [[35, 
"reV.SAM.generation.PvWattsv7.attr_dict"]], "cf_mean() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.cf_mean"]], "cf_mean_ac() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.cf_mean_ac"]], "cf_profile() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.cf_profile"]], "cf_profile_ac() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.cf_profile_ac"]], "check_resource_data() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.check_resource_data"]], "clipped_power() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.clipped_power"]], "collect_outputs() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.collect_outputs"]], "dc() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.dc"]], "default() (pvwattsv7 static method)": [[35, "reV.SAM.generation.PvWattsv7.default"]], "drop_leap() (pvwattsv7 static method)": [[35, "reV.SAM.generation.PvWattsv7.drop_leap"]], "energy_yield() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.energy_yield"]], "ensure_res_len() (pvwattsv7 static method)": [[35, "reV.SAM.generation.PvWattsv7.ensure_res_len"]], "execute() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.execute"]], "gen_profile() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.gen_profile"]], "get_sam_res() (pvwattsv7 static method)": [[35, "reV.SAM.generation.PvWattsv7.get_sam_res"]], "get_time_interval() (pvwattsv7 class method)": [[35, "reV.SAM.generation.PvWattsv7.get_time_interval"]], "has_timezone (pvwattsv7 property)": [[35, "reV.SAM.generation.PvWattsv7.has_timezone"]], "input_list (pvwattsv7 property)": [[35, "reV.SAM.generation.PvWattsv7.input_list"]], "make_datetime() (pvwattsv7 static method)": [[35, "reV.SAM.generation.PvWattsv7.make_datetime"]], "meta (pvwattsv7 property)": [[35, "reV.SAM.generation.PvWattsv7.meta"]], "module (pvwattsv7 property)": [[35, "reV.SAM.generation.PvWattsv7.module"]], "outage_seed (pvwattsv7 property)": [[35, 
"reV.SAM.generation.PvWattsv7.outage_seed"]], "outputs_to_utc_arr() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.outputs_to_utc_arr"]], "pysam (pvwattsv7 property)": [[35, "reV.SAM.generation.PvWattsv7.pysam"]], "rev_run() (pvwattsv7 class method)": [[35, "reV.SAM.generation.PvWattsv7.reV_run"]], "run() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.run"]], "run_gen_and_econ() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.run_gen_and_econ"]], "set_latitude_tilt_az() (pvwattsv7 static method)": [[35, "reV.SAM.generation.PvWattsv7.set_latitude_tilt_az"]], "set_resource_data() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.set_resource_data"]], "site (pvwattsv7 property)": [[35, "reV.SAM.generation.PvWattsv7.site"]], "system_capacity_ac() (pvwattsv7 method)": [[35, "reV.SAM.generation.PvWattsv7.system_capacity_ac"]], "tz_elev_check() (pvwattsv7 static method)": [[35, "reV.SAM.generation.PvWattsv7.tz_elev_check"]], "outage_config_key (pvwattsv8 attribute)": [[36, "reV.SAM.generation.PvWattsv8.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (pvwattsv8 attribute)": [[36, "reV.SAM.generation.PvWattsv8.OUTAGE_SEED_CONFIG_KEY"]], "pysam (pvwattsv8 attribute)": [[36, "reV.SAM.generation.PvWattsv8.PYSAM"]], "pvwattsv8 (class in rev.sam.generation)": [[36, "reV.SAM.generation.PvWattsv8"]], "ac() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.ac"]], "add_scheduled_losses() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.add_scheduled_losses"]], "agg_albedo() (pvwattsv8 static method)": [[36, "reV.SAM.generation.PvWattsv8.agg_albedo"]], "annual_energy() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.annual_energy"]], "assign_inputs() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.assign_inputs"]], "attr_dict (pvwattsv8 property)": [[36, "reV.SAM.generation.PvWattsv8.attr_dict"]], "cf_mean() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.cf_mean"]], "cf_mean_ac() (pvwattsv8 method)": [[36, 
"reV.SAM.generation.PvWattsv8.cf_mean_ac"]], "cf_profile() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.cf_profile"]], "cf_profile_ac() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.cf_profile_ac"]], "check_resource_data() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.check_resource_data"]], "clipped_power() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.clipped_power"]], "collect_outputs() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.collect_outputs"]], "dc() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.dc"]], "default() (pvwattsv8 static method)": [[36, "reV.SAM.generation.PvWattsv8.default"]], "drop_leap() (pvwattsv8 static method)": [[36, "reV.SAM.generation.PvWattsv8.drop_leap"]], "energy_yield() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.energy_yield"]], "ensure_res_len() (pvwattsv8 static method)": [[36, "reV.SAM.generation.PvWattsv8.ensure_res_len"]], "execute() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.execute"]], "gen_profile() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.gen_profile"]], "get_sam_res() (pvwattsv8 static method)": [[36, "reV.SAM.generation.PvWattsv8.get_sam_res"]], "get_time_interval() (pvwattsv8 class method)": [[36, "reV.SAM.generation.PvWattsv8.get_time_interval"]], "has_timezone (pvwattsv8 property)": [[36, "reV.SAM.generation.PvWattsv8.has_timezone"]], "input_list (pvwattsv8 property)": [[36, "reV.SAM.generation.PvWattsv8.input_list"]], "make_datetime() (pvwattsv8 static method)": [[36, "reV.SAM.generation.PvWattsv8.make_datetime"]], "meta (pvwattsv8 property)": [[36, "reV.SAM.generation.PvWattsv8.meta"]], "module (pvwattsv8 property)": [[36, "reV.SAM.generation.PvWattsv8.module"]], "outage_seed (pvwattsv8 property)": [[36, "reV.SAM.generation.PvWattsv8.outage_seed"]], "outputs_to_utc_arr() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.outputs_to_utc_arr"]], "pysam (pvwattsv8 property)": [[36, 
"reV.SAM.generation.PvWattsv8.pysam"]], "rev_run() (pvwattsv8 class method)": [[36, "reV.SAM.generation.PvWattsv8.reV_run"]], "run() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.run"]], "run_gen_and_econ() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.run_gen_and_econ"]], "set_latitude_tilt_az() (pvwattsv8 static method)": [[36, "reV.SAM.generation.PvWattsv8.set_latitude_tilt_az"]], "set_resource_data() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.set_resource_data"]], "site (pvwattsv8 property)": [[36, "reV.SAM.generation.PvWattsv8.site"]], "system_capacity_ac() (pvwattsv8 method)": [[36, "reV.SAM.generation.PvWattsv8.system_capacity_ac"]], "tz_elev_check() (pvwattsv8 static method)": [[36, "reV.SAM.generation.PvWattsv8.tz_elev_check"]], "outage_config_key (solarwaterheat attribute)": [[37, "reV.SAM.generation.SolarWaterHeat.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (solarwaterheat attribute)": [[37, "reV.SAM.generation.SolarWaterHeat.OUTAGE_SEED_CONFIG_KEY"]], "pysam (solarwaterheat attribute)": [[37, "reV.SAM.generation.SolarWaterHeat.PYSAM"]], "solarwaterheat (class in rev.sam.generation)": [[37, "reV.SAM.generation.SolarWaterHeat"]], "add_scheduled_losses() (solarwaterheat method)": [[37, "reV.SAM.generation.SolarWaterHeat.add_scheduled_losses"]], "annual_energy() (solarwaterheat method)": [[37, "reV.SAM.generation.SolarWaterHeat.annual_energy"]], "assign_inputs() (solarwaterheat method)": [[37, "reV.SAM.generation.SolarWaterHeat.assign_inputs"]], "attr_dict (solarwaterheat property)": [[37, "reV.SAM.generation.SolarWaterHeat.attr_dict"]], "cf_mean() (solarwaterheat method)": [[37, "reV.SAM.generation.SolarWaterHeat.cf_mean"]], "cf_profile() (solarwaterheat method)": [[37, "reV.SAM.generation.SolarWaterHeat.cf_profile"]], "check_resource_data() (solarwaterheat method)": [[37, "reV.SAM.generation.SolarWaterHeat.check_resource_data"]], "collect_outputs() (solarwaterheat method)": [[37, 
"reV.SAM.generation.SolarWaterHeat.collect_outputs"]], "default() (solarwaterheat static method)": [[37, "reV.SAM.generation.SolarWaterHeat.default"]], "drop_leap() (solarwaterheat static method)": [[37, "reV.SAM.generation.SolarWaterHeat.drop_leap"]], "energy_yield() (solarwaterheat method)": [[37, "reV.SAM.generation.SolarWaterHeat.energy_yield"]], "ensure_res_len() (solarwaterheat static method)": [[37, "reV.SAM.generation.SolarWaterHeat.ensure_res_len"]], "execute() (solarwaterheat method)": [[37, "reV.SAM.generation.SolarWaterHeat.execute"]], "gen_profile() (solarwaterheat method)": [[37, "reV.SAM.generation.SolarWaterHeat.gen_profile"]], "get_sam_res() (solarwaterheat static method)": [[37, "reV.SAM.generation.SolarWaterHeat.get_sam_res"]], "get_time_interval() (solarwaterheat class method)": [[37, "reV.SAM.generation.SolarWaterHeat.get_time_interval"]], "has_timezone (solarwaterheat property)": [[37, "reV.SAM.generation.SolarWaterHeat.has_timezone"]], "input_list (solarwaterheat property)": [[37, "reV.SAM.generation.SolarWaterHeat.input_list"]], "make_datetime() (solarwaterheat static method)": [[37, "reV.SAM.generation.SolarWaterHeat.make_datetime"]], "meta (solarwaterheat property)": [[37, "reV.SAM.generation.SolarWaterHeat.meta"]], "module (solarwaterheat property)": [[37, "reV.SAM.generation.SolarWaterHeat.module"]], "outage_seed (solarwaterheat property)": [[37, "reV.SAM.generation.SolarWaterHeat.outage_seed"]], "outputs_to_utc_arr() (solarwaterheat method)": [[37, "reV.SAM.generation.SolarWaterHeat.outputs_to_utc_arr"]], "pysam (solarwaterheat property)": [[37, "reV.SAM.generation.SolarWaterHeat.pysam"]], "rev_run() (solarwaterheat class method)": [[37, "reV.SAM.generation.SolarWaterHeat.reV_run"]], "run() (solarwaterheat method)": [[37, "reV.SAM.generation.SolarWaterHeat.run"]], "run_gen_and_econ() (solarwaterheat method)": [[37, "reV.SAM.generation.SolarWaterHeat.run_gen_and_econ"]], "set_resource_data() (solarwaterheat method)": [[37, 
"reV.SAM.generation.SolarWaterHeat.set_resource_data"]], "site (solarwaterheat property)": [[37, "reV.SAM.generation.SolarWaterHeat.site"]], "tz_elev_check() (solarwaterheat static method)": [[37, "reV.SAM.generation.SolarWaterHeat.tz_elev_check"]], "outage_config_key (tcsmoltensalt attribute)": [[38, "reV.SAM.generation.TcsMoltenSalt.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (tcsmoltensalt attribute)": [[38, "reV.SAM.generation.TcsMoltenSalt.OUTAGE_SEED_CONFIG_KEY"]], "pysam (tcsmoltensalt attribute)": [[38, "reV.SAM.generation.TcsMoltenSalt.PYSAM"]], "tcsmoltensalt (class in rev.sam.generation)": [[38, "reV.SAM.generation.TcsMoltenSalt"]], "add_scheduled_losses() (tcsmoltensalt method)": [[38, "reV.SAM.generation.TcsMoltenSalt.add_scheduled_losses"]], "agg_albedo() (tcsmoltensalt static method)": [[38, "reV.SAM.generation.TcsMoltenSalt.agg_albedo"]], "annual_energy() (tcsmoltensalt method)": [[38, "reV.SAM.generation.TcsMoltenSalt.annual_energy"]], "assign_inputs() (tcsmoltensalt method)": [[38, "reV.SAM.generation.TcsMoltenSalt.assign_inputs"]], "attr_dict (tcsmoltensalt property)": [[38, "reV.SAM.generation.TcsMoltenSalt.attr_dict"]], "cf_mean() (tcsmoltensalt method)": [[38, "reV.SAM.generation.TcsMoltenSalt.cf_mean"]], "cf_profile() (tcsmoltensalt method)": [[38, "reV.SAM.generation.TcsMoltenSalt.cf_profile"]], "check_resource_data() (tcsmoltensalt method)": [[38, "reV.SAM.generation.TcsMoltenSalt.check_resource_data"]], "collect_outputs() (tcsmoltensalt method)": [[38, "reV.SAM.generation.TcsMoltenSalt.collect_outputs"]], "default() (tcsmoltensalt static method)": [[38, "reV.SAM.generation.TcsMoltenSalt.default"]], "drop_leap() (tcsmoltensalt static method)": [[38, "reV.SAM.generation.TcsMoltenSalt.drop_leap"]], "energy_yield() (tcsmoltensalt method)": [[38, "reV.SAM.generation.TcsMoltenSalt.energy_yield"]], "ensure_res_len() (tcsmoltensalt static method)": [[38, "reV.SAM.generation.TcsMoltenSalt.ensure_res_len"]], "execute() (tcsmoltensalt method)": 
[[38, "reV.SAM.generation.TcsMoltenSalt.execute"]], "gen_profile() (tcsmoltensalt method)": [[38, "reV.SAM.generation.TcsMoltenSalt.gen_profile"]], "get_sam_res() (tcsmoltensalt static method)": [[38, "reV.SAM.generation.TcsMoltenSalt.get_sam_res"]], "get_time_interval() (tcsmoltensalt class method)": [[38, "reV.SAM.generation.TcsMoltenSalt.get_time_interval"]], "has_timezone (tcsmoltensalt property)": [[38, "reV.SAM.generation.TcsMoltenSalt.has_timezone"]], "input_list (tcsmoltensalt property)": [[38, "reV.SAM.generation.TcsMoltenSalt.input_list"]], "make_datetime() (tcsmoltensalt static method)": [[38, "reV.SAM.generation.TcsMoltenSalt.make_datetime"]], "meta (tcsmoltensalt property)": [[38, "reV.SAM.generation.TcsMoltenSalt.meta"]], "module (tcsmoltensalt property)": [[38, "reV.SAM.generation.TcsMoltenSalt.module"]], "outage_seed (tcsmoltensalt property)": [[38, "reV.SAM.generation.TcsMoltenSalt.outage_seed"]], "outputs_to_utc_arr() (tcsmoltensalt method)": [[38, "reV.SAM.generation.TcsMoltenSalt.outputs_to_utc_arr"]], "pysam (tcsmoltensalt property)": [[38, "reV.SAM.generation.TcsMoltenSalt.pysam"]], "rev_run() (tcsmoltensalt class method)": [[38, "reV.SAM.generation.TcsMoltenSalt.reV_run"]], "run() (tcsmoltensalt method)": [[38, "reV.SAM.generation.TcsMoltenSalt.run"]], "run_gen_and_econ() (tcsmoltensalt method)": [[38, "reV.SAM.generation.TcsMoltenSalt.run_gen_and_econ"]], "set_resource_data() (tcsmoltensalt method)": [[38, "reV.SAM.generation.TcsMoltenSalt.set_resource_data"]], "site (tcsmoltensalt property)": [[38, "reV.SAM.generation.TcsMoltenSalt.site"]], "tz_elev_check() (tcsmoltensalt static method)": [[38, "reV.SAM.generation.TcsMoltenSalt.tz_elev_check"]], "outage_config_key (troughphysicalheat attribute)": [[39, "reV.SAM.generation.TroughPhysicalHeat.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (troughphysicalheat attribute)": [[39, "reV.SAM.generation.TroughPhysicalHeat.OUTAGE_SEED_CONFIG_KEY"]], "pysam (troughphysicalheat attribute)": [[39, 
"reV.SAM.generation.TroughPhysicalHeat.PYSAM"]], "troughphysicalheat (class in rev.sam.generation)": [[39, "reV.SAM.generation.TroughPhysicalHeat"]], "add_scheduled_losses() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.add_scheduled_losses"]], "annual_energy() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.annual_energy"]], "assign_inputs() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.assign_inputs"]], "attr_dict (troughphysicalheat property)": [[39, "reV.SAM.generation.TroughPhysicalHeat.attr_dict"]], "cf_mean() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.cf_mean"]], "cf_profile() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.cf_profile"]], "check_resource_data() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.check_resource_data"]], "collect_outputs() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.collect_outputs"]], "default() (troughphysicalheat static method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.default"]], "drop_leap() (troughphysicalheat static method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.drop_leap"]], "energy_yield() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.energy_yield"]], "ensure_res_len() (troughphysicalheat static method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.ensure_res_len"]], "execute() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.execute"]], "gen_profile() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.gen_profile"]], "get_sam_res() (troughphysicalheat static method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.get_sam_res"]], "get_time_interval() (troughphysicalheat class method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.get_time_interval"]], "has_timezone (troughphysicalheat property)": [[39, 
"reV.SAM.generation.TroughPhysicalHeat.has_timezone"]], "input_list (troughphysicalheat property)": [[39, "reV.SAM.generation.TroughPhysicalHeat.input_list"]], "make_datetime() (troughphysicalheat static method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.make_datetime"]], "meta (troughphysicalheat property)": [[39, "reV.SAM.generation.TroughPhysicalHeat.meta"]], "module (troughphysicalheat property)": [[39, "reV.SAM.generation.TroughPhysicalHeat.module"]], "outage_seed (troughphysicalheat property)": [[39, "reV.SAM.generation.TroughPhysicalHeat.outage_seed"]], "outputs_to_utc_arr() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.outputs_to_utc_arr"]], "pysam (troughphysicalheat property)": [[39, "reV.SAM.generation.TroughPhysicalHeat.pysam"]], "rev_run() (troughphysicalheat class method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.reV_run"]], "run() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.run"]], "run_gen_and_econ() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.run_gen_and_econ"]], "set_resource_data() (troughphysicalheat method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.set_resource_data"]], "site (troughphysicalheat property)": [[39, "reV.SAM.generation.TroughPhysicalHeat.site"]], "tz_elev_check() (troughphysicalheat static method)": [[39, "reV.SAM.generation.TroughPhysicalHeat.tz_elev_check"]], "outage_config_key (windpower attribute)": [[40, "reV.SAM.generation.WindPower.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (windpower attribute)": [[40, "reV.SAM.generation.WindPower.OUTAGE_SEED_CONFIG_KEY"]], "power_curve_config_key (windpower attribute)": [[40, "reV.SAM.generation.WindPower.POWER_CURVE_CONFIG_KEY"]], "pysam (windpower attribute)": [[40, "reV.SAM.generation.WindPower.PYSAM"]], "windpower (class in rev.sam.generation)": [[40, "reV.SAM.generation.WindPower"]], "add_power_curve_losses() (windpower method)": [[40, 
"reV.SAM.generation.WindPower.add_power_curve_losses"]], "add_scheduled_losses() (windpower method)": [[40, "reV.SAM.generation.WindPower.add_scheduled_losses"]], "annual_energy() (windpower method)": [[40, "reV.SAM.generation.WindPower.annual_energy"]], "assign_inputs() (windpower method)": [[40, "reV.SAM.generation.WindPower.assign_inputs"]], "attr_dict (windpower property)": [[40, "reV.SAM.generation.WindPower.attr_dict"]], "cf_mean() (windpower method)": [[40, "reV.SAM.generation.WindPower.cf_mean"]], "cf_profile() (windpower method)": [[40, "reV.SAM.generation.WindPower.cf_profile"]], "check_resource_data() (windpower method)": [[40, "reV.SAM.generation.WindPower.check_resource_data"]], "collect_outputs() (windpower method)": [[40, "reV.SAM.generation.WindPower.collect_outputs"]], "default() (windpower static method)": [[40, "reV.SAM.generation.WindPower.default"]], "drop_leap() (windpower static method)": [[40, "reV.SAM.generation.WindPower.drop_leap"]], "energy_yield() (windpower method)": [[40, "reV.SAM.generation.WindPower.energy_yield"]], "ensure_res_len() (windpower static method)": [[40, "reV.SAM.generation.WindPower.ensure_res_len"]], "execute() (windpower method)": [[40, "reV.SAM.generation.WindPower.execute"]], "gen_profile() (windpower method)": [[40, "reV.SAM.generation.WindPower.gen_profile"]], "get_sam_res() (windpower static method)": [[40, "reV.SAM.generation.WindPower.get_sam_res"]], "get_time_interval() (windpower class method)": [[40, "reV.SAM.generation.WindPower.get_time_interval"]], "has_timezone (windpower property)": [[40, "reV.SAM.generation.WindPower.has_timezone"]], "input_list (windpower property)": [[40, "reV.SAM.generation.WindPower.input_list"]], "input_power_curve (windpower property)": [[40, "reV.SAM.generation.WindPower.input_power_curve"]], "make_datetime() (windpower static method)": [[40, "reV.SAM.generation.WindPower.make_datetime"]], "meta (windpower property)": [[40, "reV.SAM.generation.WindPower.meta"]], "module 
(windpower property)": [[40, "reV.SAM.generation.WindPower.module"]], "outage_seed (windpower property)": [[40, "reV.SAM.generation.WindPower.outage_seed"]], "outputs_to_utc_arr() (windpower method)": [[40, "reV.SAM.generation.WindPower.outputs_to_utc_arr"]], "pysam (windpower property)": [[40, "reV.SAM.generation.WindPower.pysam"]], "rev_run() (windpower class method)": [[40, "reV.SAM.generation.WindPower.reV_run"]], "run() (windpower method)": [[40, "reV.SAM.generation.WindPower.run"]], "run_gen_and_econ() (windpower method)": [[40, "reV.SAM.generation.WindPower.run_gen_and_econ"]], "set_resource_data() (windpower method)": [[40, "reV.SAM.generation.WindPower.set_resource_data"]], "site (windpower property)": [[40, "reV.SAM.generation.WindPower.site"]], "tz_elev_check() (windpower static method)": [[40, "reV.SAM.generation.WindPower.tz_elev_check"]], "wind_resource_from_input() (windpower method)": [[40, "reV.SAM.generation.WindPower.wind_resource_from_input"]], "outage_config_key (windpowerpd attribute)": [[41, "reV.SAM.generation.WindPowerPD.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (windpowerpd attribute)": [[41, "reV.SAM.generation.WindPowerPD.OUTAGE_SEED_CONFIG_KEY"]], "power_curve_config_key (windpowerpd attribute)": [[41, "reV.SAM.generation.WindPowerPD.POWER_CURVE_CONFIG_KEY"]], "pysam (windpowerpd attribute)": [[41, "reV.SAM.generation.WindPowerPD.PYSAM"]], "windpowerpd (class in rev.sam.generation)": [[41, "reV.SAM.generation.WindPowerPD"]], "add_power_curve_losses() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.add_power_curve_losses"]], "add_scheduled_losses() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.add_scheduled_losses"]], "annual_energy() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.annual_energy"]], "assign_inputs() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.assign_inputs"]], "attr_dict (windpowerpd property)": [[41, "reV.SAM.generation.WindPowerPD.attr_dict"]], "cf_mean() 
(windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.cf_mean"]], "cf_profile() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.cf_profile"]], "check_resource_data() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.check_resource_data"]], "collect_outputs() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.collect_outputs"]], "default() (windpowerpd class method)": [[41, "reV.SAM.generation.WindPowerPD.default"]], "drop_leap() (windpowerpd static method)": [[41, "reV.SAM.generation.WindPowerPD.drop_leap"]], "energy_yield() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.energy_yield"]], "ensure_res_len() (windpowerpd static method)": [[41, "reV.SAM.generation.WindPowerPD.ensure_res_len"]], "execute() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.execute"]], "gen_profile() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.gen_profile"]], "get_sam_res() (windpowerpd static method)": [[41, "reV.SAM.generation.WindPowerPD.get_sam_res"]], "get_time_interval() (windpowerpd class method)": [[41, "reV.SAM.generation.WindPowerPD.get_time_interval"]], "has_timezone (windpowerpd property)": [[41, "reV.SAM.generation.WindPowerPD.has_timezone"]], "input_list (windpowerpd property)": [[41, "reV.SAM.generation.WindPowerPD.input_list"]], "input_power_curve (windpowerpd property)": [[41, "reV.SAM.generation.WindPowerPD.input_power_curve"]], "make_datetime() (windpowerpd static method)": [[41, "reV.SAM.generation.WindPowerPD.make_datetime"]], "meta (windpowerpd property)": [[41, "reV.SAM.generation.WindPowerPD.meta"]], "module (windpowerpd property)": [[41, "reV.SAM.generation.WindPowerPD.module"]], "outage_seed (windpowerpd property)": [[41, "reV.SAM.generation.WindPowerPD.outage_seed"]], "outputs_to_utc_arr() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.outputs_to_utc_arr"]], "pysam (windpowerpd property)": [[41, "reV.SAM.generation.WindPowerPD.pysam"]], "rev_run() (windpowerpd class 
method)": [[41, "reV.SAM.generation.WindPowerPD.reV_run"]], "run() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.run"]], "run_gen_and_econ() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.run_gen_and_econ"]], "set_resource_data() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.set_resource_data"]], "site (windpowerpd property)": [[41, "reV.SAM.generation.WindPowerPD.site"]], "tz_elev_check() (windpowerpd static method)": [[41, "reV.SAM.generation.WindPowerPD.tz_elev_check"]], "wind_resource_from_input() (windpowerpd method)": [[41, "reV.SAM.generation.WindPowerPD.wind_resource_from_input"]], "rev.sam.version_checker": [[42, "module-reV.SAM.version_checker"]], "pysamversionchecker (class in rev.sam.version_checker)": [[43, "reV.SAM.version_checker.PySamVersionChecker"]], "pysam_version (pysamversionchecker property)": [[43, "reV.SAM.version_checker.PySamVersionChecker.pysam_version"]], "run() (pysamversionchecker class method)": [[43, "reV.SAM.version_checker.PySamVersionChecker.run"]], "rev.sam.windbos": [[44, "module-reV.SAM.windbos"]], "windbos (class in rev.sam.windbos)": [[45, "reV.SAM.windbos.WindBos"]], "bos_cost (windbos property)": [[45, "reV.SAM.windbos.WindBos.bos_cost"]], "hub_height (windbos property)": [[45, "reV.SAM.windbos.WindBos.hub_height"]], "machine_rating (windbos property)": [[45, "reV.SAM.windbos.WindBos.machine_rating"]], "number_of_turbines (windbos property)": [[45, "reV.SAM.windbos.WindBos.number_of_turbines"]], "output (windbos property)": [[45, "reV.SAM.windbos.WindBos.output"]], "rev_run() (windbos class method)": [[45, "reV.SAM.windbos.WindBos.reV_run"]], "rotor_diameter (windbos property)": [[45, "reV.SAM.windbos.WindBos.rotor_diameter"]], "sales_tax_cost (windbos property)": [[45, "reV.SAM.windbos.WindBos.sales_tax_cost"]], "sales_tax_mult (windbos property)": [[45, "reV.SAM.windbos.WindBos.sales_tax_mult"]], "total_installed_cost (windbos property)": [[45, 
"reV.SAM.windbos.WindBos.total_installed_cost"]], "turbine_capital_cost (windbos property)": [[45, "reV.SAM.windbos.WindBos.turbine_capital_cost"]], "turbine_cost (windbos property)": [[45, "reV.SAM.windbos.WindBos.turbine_cost"]], "rev.bespoke": [[46, "module-reV.bespoke"]], "rev.bespoke.bespoke": [[47, "module-reV.bespoke.bespoke"]], "bespokemultiplantdata (class in rev.bespoke.bespoke)": [[48, "reV.bespoke.bespoke.BespokeMultiPlantData"]], "get_preloaded_data_for_gid() (bespokemultiplantdata method)": [[48, "reV.bespoke.bespoke.BespokeMultiPlantData.get_preloaded_data_for_gid"]], "bespokesingleplant (class in rev.bespoke.bespoke)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant"]], "agg_data_layers() (bespokesingleplant method)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.agg_data_layers"]], "annual_time_indexes (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.annual_time_indexes"]], "check_dependencies() (bespokesingleplant class method)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.check_dependencies"]], "close() (bespokesingleplant method)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.close"]], "get_lcoe_kwargs() (bespokesingleplant method)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.get_lcoe_kwargs"]], "get_weighted_res_dir() (bespokesingleplant method)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.get_weighted_res_dir"]], "get_weighted_res_ts() (bespokesingleplant method)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.get_weighted_res_ts"]], "get_wind_handler() (bespokesingleplant static method)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.get_wind_handler"]], "gid (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.gid"]], "hub_height (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.hub_height"]], "include_mask (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.include_mask"]], "initialize_wind_plant_ts() (bespokesingleplant 
method)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.initialize_wind_plant_ts"]], "meta (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.meta"]], "original_sam_sys_inputs (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.original_sam_sys_inputs"]], "outputs (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.outputs"]], "pixel_side_length (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.pixel_side_length"]], "plant_optimizer (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.plant_optimizer"]], "recalc_lcoe() (bespokesingleplant method)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.recalc_lcoe"]], "res_df (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.res_df"]], "run() (bespokesingleplant class method)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.run"]], "run_plant_optimization() (bespokesingleplant method)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.run_plant_optimization"]], "run_wind_plant_ts() (bespokesingleplant method)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.run_wind_plant_ts"]], "sam_sys_inputs (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.sam_sys_inputs"]], "sc_point (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.sc_point"]], "wind_dist (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.wind_dist"]], "wind_plant_pd (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.wind_plant_pd"]], "wind_plant_ts (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.wind_plant_ts"]], "years (bespokesingleplant property)": [[49, "reV.bespoke.bespoke.BespokeSinglePlant.years"]], "bespokesingleplantdata (class in rev.bespoke.bespoke)": [[50, "reV.bespoke.bespoke.BespokeSinglePlantData"]], "bespokewindplants (class in rev.bespoke.bespoke)": [[51, 
"reV.bespoke.bespoke.BespokeWindPlants"]], "completed_gids (bespokewindplants property)": [[51, "reV.bespoke.bespoke.BespokeWindPlants.completed_gids"]], "gids (bespokewindplants property)": [[51, "reV.bespoke.bespoke.BespokeWindPlants.gids"]], "meta (bespokewindplants property)": [[51, "reV.bespoke.bespoke.BespokeWindPlants.meta"]], "outputs (bespokewindplants property)": [[51, "reV.bespoke.bespoke.BespokeWindPlants.outputs"]], "run() (bespokewindplants method)": [[51, "reV.bespoke.bespoke.BespokeWindPlants.run"]], "run_parallel() (bespokewindplants method)": [[51, "reV.bespoke.bespoke.BespokeWindPlants.run_parallel"]], "run_serial() (bespokewindplants class method)": [[51, "reV.bespoke.bespoke.BespokeWindPlants.run_serial"]], "sam_sys_inputs_with_site_data() (bespokewindplants method)": [[51, "reV.bespoke.bespoke.BespokeWindPlants.sam_sys_inputs_with_site_data"]], "save_outputs() (bespokewindplants method)": [[51, "reV.bespoke.bespoke.BespokeWindPlants.save_outputs"]], "shape (bespokewindplants property)": [[51, "reV.bespoke.bespoke.BespokeWindPlants.shape"]], "slice_lookup (bespokewindplants property)": [[51, "reV.bespoke.bespoke.BespokeWindPlants.slice_lookup"]], "rev.bespoke.cli_bespoke": [[52, "module-reV.bespoke.cli_bespoke"]], "rev.bespoke.gradient_free": [[53, "module-reV.bespoke.gradient_free"]], "geneticalgorithm (class in rev.bespoke.gradient_free)": [[54, "reV.bespoke.gradient_free.GeneticAlgorithm"]], "chromosome_2_variables() (geneticalgorithm method)": [[54, "reV.bespoke.gradient_free.GeneticAlgorithm.chromosome_2_variables"]], "crossover() (geneticalgorithm method)": [[54, "reV.bespoke.gradient_free.GeneticAlgorithm.crossover"]], "initialize_bits() (geneticalgorithm method)": [[54, "reV.bespoke.gradient_free.GeneticAlgorithm.initialize_bits"]], "initialize_design_variables() (geneticalgorithm method)": [[54, "reV.bespoke.gradient_free.GeneticAlgorithm.initialize_design_variables"]], "initialize_fitness() (geneticalgorithm method)": [[54, 
"reV.bespoke.gradient_free.GeneticAlgorithm.initialize_fitness"]], "initialize_population() (geneticalgorithm method)": [[54, "reV.bespoke.gradient_free.GeneticAlgorithm.initialize_population"]], "mutate() (geneticalgorithm method)": [[54, "reV.bespoke.gradient_free.GeneticAlgorithm.mutate"]], "optimize_ga() (geneticalgorithm method)": [[54, "reV.bespoke.gradient_free.GeneticAlgorithm.optimize_ga"]], "rev.bespoke.pack_turbs": [[55, "module-reV.bespoke.pack_turbs"]], "packturbines (class in rev.bespoke.pack_turbs)": [[56, "reV.bespoke.pack_turbs.PackTurbines"]], "clear() (packturbines method)": [[56, "reV.bespoke.pack_turbs.PackTurbines.clear"]], "pack_turbines_poly() (packturbines method)": [[56, "reV.bespoke.pack_turbs.PackTurbines.pack_turbines_poly"]], "smallest_area_with_tiebreakers() (in module rev.bespoke.pack_turbs)": [[57, "reV.bespoke.pack_turbs.smallest_area_with_tiebreakers"]], "rev.bespoke.place_turbines": [[58, "module-reV.bespoke.place_turbines"]], "placeturbines (class in rev.bespoke.place_turbines)": [[59, "reV.bespoke.place_turbines.PlaceTurbines"]], "aep (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.aep"]], "area (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.area"]], "capacity (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.capacity"]], "capacity_density (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.capacity_density"]], "capital_cost (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.capital_cost"]], "capital_cost_per_kw() (placeturbines method)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.capital_cost_per_kw"]], "convex_hull (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.convex_hull"]], "convex_hull_area (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.convex_hull_area"]], "convex_hull_capacity_density (placeturbines property)": [[59, 
"reV.bespoke.place_turbines.PlaceTurbines.convex_hull_capacity_density"]], "define_exclusions() (placeturbines method)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.define_exclusions"]], "fixed_charge_rate (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.fixed_charge_rate"]], "fixed_operating_cost (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.fixed_operating_cost"]], "full_cell_area (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.full_cell_area"]], "full_cell_capacity_density (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.full_cell_capacity_density"]], "initialize_packing() (placeturbines method)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.initialize_packing"]], "nturbs (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.nturbs"]], "objective (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.objective"]], "optimization_objective() (placeturbines method)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.optimization_objective"]], "optimize() (placeturbines method)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.optimize"]], "place_turbines() (placeturbines method)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.place_turbines"]], "turbine_x (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.turbine_x"]], "turbine_y (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.turbine_y"]], "variable_operating_cost (placeturbines property)": [[59, "reV.bespoke.place_turbines.PlaceTurbines.variable_operating_cost"]], "none_until_optimized() (in module rev.bespoke.place_turbines)": [[60, "reV.bespoke.place_turbines.none_until_optimized"]], "rev.bespoke.plotting_functions": [[61, "module-reV.bespoke.plotting_functions"]], "get_xy() (in module rev.bespoke.plotting_functions)": [[62, "reV.bespoke.plotting_functions.get_xy"]], "plot_poly() (in module 
rev.bespoke.plotting_functions)": [[63, "reV.bespoke.plotting_functions.plot_poly"]], "plot_turbines() (in module rev.bespoke.plotting_functions)": [[64, "reV.bespoke.plotting_functions.plot_turbines"]], "plot_windrose() (in module rev.bespoke.plotting_functions)": [[65, "reV.bespoke.plotting_functions.plot_windrose"]], "rev.cli": [[66, "module-reV.cli"]], "rev.config": [[67, "module-reV.config"]], "rev.config.base_analysis_config": [[68, "module-reV.config.base_analysis_config"]], "analysisconfig (class in rev.config.base_analysis_config)": [[69, "reV.config.base_analysis_config.AnalysisConfig"]], "requirements (analysisconfig attribute)": [[69, "reV.config.base_analysis_config.AnalysisConfig.REQUIREMENTS"]], "str_rep (analysisconfig attribute)": [[69, "reV.config.base_analysis_config.AnalysisConfig.STR_REP"]], "analysis_years (analysisconfig property)": [[69, "reV.config.base_analysis_config.AnalysisConfig.analysis_years"]], "check_files() (analysisconfig static method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.check_files"]], "check_overwrite_keys() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.check_overwrite_keys"]], "clear() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.clear"]], "config_dir (analysisconfig property)": [[69, "reV.config.base_analysis_config.AnalysisConfig.config_dir"]], "config_keys (analysisconfig property)": [[69, "reV.config.base_analysis_config.AnalysisConfig.config_keys"]], "copy() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.copy"]], "execution_control (analysisconfig property)": [[69, "reV.config.base_analysis_config.AnalysisConfig.execution_control"]], "fromkeys() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.fromkeys"]], "get() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.get"]], "items() (analysisconfig method)": [[69, 
"reV.config.base_analysis_config.AnalysisConfig.items"]], "keys() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.keys"]], "log_directory (analysisconfig property)": [[69, "reV.config.base_analysis_config.AnalysisConfig.log_directory"]], "log_level (analysisconfig property)": [[69, "reV.config.base_analysis_config.AnalysisConfig.log_level"]], "name (analysisconfig property)": [[69, "reV.config.base_analysis_config.AnalysisConfig.name"]], "pop() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.pop"]], "popitem() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.popitem"]], "resolve_path() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.resolve_path"]], "set_self_dict() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.set_self_dict"]], "setdefault() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.setdefault"]], "str_replace_and_resolve() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.str_replace_and_resolve"]], "update() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.update"]], "values() (analysisconfig method)": [[69, "reV.config.base_analysis_config.AnalysisConfig.values"]], "rev.config.base_config": [[70, "module-reV.config.base_config"]], "baseconfig (class in rev.config.base_config)": [[71, "reV.config.base_config.BaseConfig"]], "requirements (baseconfig attribute)": [[71, "reV.config.base_config.BaseConfig.REQUIREMENTS"]], "str_rep (baseconfig attribute)": [[71, "reV.config.base_config.BaseConfig.STR_REP"]], "check_files() (baseconfig static method)": [[71, "reV.config.base_config.BaseConfig.check_files"]], "check_overwrite_keys() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.check_overwrite_keys"]], "clear() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.clear"]], "config_dir 
(baseconfig property)": [[71, "reV.config.base_config.BaseConfig.config_dir"]], "config_keys (baseconfig property)": [[71, "reV.config.base_config.BaseConfig.config_keys"]], "copy() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.copy"]], "fromkeys() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.fromkeys"]], "get() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.get"]], "items() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.items"]], "keys() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.keys"]], "log_level (baseconfig property)": [[71, "reV.config.base_config.BaseConfig.log_level"]], "name (baseconfig property)": [[71, "reV.config.base_config.BaseConfig.name"]], "pop() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.pop"]], "popitem() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.popitem"]], "resolve_path() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.resolve_path"]], "set_self_dict() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.set_self_dict"]], "setdefault() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.setdefault"]], "str_replace_and_resolve() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.str_replace_and_resolve"]], "update() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.update"]], "values() (baseconfig method)": [[71, "reV.config.base_config.BaseConfig.values"]], "rev.config.cli_project_points": [[72, "module-reV.config.cli_project_points"]], "rev.config.curtailment": [[73, "module-reV.config.curtailment"]], "curtailment (class in rev.config.curtailment)": [[74, "reV.config.curtailment.Curtailment"]], "requirements (curtailment attribute)": [[74, "reV.config.curtailment.Curtailment.REQUIREMENTS"]], "str_rep (curtailment attribute)": [[74, "reV.config.curtailment.Curtailment.STR_REP"]], "check_files() (curtailment static method)": [[74, 
"reV.config.curtailment.Curtailment.check_files"]], "check_overwrite_keys() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.check_overwrite_keys"]], "clear() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.clear"]], "config_dir (curtailment property)": [[74, "reV.config.curtailment.Curtailment.config_dir"]], "config_keys (curtailment property)": [[74, "reV.config.curtailment.Curtailment.config_keys"]], "copy() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.copy"]], "date_range (curtailment property)": [[74, "reV.config.curtailment.Curtailment.date_range"]], "dawn_dusk (curtailment property)": [[74, "reV.config.curtailment.Curtailment.dawn_dusk"]], "equation (curtailment property)": [[74, "reV.config.curtailment.Curtailment.equation"]], "fromkeys() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.fromkeys"]], "get() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.get"]], "items() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.items"]], "keys() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.keys"]], "log_level (curtailment property)": [[74, "reV.config.curtailment.Curtailment.log_level"]], "months (curtailment property)": [[74, "reV.config.curtailment.Curtailment.months"]], "name (curtailment property)": [[74, "reV.config.curtailment.Curtailment.name"]], "pop() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.pop"]], "popitem() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.popitem"]], "precipitation (curtailment property)": [[74, "reV.config.curtailment.Curtailment.precipitation"]], "probability (curtailment property)": [[74, "reV.config.curtailment.Curtailment.probability"]], "random_seed (curtailment property)": [[74, "reV.config.curtailment.Curtailment.random_seed"]], "resolve_path() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.resolve_path"]], "set_self_dict() (curtailment method)": [[74, 
"reV.config.curtailment.Curtailment.set_self_dict"]], "setdefault() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.setdefault"]], "str_replace_and_resolve() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.str_replace_and_resolve"]], "temperature (curtailment property)": [[74, "reV.config.curtailment.Curtailment.temperature"]], "update() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.update"]], "values() (curtailment method)": [[74, "reV.config.curtailment.Curtailment.values"]], "wind_speed (curtailment property)": [[74, "reV.config.curtailment.Curtailment.wind_speed"]], "rev.config.execution": [[75, "module-reV.config.execution"]], "baseexecutionconfig (class in rev.config.execution)": [[76, "reV.config.execution.BaseExecutionConfig"]], "requirements (baseexecutionconfig attribute)": [[76, "reV.config.execution.BaseExecutionConfig.REQUIREMENTS"]], "str_rep (baseexecutionconfig attribute)": [[76, "reV.config.execution.BaseExecutionConfig.STR_REP"]], "check_files() (baseexecutionconfig static method)": [[76, "reV.config.execution.BaseExecutionConfig.check_files"]], "check_overwrite_keys() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.check_overwrite_keys"]], "clear() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.clear"]], "config_dir (baseexecutionconfig property)": [[76, "reV.config.execution.BaseExecutionConfig.config_dir"]], "config_keys (baseexecutionconfig property)": [[76, "reV.config.execution.BaseExecutionConfig.config_keys"]], "copy() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.copy"]], "fromkeys() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.fromkeys"]], "get() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.get"]], "items() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.items"]], "keys() (baseexecutionconfig 
method)": [[76, "reV.config.execution.BaseExecutionConfig.keys"]], "log_level (baseexecutionconfig property)": [[76, "reV.config.execution.BaseExecutionConfig.log_level"]], "max_workers (baseexecutionconfig property)": [[76, "reV.config.execution.BaseExecutionConfig.max_workers"]], "memory_utilization_limit (baseexecutionconfig property)": [[76, "reV.config.execution.BaseExecutionConfig.memory_utilization_limit"]], "name (baseexecutionconfig property)": [[76, "reV.config.execution.BaseExecutionConfig.name"]], "nodes (baseexecutionconfig property)": [[76, "reV.config.execution.BaseExecutionConfig.nodes"]], "option (baseexecutionconfig property)": [[76, "reV.config.execution.BaseExecutionConfig.option"]], "pop() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.pop"]], "popitem() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.popitem"]], "resolve_path() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.resolve_path"]], "set_self_dict() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.set_self_dict"]], "setdefault() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.setdefault"]], "sh_script (baseexecutionconfig property)": [[76, "reV.config.execution.BaseExecutionConfig.sh_script"]], "sites_per_worker (baseexecutionconfig property)": [[76, "reV.config.execution.BaseExecutionConfig.sites_per_worker"]], "str_replace_and_resolve() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.str_replace_and_resolve"]], "update() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.update"]], "values() (baseexecutionconfig method)": [[76, "reV.config.execution.BaseExecutionConfig.values"]], "hpcconfig (class in rev.config.execution)": [[77, "reV.config.execution.HPCConfig"]], "requirements (hpcconfig attribute)": [[77, "reV.config.execution.HPCConfig.REQUIREMENTS"]], "str_rep 
(hpcconfig attribute)": [[77, "reV.config.execution.HPCConfig.STR_REP"]], "allocation (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.allocation"]], "check_files() (hpcconfig static method)": [[77, "reV.config.execution.HPCConfig.check_files"]], "check_overwrite_keys() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.check_overwrite_keys"]], "clear() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.clear"]], "conda_env (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.conda_env"]], "config_dir (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.config_dir"]], "config_keys (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.config_keys"]], "copy() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.copy"]], "feature (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.feature"]], "fromkeys() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.fromkeys"]], "get() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.get"]], "items() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.items"]], "keys() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.keys"]], "log_level (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.log_level"]], "max_workers (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.max_workers"]], "memory_utilization_limit (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.memory_utilization_limit"]], "module (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.module"]], "name (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.name"]], "nodes (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.nodes"]], "option (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.option"]], "pop() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.pop"]], "popitem() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.popitem"]], "resolve_path() (hpcconfig method)": [[77, 
"reV.config.execution.HPCConfig.resolve_path"]], "set_self_dict() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.set_self_dict"]], "setdefault() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.setdefault"]], "sh_script (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.sh_script"]], "sites_per_worker (hpcconfig property)": [[77, "reV.config.execution.HPCConfig.sites_per_worker"]], "str_replace_and_resolve() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.str_replace_and_resolve"]], "update() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.update"]], "values() (hpcconfig method)": [[77, "reV.config.execution.HPCConfig.values"]], "requirements (slurmconfig attribute)": [[78, "reV.config.execution.SlurmConfig.REQUIREMENTS"]], "str_rep (slurmconfig attribute)": [[78, "reV.config.execution.SlurmConfig.STR_REP"]], "slurmconfig (class in rev.config.execution)": [[78, "reV.config.execution.SlurmConfig"]], "allocation (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.allocation"]], "check_files() (slurmconfig static method)": [[78, "reV.config.execution.SlurmConfig.check_files"]], "check_overwrite_keys() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.check_overwrite_keys"]], "clear() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.clear"]], "conda_env (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.conda_env"]], "config_dir (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.config_dir"]], "config_keys (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.config_keys"]], "copy() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.copy"]], "feature (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.feature"]], "fromkeys() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.fromkeys"]], "get() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.get"]], "items() (slurmconfig method)": [[78, 
"reV.config.execution.SlurmConfig.items"]], "keys() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.keys"]], "log_level (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.log_level"]], "max_workers (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.max_workers"]], "memory (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.memory"]], "memory_utilization_limit (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.memory_utilization_limit"]], "module (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.module"]], "name (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.name"]], "nodes (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.nodes"]], "option (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.option"]], "pop() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.pop"]], "popitem() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.popitem"]], "resolve_path() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.resolve_path"]], "set_self_dict() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.set_self_dict"]], "setdefault() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.setdefault"]], "sh_script (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.sh_script"]], "sites_per_worker (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.sites_per_worker"]], "str_replace_and_resolve() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.str_replace_and_resolve"]], "update() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.update"]], "values() (slurmconfig method)": [[78, "reV.config.execution.SlurmConfig.values"]], "walltime (slurmconfig property)": [[78, "reV.config.execution.SlurmConfig.walltime"]], "rev.config.output_request": [[79, "module-reV.config.output_request"]], "outputrequest (class in rev.config.output_request)": [[80, 
"reV.config.output_request.OutputRequest"]], "__add__() (outputrequest method)": [[80, "reV.config.output_request.OutputRequest.__add__"]], "__mul__() (outputrequest method)": [[80, "reV.config.output_request.OutputRequest.__mul__"]], "append() (outputrequest method)": [[80, "reV.config.output_request.OutputRequest.append"]], "clear() (outputrequest method)": [[80, "reV.config.output_request.OutputRequest.clear"]], "copy() (outputrequest method)": [[80, "reV.config.output_request.OutputRequest.copy"]], "count() (outputrequest method)": [[80, "reV.config.output_request.OutputRequest.count"]], "extend() (outputrequest method)": [[80, "reV.config.output_request.OutputRequest.extend"]], "index() (outputrequest method)": [[80, "reV.config.output_request.OutputRequest.index"]], "insert() (outputrequest method)": [[80, "reV.config.output_request.OutputRequest.insert"]], "pop() (outputrequest method)": [[80, "reV.config.output_request.OutputRequest.pop"]], "remove() (outputrequest method)": [[80, "reV.config.output_request.OutputRequest.remove"]], "reverse() (outputrequest method)": [[80, "reV.config.output_request.OutputRequest.reverse"]], "sort() (outputrequest method)": [[80, "reV.config.output_request.OutputRequest.sort"]], "samoutputrequest (class in rev.config.output_request)": [[81, "reV.config.output_request.SAMOutputRequest"]], "__add__() (samoutputrequest method)": [[81, "reV.config.output_request.SAMOutputRequest.__add__"]], "__mul__() (samoutputrequest method)": [[81, "reV.config.output_request.SAMOutputRequest.__mul__"]], "append() (samoutputrequest method)": [[81, "reV.config.output_request.SAMOutputRequest.append"]], "clear() (samoutputrequest method)": [[81, "reV.config.output_request.SAMOutputRequest.clear"]], "copy() (samoutputrequest method)": [[81, "reV.config.output_request.SAMOutputRequest.copy"]], "count() (samoutputrequest method)": [[81, "reV.config.output_request.SAMOutputRequest.count"]], "extend() (samoutputrequest method)": [[81, 
"reV.config.output_request.SAMOutputRequest.extend"]], "index() (samoutputrequest method)": [[81, "reV.config.output_request.SAMOutputRequest.index"]], "insert() (samoutputrequest method)": [[81, "reV.config.output_request.SAMOutputRequest.insert"]], "pop() (samoutputrequest method)": [[81, "reV.config.output_request.SAMOutputRequest.pop"]], "remove() (samoutputrequest method)": [[81, "reV.config.output_request.SAMOutputRequest.remove"]], "reverse() (samoutputrequest method)": [[81, "reV.config.output_request.SAMOutputRequest.reverse"]], "sort() (samoutputrequest method)": [[81, "reV.config.output_request.SAMOutputRequest.sort"]], "rev.config.project_points": [[82, "module-reV.config.project_points"]], "n (pointscontrol property)": [[83, "reV.config.project_points.PointsControl.N"]], "pointscontrol (class in rev.config.project_points)": [[83, "reV.config.project_points.PointsControl"]], "project_points (pointscontrol property)": [[83, "reV.config.project_points.PointsControl.project_points"]], "sites (pointscontrol property)": [[83, "reV.config.project_points.PointsControl.sites"]], "sites_per_split (pointscontrol property)": [[83, "reV.config.project_points.PointsControl.sites_per_split"]], "split() (pointscontrol class method)": [[83, "reV.config.project_points.PointsControl.split"]], "split_range (pointscontrol property)": [[83, "reV.config.project_points.PointsControl.split_range"]], "projectpoints (class in rev.config.project_points)": [[84, "reV.config.project_points.ProjectPoints"]], "all_sam_input_keys (projectpoints property)": [[84, "reV.config.project_points.ProjectPoints.all_sam_input_keys"]], "curtailment (projectpoints property)": [[84, "reV.config.project_points.ProjectPoints.curtailment"]], "d (projectpoints property)": [[84, "reV.config.project_points.ProjectPoints.d"]], "df (projectpoints property)": [[84, "reV.config.project_points.ProjectPoints.df"]], "get_sites_from_config() (projectpoints method)": [[84, 
"reV.config.project_points.ProjectPoints.get_sites_from_config"]], "gids (projectpoints property)": [[84, "reV.config.project_points.ProjectPoints.gids"]], "h (projectpoints property)": [[84, "reV.config.project_points.ProjectPoints.h"]], "index() (projectpoints method)": [[84, "reV.config.project_points.ProjectPoints.index"]], "join_df() (projectpoints method)": [[84, "reV.config.project_points.ProjectPoints.join_df"]], "lat_lon_coords() (projectpoints class method)": [[84, "reV.config.project_points.ProjectPoints.lat_lon_coords"]], "regions() (projectpoints class method)": [[84, "reV.config.project_points.ProjectPoints.regions"]], "sam_config_ids (projectpoints property)": [[84, "reV.config.project_points.ProjectPoints.sam_config_ids"]], "sam_config_obj (projectpoints property)": [[84, "reV.config.project_points.ProjectPoints.sam_config_obj"]], "sam_inputs (projectpoints property)": [[84, "reV.config.project_points.ProjectPoints.sam_inputs"]], "sites (projectpoints property)": [[84, "reV.config.project_points.ProjectPoints.sites"]], "sites_as_slice (projectpoints property)": [[84, "reV.config.project_points.ProjectPoints.sites_as_slice"]], "split() (projectpoints class method)": [[84, "reV.config.project_points.ProjectPoints.split"]], "tech (projectpoints property)": [[84, "reV.config.project_points.ProjectPoints.tech"]], "rev.config.sam_config": [[85, "module-reV.config.sam_config"]], "requirements (samconfig attribute)": [[86, "reV.config.sam_config.SAMConfig.REQUIREMENTS"]], "samconfig (class in rev.config.sam_config)": [[86, "reV.config.sam_config.SAMConfig"]], "str_rep (samconfig attribute)": [[86, "reV.config.sam_config.SAMConfig.STR_REP"]], "bifacial (samconfig property)": [[86, "reV.config.sam_config.SAMConfig.bifacial"]], "check_files() (samconfig static method)": [[86, "reV.config.sam_config.SAMConfig.check_files"]], "check_overwrite_keys() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.check_overwrite_keys"]], "clear() (samconfig method)": 
[[86, "reV.config.sam_config.SAMConfig.clear"]], "clearsky (samconfig property)": [[86, "reV.config.sam_config.SAMConfig.clearsky"]], "config_dir (samconfig property)": [[86, "reV.config.sam_config.SAMConfig.config_dir"]], "config_keys (samconfig property)": [[86, "reV.config.sam_config.SAMConfig.config_keys"]], "copy() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.copy"]], "downscale (samconfig property)": [[86, "reV.config.sam_config.SAMConfig.downscale"]], "fromkeys() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.fromkeys"]], "get() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.get"]], "icing (samconfig property)": [[86, "reV.config.sam_config.SAMConfig.icing"]], "inputs (samconfig property)": [[86, "reV.config.sam_config.SAMConfig.inputs"]], "items() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.items"]], "keys() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.keys"]], "log_level (samconfig property)": [[86, "reV.config.sam_config.SAMConfig.log_level"]], "name (samconfig property)": [[86, "reV.config.sam_config.SAMConfig.name"]], "pop() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.pop"]], "popitem() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.popitem"]], "resolve_path() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.resolve_path"]], "set_self_dict() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.set_self_dict"]], "setdefault() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.setdefault"]], "str_replace_and_resolve() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.str_replace_and_resolve"]], "time_index_step (samconfig property)": [[86, "reV.config.sam_config.SAMConfig.time_index_step"]], "update() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.update"]], "values() (samconfig method)": [[86, "reV.config.sam_config.SAMConfig.values"]], "saminputschecker (class in rev.config.sam_config)": [[87, 
"reV.config.sam_config.SAMInputsChecker"]], "check() (saminputschecker class method)": [[87, "reV.config.sam_config.SAMInputsChecker.check"]], "check_pv() (saminputschecker method)": [[87, "reV.config.sam_config.SAMInputsChecker.check_pv"]], "rev.econ": [[88, "module-reV.econ"]], "rev.econ.cli_econ": [[89, "module-reV.econ.cli_econ"]], "rev.econ.econ": [[90, "module-reV.econ.econ"]], "econ (class in rev.econ.econ)": [[91, "reV.econ.econ.Econ"]], "options (econ attribute)": [[91, "reV.econ.econ.Econ.OPTIONS"]], "add_site_data_to_pp() (econ method)": [[91, "reV.econ.econ.Econ.add_site_data_to_pp"]], "cf_file (econ property)": [[91, "reV.econ.econ.Econ.cf_file"]], "flush() (econ method)": [[91, "reV.econ.econ.Econ.flush"]], "get_pc() (econ class method)": [[91, "reV.econ.econ.Econ.get_pc"]], "get_sites_per_worker() (econ static method)": [[91, "reV.econ.econ.Econ.get_sites_per_worker"]], "handle_leap_ti() (econ static method)": [[91, "reV.econ.econ.Econ.handle_leap_ti"]], "meta (econ property)": [[91, "reV.econ.econ.Econ.meta"]], "out (econ property)": [[91, "reV.econ.econ.Econ.out"]], "out_chunk (econ property)": [[91, "reV.econ.econ.Econ.out_chunk"]], "output_request (econ property)": [[91, "reV.econ.econ.Econ.output_request"]], "points_control (econ property)": [[91, "reV.econ.econ.Econ.points_control"]], "project_points (econ property)": [[91, "reV.econ.econ.Econ.project_points"]], "run() (econ method)": [[91, "reV.econ.econ.Econ.run"]], "run_attrs (econ property)": [[91, "reV.econ.econ.Econ.run_attrs"]], "sam_configs (econ property)": [[91, "reV.econ.econ.Econ.sam_configs"]], "sam_metas (econ property)": [[91, "reV.econ.econ.Econ.sam_metas"]], "sam_module (econ property)": [[91, "reV.econ.econ.Econ.sam_module"]], "site_data (econ property)": [[91, "reV.econ.econ.Econ.site_data"]], "site_index() (econ method)": [[91, "reV.econ.econ.Econ.site_index"]], "site_limit (econ property)": [[91, "reV.econ.econ.Econ.site_limit"]], "site_mem (econ property)": [[91, 
"reV.econ.econ.Econ.site_mem"]], "tech (econ property)": [[91, "reV.econ.econ.Econ.tech"]], "time_index (econ property)": [[91, "reV.econ.econ.Econ.time_index"]], "unpack_futures() (econ static method)": [[91, "reV.econ.econ.Econ.unpack_futures"]], "unpack_output() (econ method)": [[91, "reV.econ.econ.Econ.unpack_output"]], "year (econ property)": [[91, "reV.econ.econ.Econ.year"]], "rev.econ.economies_of_scale": [[92, "module-reV.econ.economies_of_scale"]], "economiesofscale (class in rev.econ.economies_of_scale)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale"]], "aep (economiesofscale property)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale.aep"]], "capital_cost_scalar (economiesofscale property)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale.capital_cost_scalar"]], "fcr (economiesofscale property)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale.fcr"]], "foc (economiesofscale property)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale.foc"]], "is_method() (economiesofscale static method)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale.is_method"]], "is_num() (economiesofscale static method)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale.is_num"]], "raw_capital_cost (economiesofscale property)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale.raw_capital_cost"]], "raw_lcoe (economiesofscale property)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale.raw_lcoe"]], "scaled_capital_cost (economiesofscale property)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale.scaled_capital_cost"]], "scaled_lcoe (economiesofscale property)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale.scaled_lcoe"]], "system_capacity (economiesofscale property)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale.system_capacity"]], "vars (economiesofscale property)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale.vars"]], "voc (economiesofscale property)": [[93, "reV.econ.economies_of_scale.EconomiesOfScale.voc"]], 
"rev.econ.utilities": [[94, "module-reV.econ.utilities"]], "lcoe_fcr() (in module rev.econ.utilities)": [[95, "reV.econ.utilities.lcoe_fcr"]], "rev.generation": [[96, "module-reV.generation"]], "rev.generation.base": [[97, "module-reV.generation.base"]], "basegen (class in rev.generation.base)": [[98, "reV.generation.base.BaseGen"]], "add_site_data_to_pp() (basegen method)": [[98, "reV.generation.base.BaseGen.add_site_data_to_pp"]], "flush() (basegen method)": [[98, "reV.generation.base.BaseGen.flush"]], "get_pc() (basegen class method)": [[98, "reV.generation.base.BaseGen.get_pc"]], "get_sites_per_worker() (basegen static method)": [[98, "reV.generation.base.BaseGen.get_sites_per_worker"]], "handle_leap_ti() (basegen static method)": [[98, "reV.generation.base.BaseGen.handle_leap_ti"]], "meta (basegen property)": [[98, "reV.generation.base.BaseGen.meta"]], "out (basegen property)": [[98, "reV.generation.base.BaseGen.out"]], "out_chunk (basegen property)": [[98, "reV.generation.base.BaseGen.out_chunk"]], "output_request (basegen property)": [[98, "reV.generation.base.BaseGen.output_request"]], "points_control (basegen property)": [[98, "reV.generation.base.BaseGen.points_control"]], "project_points (basegen property)": [[98, "reV.generation.base.BaseGen.project_points"]], "run_attrs (basegen property)": [[98, "reV.generation.base.BaseGen.run_attrs"]], "sam_configs (basegen property)": [[98, "reV.generation.base.BaseGen.sam_configs"]], "sam_metas (basegen property)": [[98, "reV.generation.base.BaseGen.sam_metas"]], "sam_module (basegen property)": [[98, "reV.generation.base.BaseGen.sam_module"]], "site_data (basegen property)": [[98, "reV.generation.base.BaseGen.site_data"]], "site_index() (basegen method)": [[98, "reV.generation.base.BaseGen.site_index"]], "site_limit (basegen property)": [[98, "reV.generation.base.BaseGen.site_limit"]], "site_mem (basegen property)": [[98, "reV.generation.base.BaseGen.site_mem"]], "tech (basegen property)": [[98, 
"reV.generation.base.BaseGen.tech"]], "time_index (basegen property)": [[98, "reV.generation.base.BaseGen.time_index"]], "unpack_futures() (basegen static method)": [[98, "reV.generation.base.BaseGen.unpack_futures"]], "unpack_output() (basegen method)": [[98, "reV.generation.base.BaseGen.unpack_output"]], "year (basegen property)": [[98, "reV.generation.base.BaseGen.year"]], "rev.generation.cli_gen": [[99, "module-reV.generation.cli_gen"]], "rev.generation.generation": [[100, "module-reV.generation.generation"]], "gen (class in rev.generation.generation)": [[101, "reV.generation.generation.Gen"]], "options (gen attribute)": [[101, "reV.generation.generation.Gen.OPTIONS"]], "add_site_data_to_pp() (gen method)": [[101, "reV.generation.generation.Gen.add_site_data_to_pp"]], "flush() (gen method)": [[101, "reV.generation.generation.Gen.flush"]], "get_pc() (gen class method)": [[101, "reV.generation.generation.Gen.get_pc"]], "get_sites_per_worker() (gen static method)": [[101, "reV.generation.generation.Gen.get_sites_per_worker"]], "handle_leap_ti() (gen static method)": [[101, "reV.generation.generation.Gen.handle_leap_ti"]], "lr_res_file (gen property)": [[101, "reV.generation.generation.Gen.lr_res_file"]], "meta (gen property)": [[101, "reV.generation.generation.Gen.meta"]], "out (gen property)": [[101, "reV.generation.generation.Gen.out"]], "out_chunk (gen property)": [[101, "reV.generation.generation.Gen.out_chunk"]], "output_request (gen property)": [[101, "reV.generation.generation.Gen.output_request"]], "points_control (gen property)": [[101, "reV.generation.generation.Gen.points_control"]], "project_points (gen property)": [[101, "reV.generation.generation.Gen.project_points"]], "res_file (gen property)": [[101, "reV.generation.generation.Gen.res_file"]], "run() (gen method)": [[101, "reV.generation.generation.Gen.run"]], "run_attrs (gen property)": [[101, "reV.generation.generation.Gen.run_attrs"]], "sam_configs (gen property)": [[101, 
"reV.generation.generation.Gen.sam_configs"]], "sam_metas (gen property)": [[101, "reV.generation.generation.Gen.sam_metas"]], "sam_module (gen property)": [[101, "reV.generation.generation.Gen.sam_module"]], "site_data (gen property)": [[101, "reV.generation.generation.Gen.site_data"]], "site_index() (gen method)": [[101, "reV.generation.generation.Gen.site_index"]], "site_limit (gen property)": [[101, "reV.generation.generation.Gen.site_limit"]], "site_mem (gen property)": [[101, "reV.generation.generation.Gen.site_mem"]], "tech (gen property)": [[101, "reV.generation.generation.Gen.tech"]], "time_index (gen property)": [[101, "reV.generation.generation.Gen.time_index"]], "unpack_futures() (gen static method)": [[101, "reV.generation.generation.Gen.unpack_futures"]], "unpack_output() (gen method)": [[101, "reV.generation.generation.Gen.unpack_output"]], "year (gen property)": [[101, "reV.generation.generation.Gen.year"]], "rev.handlers": [[102, "module-reV.handlers"]], "rev.handlers.cli_collect": [[103, "module-reV.handlers.cli_collect"]], "rev.handlers.cli_multi_year": [[104, "module-reV.handlers.cli_multi_year"]], "rev.handlers.exclusions": [[105, "module-reV.handlers.exclusions"]], "exclusionlayers (class in rev.handlers.exclusions)": [[106, "reV.handlers.exclusions.ExclusionLayers"]], "chunks (exclusionlayers property)": [[106, "reV.handlers.exclusions.ExclusionLayers.chunks"]], "close() (exclusionlayers method)": [[106, "reV.handlers.exclusions.ExclusionLayers.close"]], "crs (exclusionlayers property)": [[106, "reV.handlers.exclusions.ExclusionLayers.crs"]], "get_layer_crs() (exclusionlayers method)": [[106, "reV.handlers.exclusions.ExclusionLayers.get_layer_crs"]], "get_layer_description() (exclusionlayers method)": [[106, "reV.handlers.exclusions.ExclusionLayers.get_layer_description"]], "get_layer_profile() (exclusionlayers method)": [[106, "reV.handlers.exclusions.ExclusionLayers.get_layer_profile"]], "get_layer_values() (exclusionlayers method)": [[106, 
"reV.handlers.exclusions.ExclusionLayers.get_layer_values"]], "get_nodata_value() (exclusionlayers method)": [[106, "reV.handlers.exclusions.ExclusionLayers.get_nodata_value"]], "h5 (exclusionlayers property)": [[106, "reV.handlers.exclusions.ExclusionLayers.h5"]], "iarr (exclusionlayers property)": [[106, "reV.handlers.exclusions.ExclusionLayers.iarr"]], "latitude (exclusionlayers property)": [[106, "reV.handlers.exclusions.ExclusionLayers.latitude"]], "layers (exclusionlayers property)": [[106, "reV.handlers.exclusions.ExclusionLayers.layers"]], "longitude (exclusionlayers property)": [[106, "reV.handlers.exclusions.ExclusionLayers.longitude"]], "pixel_area (exclusionlayers property)": [[106, "reV.handlers.exclusions.ExclusionLayers.pixel_area"]], "profile (exclusionlayers property)": [[106, "reV.handlers.exclusions.ExclusionLayers.profile"]], "shape (exclusionlayers property)": [[106, "reV.handlers.exclusions.ExclusionLayers.shape"]], "rev.handlers.multi_year": [[107, "module-reV.handlers.multi_year"]], "cv() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.CV"]], "multiyear (class in rev.handlers.multi_year)": [[108, "reV.handlers.multi_year.MultiYear"]], "sam_configs (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.SAM_configs"]], "add_dataset() (multiyear class method)": [[108, "reV.handlers.multi_year.MultiYear.add_dataset"]], "adders (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.adders"]], "attrs (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.attrs"]], "chunks (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.chunks"]], "close() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.close"]], "collect() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.collect"]], "collect_means() (multiyear class method)": [[108, "reV.handlers.multi_year.MultiYear.collect_means"]], "collect_profiles() (multiyear class method)": [[108, 
"reV.handlers.multi_year.MultiYear.collect_profiles"]], "coordinates (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.coordinates"]], "data_version (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.data_version"]], "datasets (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.datasets"]], "df_str_decode() (multiyear static method)": [[108, "reV.handlers.multi_year.MultiYear.df_str_decode"]], "dsets (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.dsets"]], "dtypes (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.dtypes"]], "full_version_record (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.full_version_record"]], "get_sam_df() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.get_SAM_df"]], "get_attrs() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.get_attrs"]], "get_config() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.get_config"]], "get_dset_properties() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.get_dset_properties"]], "get_meta_arr() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.get_meta_arr"]], "get_scale_factor() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.get_scale_factor"]], "get_units() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.get_units"]], "global_attrs (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.global_attrs"]], "groups (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.groups"]], "h5 (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.h5"]], "init_h5() (multiyear class method)": [[108, "reV.handlers.multi_year.MultiYear.init_h5"]], "is_profile() (multiyear class method)": [[108, "reV.handlers.multi_year.MultiYear.is_profile"]], "lat_lon (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.lat_lon"]], "means() (multiyear method)": [[108, 
"reV.handlers.multi_year.MultiYear.means"]], "meta (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.meta"]], "open_dataset() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.open_dataset"]], "package (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.package"]], "parse_source_files_pattern() (multiyear static method)": [[108, "reV.handlers.multi_year.MultiYear.parse_source_files_pattern"]], "pass_through() (multiyear class method)": [[108, "reV.handlers.multi_year.MultiYear.pass_through"]], "preload_sam() (multiyear class method)": [[108, "reV.handlers.multi_year.MultiYear.preload_SAM"]], "res_dsets (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.res_dsets"]], "resource_datasets (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.resource_datasets"]], "run_attrs (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.run_attrs"]], "scale_factors (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.scale_factors"]], "set_configs() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.set_configs"]], "set_version_attr() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.set_version_attr"]], "shape (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.shape"]], "shapes (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.shapes"]], "source (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.source"]], "stdev() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.stdev"]], "time_index (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.time_index"]], "units (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.units"]], "update_dset() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.update_dset"]], "version (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.version"]], "writable (multiyear property)": [[108, "reV.handlers.multi_year.MultiYear.writable"]], 
"write_dataset() (multiyear method)": [[108, "reV.handlers.multi_year.MultiYear.write_dataset"]], "write_means() (multiyear class method)": [[108, "reV.handlers.multi_year.MultiYear.write_means"]], "write_profiles() (multiyear class method)": [[108, "reV.handlers.multi_year.MultiYear.write_profiles"]], "multiyeargroup (class in rev.handlers.multi_year)": [[109, "reV.handlers.multi_year.MultiYearGroup"]], "dsets (multiyeargroup property)": [[109, "reV.handlers.multi_year.MultiYearGroup.dsets"]], "name (multiyeargroup property)": [[109, "reV.handlers.multi_year.MultiYearGroup.name"]], "pass_through_dsets (multiyeargroup property)": [[109, "reV.handlers.multi_year.MultiYearGroup.pass_through_dsets"]], "source_files (multiyeargroup property)": [[109, "reV.handlers.multi_year.MultiYearGroup.source_files"]], "my_collect_groups() (in module rev.handlers.multi_year)": [[110, "reV.handlers.multi_year.my_collect_groups"]], "rev.handlers.outputs": [[111, "module-reV.handlers.outputs"]], "outputs (class in rev.handlers.outputs)": [[112, "reV.handlers.outputs.Outputs"]], "sam_configs (outputs property)": [[112, "reV.handlers.outputs.Outputs.SAM_configs"]], "add_dataset() (outputs class method)": [[112, "reV.handlers.outputs.Outputs.add_dataset"]], "adders (outputs property)": [[112, "reV.handlers.outputs.Outputs.adders"]], "attrs (outputs property)": [[112, "reV.handlers.outputs.Outputs.attrs"]], "chunks (outputs property)": [[112, "reV.handlers.outputs.Outputs.chunks"]], "close() (outputs method)": [[112, "reV.handlers.outputs.Outputs.close"]], "coordinates (outputs property)": [[112, "reV.handlers.outputs.Outputs.coordinates"]], "data_version (outputs property)": [[112, "reV.handlers.outputs.Outputs.data_version"]], "datasets (outputs property)": [[112, "reV.handlers.outputs.Outputs.datasets"]], "df_str_decode() (outputs static method)": [[112, "reV.handlers.outputs.Outputs.df_str_decode"]], "dsets (outputs property)": [[112, "reV.handlers.outputs.Outputs.dsets"]], "dtypes 
(outputs property)": [[112, "reV.handlers.outputs.Outputs.dtypes"]], "full_version_record (outputs property)": [[112, "reV.handlers.outputs.Outputs.full_version_record"]], "get_sam_df() (outputs method)": [[112, "reV.handlers.outputs.Outputs.get_SAM_df"]], "get_attrs() (outputs method)": [[112, "reV.handlers.outputs.Outputs.get_attrs"]], "get_config() (outputs method)": [[112, "reV.handlers.outputs.Outputs.get_config"]], "get_dset_properties() (outputs method)": [[112, "reV.handlers.outputs.Outputs.get_dset_properties"]], "get_meta_arr() (outputs method)": [[112, "reV.handlers.outputs.Outputs.get_meta_arr"]], "get_scale_factor() (outputs method)": [[112, "reV.handlers.outputs.Outputs.get_scale_factor"]], "get_units() (outputs method)": [[112, "reV.handlers.outputs.Outputs.get_units"]], "global_attrs (outputs property)": [[112, "reV.handlers.outputs.Outputs.global_attrs"]], "groups (outputs property)": [[112, "reV.handlers.outputs.Outputs.groups"]], "h5 (outputs property)": [[112, "reV.handlers.outputs.Outputs.h5"]], "init_h5() (outputs class method)": [[112, "reV.handlers.outputs.Outputs.init_h5"]], "lat_lon (outputs property)": [[112, "reV.handlers.outputs.Outputs.lat_lon"]], "meta (outputs property)": [[112, "reV.handlers.outputs.Outputs.meta"]], "open_dataset() (outputs method)": [[112, "reV.handlers.outputs.Outputs.open_dataset"]], "package (outputs property)": [[112, "reV.handlers.outputs.Outputs.package"]], "preload_sam() (outputs class method)": [[112, "reV.handlers.outputs.Outputs.preload_SAM"]], "res_dsets (outputs property)": [[112, "reV.handlers.outputs.Outputs.res_dsets"]], "resource_datasets (outputs property)": [[112, "reV.handlers.outputs.Outputs.resource_datasets"]], "run_attrs (outputs property)": [[112, "reV.handlers.outputs.Outputs.run_attrs"]], "scale_factors (outputs property)": [[112, "reV.handlers.outputs.Outputs.scale_factors"]], "set_configs() (outputs method)": [[112, "reV.handlers.outputs.Outputs.set_configs"]], "set_version_attr() 
(outputs method)": [[112, "reV.handlers.outputs.Outputs.set_version_attr"]], "shape (outputs property)": [[112, "reV.handlers.outputs.Outputs.shape"]], "shapes (outputs property)": [[112, "reV.handlers.outputs.Outputs.shapes"]], "source (outputs property)": [[112, "reV.handlers.outputs.Outputs.source"]], "time_index (outputs property)": [[112, "reV.handlers.outputs.Outputs.time_index"]], "units (outputs property)": [[112, "reV.handlers.outputs.Outputs.units"]], "update_dset() (outputs method)": [[112, "reV.handlers.outputs.Outputs.update_dset"]], "version (outputs property)": [[112, "reV.handlers.outputs.Outputs.version"]], "writable (outputs property)": [[112, "reV.handlers.outputs.Outputs.writable"]], "write_dataset() (outputs method)": [[112, "reV.handlers.outputs.Outputs.write_dataset"]], "write_means() (outputs class method)": [[112, "reV.handlers.outputs.Outputs.write_means"]], "write_profiles() (outputs class method)": [[112, "reV.handlers.outputs.Outputs.write_profiles"]], "rev.handlers.transmission": [[113, "module-reV.handlers.transmission"]], "transmissioncosts (class in rev.handlers.transmission)": [[114, "reV.handlers.transmission.TransmissionCosts"]], "available_capacity() (transmissioncosts method)": [[114, "reV.handlers.transmission.TransmissionCosts.available_capacity"]], "check_availability() (transmissioncosts method)": [[114, "reV.handlers.transmission.TransmissionCosts.check_availability"]], "check_feature_dependencies() (transmissioncosts method)": [[114, "reV.handlers.transmission.TransmissionCosts.check_feature_dependencies"]], "connect() (transmissioncosts method)": [[114, "reV.handlers.transmission.TransmissionCosts.connect"]], "cost() (transmissioncosts method)": [[114, "reV.handlers.transmission.TransmissionCosts.cost"]], "feature_capacity() (transmissioncosts class method)": [[114, "reV.handlers.transmission.TransmissionCosts.feature_capacity"]], "feature_costs() (transmissioncosts class method)": [[114, 
"reV.handlers.transmission.TransmissionCosts.feature_costs"]], "transmissionfeatures (class in rev.handlers.transmission)": [[115, "reV.handlers.transmission.TransmissionFeatures"]], "available_capacity() (transmissionfeatures method)": [[115, "reV.handlers.transmission.TransmissionFeatures.available_capacity"]], "check_availability() (transmissionfeatures method)": [[115, "reV.handlers.transmission.TransmissionFeatures.check_availability"]], "check_feature_dependencies() (transmissionfeatures method)": [[115, "reV.handlers.transmission.TransmissionFeatures.check_feature_dependencies"]], "connect() (transmissionfeatures method)": [[115, "reV.handlers.transmission.TransmissionFeatures.connect"]], "cost() (transmissionfeatures method)": [[115, "reV.handlers.transmission.TransmissionFeatures.cost"]], "feature_capacity() (transmissionfeatures class method)": [[115, "reV.handlers.transmission.TransmissionFeatures.feature_capacity"]], "rev.hybrids": [[116, "module-reV.hybrids"]], "rev.hybrids.cli_hybrids": [[117, "module-reV.hybrids.cli_hybrids"]], "rev.hybrids.hybrid_methods": [[118, "module-reV.hybrids.hybrid_methods"]], "aggregate_capacity() (in module rev.hybrids.hybrid_methods)": [[119, "reV.hybrids.hybrid_methods.aggregate_capacity"]], "aggregate_capacity_factor() (in module rev.hybrids.hybrid_methods)": [[120, "reV.hybrids.hybrid_methods.aggregate_capacity_factor"]], "aggregate_solar_capacity() (in module rev.hybrids.hybrid_methods)": [[121, "reV.hybrids.hybrid_methods.aggregate_solar_capacity"]], "aggregate_wind_capacity() (in module rev.hybrids.hybrid_methods)": [[122, "reV.hybrids.hybrid_methods.aggregate_wind_capacity"]], "rev.hybrids.hybrids": [[123, "module-reV.hybrids.hybrids"]], "colnameformatter (class in rev.hybrids.hybrids)": [[124, "reV.hybrids.hybrids.ColNameFormatter"]], "fmt() (colnameformatter class method)": [[124, "reV.hybrids.hybrids.ColNameFormatter.fmt"]], "hybridization (class in rev.hybrids.hybrids)": [[125, 
"reV.hybrids.hybrids.Hybridization"]], "hybrid_meta (hybridization property)": [[125, "reV.hybrids.hybrids.Hybridization.hybrid_meta"]], "hybrid_time_index (hybridization property)": [[125, "reV.hybrids.hybrids.Hybridization.hybrid_time_index"]], "profiles (hybridization property)": [[125, "reV.hybrids.hybrids.Hybridization.profiles"]], "run() (hybridization method)": [[125, "reV.hybrids.hybrids.Hybridization.run"]], "run_meta() (hybridization method)": [[125, "reV.hybrids.hybrids.Hybridization.run_meta"]], "run_profiles() (hybridization method)": [[125, "reV.hybrids.hybrids.Hybridization.run_profiles"]], "save_profiles() (hybridization method)": [[125, "reV.hybrids.hybrids.Hybridization.save_profiles"]], "solar_meta (hybridization property)": [[125, "reV.hybrids.hybrids.Hybridization.solar_meta"]], "solar_time_index (hybridization property)": [[125, "reV.hybrids.hybrids.Hybridization.solar_time_index"]], "wind_meta (hybridization property)": [[125, "reV.hybrids.hybrids.Hybridization.wind_meta"]], "wind_time_index (hybridization property)": [[125, "reV.hybrids.hybrids.Hybridization.wind_time_index"]], "hybridsdata (class in rev.hybrids.hybrids)": [[126, "reV.hybrids.hybrids.HybridsData"]], "contains_col() (hybridsdata method)": [[126, "reV.hybrids.hybrids.HybridsData.contains_col"]], "hybrid_time_index (hybridsdata property)": [[126, "reV.hybrids.hybrids.HybridsData.hybrid_time_index"]], "solar_meta (hybridsdata property)": [[126, "reV.hybrids.hybrids.HybridsData.solar_meta"]], "solar_time_index (hybridsdata property)": [[126, "reV.hybrids.hybrids.HybridsData.solar_time_index"]], "validate() (hybridsdata method)": [[126, "reV.hybrids.hybrids.HybridsData.validate"]], "wind_meta (hybridsdata property)": [[126, "reV.hybrids.hybrids.HybridsData.wind_meta"]], "wind_time_index (hybridsdata property)": [[126, "reV.hybrids.hybrids.HybridsData.wind_time_index"]], "metahybridizer (class in rev.hybrids.hybrids)": [[127, "reV.hybrids.hybrids.MetaHybridizer"]], "hybrid_meta 
(metahybridizer property)": [[127, "reV.hybrids.hybrids.MetaHybridizer.hybrid_meta"]], "hybridize() (metahybridizer method)": [[127, "reV.hybrids.hybrids.MetaHybridizer.hybridize"]], "solar_profile_indices_map (metahybridizer property)": [[127, "reV.hybrids.hybrids.MetaHybridizer.solar_profile_indices_map"]], "validate_input() (metahybridizer method)": [[127, "reV.hybrids.hybrids.MetaHybridizer.validate_input"]], "wind_profile_indices_map (metahybridizer property)": [[127, "reV.hybrids.hybrids.MetaHybridizer.wind_profile_indices_map"]], "ratiocolumns (class in rev.hybrids.hybrids)": [[128, "reV.hybrids.hybrids.RatioColumns"]], "__add__() (ratiocolumns method)": [[128, "reV.hybrids.hybrids.RatioColumns.__add__"]], "__mul__() (ratiocolumns method)": [[128, "reV.hybrids.hybrids.RatioColumns.__mul__"]], "count() (ratiocolumns method)": [[128, "reV.hybrids.hybrids.RatioColumns.count"]], "denom (ratiocolumns attribute)": [[128, "reV.hybrids.hybrids.RatioColumns.denom"]], "fixed (ratiocolumns attribute)": [[128, "reV.hybrids.hybrids.RatioColumns.fixed"]], "index() (ratiocolumns method)": [[128, "reV.hybrids.hybrids.RatioColumns.index"]], "num (ratiocolumns attribute)": [[128, "reV.hybrids.hybrids.RatioColumns.num"]], "rev.losses": [[129, "module-reV.losses"]], "rev.losses.power_curve": [[130, "module-reV.losses.power_curve"]], "abstractpowercurvetransformation (class in rev.losses.power_curve)": [[131, "reV.losses.power_curve.AbstractPowerCurveTransformation"]], "apply() (abstractpowercurvetransformation method)": [[131, "reV.losses.power_curve.AbstractPowerCurveTransformation.apply"]], "bounds (abstractpowercurvetransformation property)": [[131, "reV.losses.power_curve.AbstractPowerCurveTransformation.bounds"]], "optm_bounds (abstractpowercurvetransformation property)": [[131, "reV.losses.power_curve.AbstractPowerCurveTransformation.optm_bounds"]], "power_curve (abstractpowercurvetransformation attribute)": [[131, 
"reV.losses.power_curve.AbstractPowerCurveTransformation.power_curve"]], "exponentialstretching (class in rev.losses.power_curve)": [[132, "reV.losses.power_curve.ExponentialStretching"]], "apply() (exponentialstretching method)": [[132, "reV.losses.power_curve.ExponentialStretching.apply"]], "bounds (exponentialstretching property)": [[132, "reV.losses.power_curve.ExponentialStretching.bounds"]], "optm_bounds (exponentialstretching property)": [[132, "reV.losses.power_curve.ExponentialStretching.optm_bounds"]], "power_curve (exponentialstretching attribute)": [[132, "reV.losses.power_curve.ExponentialStretching.power_curve"]], "horizontaltranslation (class in rev.losses.power_curve)": [[133, "reV.losses.power_curve.HorizontalTranslation"]], "apply() (horizontaltranslation method)": [[133, "reV.losses.power_curve.HorizontalTranslation.apply"]], "bounds (horizontaltranslation property)": [[133, "reV.losses.power_curve.HorizontalTranslation.bounds"]], "optm_bounds (horizontaltranslation property)": [[133, "reV.losses.power_curve.HorizontalTranslation.optm_bounds"]], "power_curve (horizontaltranslation attribute)": [[133, "reV.losses.power_curve.HorizontalTranslation.power_curve"]], "linearstretching (class in rev.losses.power_curve)": [[134, "reV.losses.power_curve.LinearStretching"]], "apply() (linearstretching method)": [[134, "reV.losses.power_curve.LinearStretching.apply"]], "bounds (linearstretching property)": [[134, "reV.losses.power_curve.LinearStretching.bounds"]], "optm_bounds (linearstretching property)": [[134, "reV.losses.power_curve.LinearStretching.optm_bounds"]], "power_curve (linearstretching attribute)": [[134, "reV.losses.power_curve.LinearStretching.power_curve"]], "powercurve (class in rev.losses.power_curve)": [[135, "reV.losses.power_curve.PowerCurve"]], "__call__() (powercurve method)": [[135, "reV.losses.power_curve.PowerCurve.__call__"]], "cutin_wind_speed (powercurve property)": [[135, "reV.losses.power_curve.PowerCurve.cutin_wind_speed"]], 
"cutoff_wind_speed (powercurve property)": [[135, "reV.losses.power_curve.PowerCurve.cutoff_wind_speed"]], "generation (powercurve attribute)": [[135, "reV.losses.power_curve.PowerCurve.generation"]], "rated_power (powercurve property)": [[135, "reV.losses.power_curve.PowerCurve.rated_power"]], "wind_speed (powercurve attribute)": [[135, "reV.losses.power_curve.PowerCurve.wind_speed"]], "powercurvelosses (class in rev.losses.power_curve)": [[136, "reV.losses.power_curve.PowerCurveLosses"]], "annual_losses_with_transformed_power_curve() (powercurvelosses method)": [[136, "reV.losses.power_curve.PowerCurveLosses.annual_losses_with_transformed_power_curve"]], "fit() (powercurvelosses method)": [[136, "reV.losses.power_curve.PowerCurveLosses.fit"]], "power_curve (powercurvelosses attribute)": [[136, "reV.losses.power_curve.PowerCurveLosses.power_curve"]], "power_gen_no_losses (powercurvelosses property)": [[136, "reV.losses.power_curve.PowerCurveLosses.power_gen_no_losses"]], "weights (powercurvelosses attribute)": [[136, "reV.losses.power_curve.PowerCurveLosses.weights"]], "wind_resource (powercurvelosses attribute)": [[136, "reV.losses.power_curve.PowerCurveLosses.wind_resource"]], "powercurvelossesinput (class in rev.losses.power_curve)": [[137, "reV.losses.power_curve.PowerCurveLossesInput"]], "required_keys (powercurvelossesinput attribute)": [[137, "reV.losses.power_curve.PowerCurveLossesInput.REQUIRED_KEYS"]], "target (powercurvelossesinput property)": [[137, "reV.losses.power_curve.PowerCurveLossesInput.target"]], "transformation (powercurvelossesinput property)": [[137, "reV.losses.power_curve.PowerCurveLossesInput.transformation"]], "power_curve_config_key (powercurvelossesmixin attribute)": [[138, "reV.losses.power_curve.PowerCurveLossesMixin.POWER_CURVE_CONFIG_KEY"]], "powercurvelossesmixin (class in rev.losses.power_curve)": [[138, "reV.losses.power_curve.PowerCurveLossesMixin"]], "add_power_curve_losses() (powercurvelossesmixin method)": [[138, 
"reV.losses.power_curve.PowerCurveLossesMixin.add_power_curve_losses"]], "input_power_curve (powercurvelossesmixin property)": [[138, "reV.losses.power_curve.PowerCurveLossesMixin.input_power_curve"]], "wind_resource_from_input() (powercurvelossesmixin method)": [[138, "reV.losses.power_curve.PowerCurveLossesMixin.wind_resource_from_input"]], "powercurvewindresource (class in rev.losses.power_curve)": [[139, "reV.losses.power_curve.PowerCurveWindResource"]], "wind_resource_for_site() (powercurvewindresource method)": [[139, "reV.losses.power_curve.PowerCurveWindResource.wind_resource_for_site"]], "wind_speeds (powercurvewindresource property)": [[139, "reV.losses.power_curve.PowerCurveWindResource.wind_speeds"]], "transformations (in module rev.losses.power_curve)": [[140, "reV.losses.power_curve.TRANSFORMATIONS"]], "adjust_power_curve() (in module rev.losses.power_curve)": [[141, "reV.losses.power_curve.adjust_power_curve"]], "rev.losses.scheduled": [[142, "module-reV.losses.scheduled"]], "outage (class in rev.losses.scheduled)": [[143, "reV.losses.scheduled.Outage"]], "required_keys (outage attribute)": [[143, "reV.losses.scheduled.Outage.REQUIRED_KEYS"]], "allow_outage_overlap (outage property)": [[143, "reV.losses.scheduled.Outage.allow_outage_overlap"]], "allowed_months (outage property)": [[143, "reV.losses.scheduled.Outage.allowed_months"]], "count (outage property)": [[143, "reV.losses.scheduled.Outage.count"]], "duration (outage property)": [[143, "reV.losses.scheduled.Outage.duration"]], "name (outage property)": [[143, "reV.losses.scheduled.Outage.name"]], "percentage_of_capacity_lost (outage property)": [[143, "reV.losses.scheduled.Outage.percentage_of_capacity_lost"]], "total_available_hours (outage property)": [[143, "reV.losses.scheduled.Outage.total_available_hours"]], "outagescheduler (class in rev.losses.scheduled)": [[144, "reV.losses.scheduled.OutageScheduler"]], "calculate() (outagescheduler method)": [[144, 
"reV.losses.scheduled.OutageScheduler.calculate"]], "can_schedule_more (outagescheduler attribute)": [[144, "reV.losses.scheduled.OutageScheduler.can_schedule_more"]], "outages (outagescheduler attribute)": [[144, "reV.losses.scheduled.OutageScheduler.outages"]], "seed (outagescheduler attribute)": [[144, "reV.losses.scheduled.OutageScheduler.seed"]], "total_losses (outagescheduler attribute)": [[144, "reV.losses.scheduled.OutageScheduler.total_losses"]], "outage_config_key (scheduledlossesmixin attribute)": [[145, "reV.losses.scheduled.ScheduledLossesMixin.OUTAGE_CONFIG_KEY"]], "outage_seed_config_key (scheduledlossesmixin attribute)": [[145, "reV.losses.scheduled.ScheduledLossesMixin.OUTAGE_SEED_CONFIG_KEY"]], "scheduledlossesmixin (class in rev.losses.scheduled)": [[145, "reV.losses.scheduled.ScheduledLossesMixin"]], "add_scheduled_losses() (scheduledlossesmixin method)": [[145, "reV.losses.scheduled.ScheduledLossesMixin.add_scheduled_losses"]], "outage_seed (scheduledlossesmixin property)": [[145, "reV.losses.scheduled.ScheduledLossesMixin.outage_seed"]], "max_iter (singleoutagescheduler attribute)": [[146, "reV.losses.scheduled.SingleOutageScheduler.MAX_ITER"]], "singleoutagescheduler (class in rev.losses.scheduled)": [[146, "reV.losses.scheduled.SingleOutageScheduler"]], "calculate() (singleoutagescheduler method)": [[146, "reV.losses.scheduled.SingleOutageScheduler.calculate"]], "can_schedule_more (singleoutagescheduler attribute)": [[146, "reV.losses.scheduled.SingleOutageScheduler.can_schedule_more"]], "find_random_outage_slice() (singleoutagescheduler method)": [[146, "reV.losses.scheduled.SingleOutageScheduler.find_random_outage_slice"]], "outage (singleoutagescheduler attribute)": [[146, "reV.losses.scheduled.SingleOutageScheduler.outage"]], "schedule_losses() (singleoutagescheduler method)": [[146, "reV.losses.scheduled.SingleOutageScheduler.schedule_losses"]], "scheduler (singleoutagescheduler attribute)": [[146, 
"reV.losses.scheduled.SingleOutageScheduler.scheduler"]], "update_when_can_schedule() (singleoutagescheduler method)": [[146, "reV.losses.scheduled.SingleOutageScheduler.update_when_can_schedule"]], "update_when_can_schedule_from_months() (singleoutagescheduler method)": [[146, "reV.losses.scheduled.SingleOutageScheduler.update_when_can_schedule_from_months"]], "rev.losses.utils": [[147, "module-reV.losses.utils"]], "convert_to_full_month_names() (in module rev.losses.utils)": [[148, "reV.losses.utils.convert_to_full_month_names"]], "filter_unknown_month_names() (in module rev.losses.utils)": [[149, "reV.losses.utils.filter_unknown_month_names"]], "format_month_name() (in module rev.losses.utils)": [[150, "reV.losses.utils.format_month_name"]], "full_month_name_from_abbr() (in module rev.losses.utils)": [[151, "reV.losses.utils.full_month_name_from_abbr"]], "hourly_indices_for_months() (in module rev.losses.utils)": [[152, "reV.losses.utils.hourly_indices_for_months"]], "month_index() (in module rev.losses.utils)": [[153, "reV.losses.utils.month_index"]], "month_indices() (in module rev.losses.utils)": [[154, "reV.losses.utils.month_indices"]], "rev.nrwal": [[155, "module-reV.nrwal"]], "rev.nrwal.cli_nrwal": [[156, "module-reV.nrwal.cli_nrwal"]], "rev.nrwal.nrwal": [[157, "module-reV.nrwal.nrwal"]], "default_meta_cols (revnrwal attribute)": [[158, "reV.nrwal.nrwal.RevNrwal.DEFAULT_META_COLS"]], "revnrwal (class in rev.nrwal.nrwal)": [[158, "reV.nrwal.nrwal.RevNrwal"]], "analysis_gids (revnrwal property)": [[158, "reV.nrwal.nrwal.RevNrwal.analysis_gids"]], "analysis_mask (revnrwal property)": [[158, "reV.nrwal.nrwal.RevNrwal.analysis_mask"]], "check_outputs() (revnrwal method)": [[158, "reV.nrwal.nrwal.RevNrwal.check_outputs"]], "gen_dsets (revnrwal property)": [[158, "reV.nrwal.nrwal.RevNrwal.gen_dsets"]], "meta_out (revnrwal property)": [[158, "reV.nrwal.nrwal.RevNrwal.meta_out"]], "meta_source (revnrwal property)": [[158, "reV.nrwal.nrwal.RevNrwal.meta_source"]], 
"outputs (revnrwal property)": [[158, "reV.nrwal.nrwal.RevNrwal.outputs"]], "run() (revnrwal method)": [[158, "reV.nrwal.nrwal.RevNrwal.run"]], "run_nrwal() (revnrwal method)": [[158, "reV.nrwal.nrwal.RevNrwal.run_nrwal"]], "save_raw_dsets() (revnrwal method)": [[158, "reV.nrwal.nrwal.RevNrwal.save_raw_dsets"]], "time_index (revnrwal property)": [[158, "reV.nrwal.nrwal.RevNrwal.time_index"]], "write_meta_to_csv() (revnrwal method)": [[158, "reV.nrwal.nrwal.RevNrwal.write_meta_to_csv"]], "write_to_gen_fpath() (revnrwal method)": [[158, "reV.nrwal.nrwal.RevNrwal.write_to_gen_fpath"]], "rev.qa_qc": [[159, "module-reV.qa_qc"]], "rev.qa_qc.cli_qa_qc": [[160, "module-reV.qa_qc.cli_qa_qc"]], "cli_qa_qc() (in module rev.qa_qc.cli_qa_qc)": [[161, "reV.qa_qc.cli_qa_qc.cli_qa_qc"]], "rev.qa_qc.qa_qc": [[162, "module-reV.qa_qc.qa_qc"]], "qaqc (class in rev.qa_qc.qa_qc)": [[163, "reV.qa_qc.qa_qc.QaQc"]], "create_scatter_plots() (qaqc method)": [[163, "reV.qa_qc.qa_qc.QaQc.create_scatter_plots"]], "exclusions_mask() (qaqc class method)": [[163, "reV.qa_qc.qa_qc.QaQc.exclusions_mask"]], "h5() (qaqc class method)": [[163, "reV.qa_qc.qa_qc.QaQc.h5"]], "out_dir (qaqc property)": [[163, "reV.qa_qc.qa_qc.QaQc.out_dir"]], "supply_curve() (qaqc class method)": [[163, "reV.qa_qc.qa_qc.QaQc.supply_curve"]], "qaqcmodule (class in rev.qa_qc.qa_qc)": [[164, "reV.qa_qc.qa_qc.QaQcModule"]], "area_filter_kernel (qaqcmodule property)": [[164, "reV.qa_qc.qa_qc.QaQcModule.area_filter_kernel"]], "cmap (qaqcmodule property)": [[164, "reV.qa_qc.qa_qc.QaQcModule.cmap"]], "columns (qaqcmodule property)": [[164, "reV.qa_qc.qa_qc.QaQcModule.columns"]], "dsets (qaqcmodule property)": [[164, "reV.qa_qc.qa_qc.QaQcModule.dsets"]], "excl_dict (qaqcmodule property)": [[164, "reV.qa_qc.qa_qc.QaQcModule.excl_dict"]], "excl_fpath (qaqcmodule property)": [[164, "reV.qa_qc.qa_qc.QaQcModule.excl_fpath"]], "fpath (qaqcmodule property)": [[164, "reV.qa_qc.qa_qc.QaQcModule.fpath"]], "group (qaqcmodule property)": 
[[164, "reV.qa_qc.qa_qc.QaQcModule.group"]], "lcoe (qaqcmodule property)": [[164, "reV.qa_qc.qa_qc.QaQcModule.lcoe"]], "min_area (qaqcmodule property)": [[164, "reV.qa_qc.qa_qc.QaQcModule.min_area"]], "plot_step (qaqcmodule property)": [[164, "reV.qa_qc.qa_qc.QaQcModule.plot_step"]], "plot_type (qaqcmodule property)": [[164, "reV.qa_qc.qa_qc.QaQcModule.plot_type"]], "process_size (qaqcmodule property)": [[164, "reV.qa_qc.qa_qc.QaQcModule.process_size"]], "sub_dir (qaqcmodule property)": [[164, "reV.qa_qc.qa_qc.QaQcModule.sub_dir"]], "rev.qa_qc.summary": [[165, "module-reV.qa_qc.summary"]], "exclusionsmask (class in rev.qa_qc.summary)": [[166, "reV.qa_qc.summary.ExclusionsMask"]], "data (exclusionsmask property)": [[166, "reV.qa_qc.summary.ExclusionsMask.data"]], "exclusions_plot() (exclusionsmask method)": [[166, "reV.qa_qc.summary.ExclusionsMask.exclusions_plot"]], "exclusions_plotly() (exclusionsmask method)": [[166, "reV.qa_qc.summary.ExclusionsMask.exclusions_plotly"]], "mask (exclusionsmask property)": [[166, "reV.qa_qc.summary.ExclusionsMask.mask"]], "plot() (exclusionsmask class method)": [[166, "reV.qa_qc.summary.ExclusionsMask.plot"]], "plotbase (class in rev.qa_qc.summary)": [[167, "reV.qa_qc.summary.PlotBase"]], "data (plotbase property)": [[167, "reV.qa_qc.summary.PlotBase.data"]], "summarizeh5 (class in rev.qa_qc.summary)": [[168, "reV.qa_qc.summary.SummarizeH5"]], "h5_file (summarizeh5 property)": [[168, "reV.qa_qc.summary.SummarizeH5.h5_file"]], "run() (summarizeh5 class method)": [[168, "reV.qa_qc.summary.SummarizeH5.run"]], "summarize_dset() (summarizeh5 method)": [[168, "reV.qa_qc.summary.SummarizeH5.summarize_dset"]], "summarize_means() (summarizeh5 method)": [[168, "reV.qa_qc.summary.SummarizeH5.summarize_means"]], "summarizesupplycurve (class in rev.qa_qc.summary)": [[169, "reV.qa_qc.summary.SummarizeSupplyCurve"]], "run() (summarizesupplycurve class method)": [[169, "reV.qa_qc.summary.SummarizeSupplyCurve.run"]], "sc_table 
(summarizesupplycurve property)": [[169, "reV.qa_qc.summary.SummarizeSupplyCurve.sc_table"]], "supply_curve_summary() (summarizesupplycurve method)": [[169, "reV.qa_qc.summary.SummarizeSupplyCurve.supply_curve_summary"]], "summaryplots (class in rev.qa_qc.summary)": [[170, "reV.qa_qc.summary.SummaryPlots"]], "columns (summaryplots property)": [[170, "reV.qa_qc.summary.SummaryPlots.columns"]], "data (summaryplots property)": [[170, "reV.qa_qc.summary.SummaryPlots.data"]], "dist_plot() (summaryplots method)": [[170, "reV.qa_qc.summary.SummaryPlots.dist_plot"]], "dist_plotly() (summaryplots method)": [[170, "reV.qa_qc.summary.SummaryPlots.dist_plotly"]], "scatter() (summaryplots class method)": [[170, "reV.qa_qc.summary.SummaryPlots.scatter"]], "scatter_all() (summaryplots class method)": [[170, "reV.qa_qc.summary.SummaryPlots.scatter_all"]], "scatter_plot() (summaryplots method)": [[170, "reV.qa_qc.summary.SummaryPlots.scatter_plot"]], "scatter_plotly() (summaryplots method)": [[170, "reV.qa_qc.summary.SummaryPlots.scatter_plotly"]], "summary (summaryplots property)": [[170, "reV.qa_qc.summary.SummaryPlots.summary"]], "supplycurveplot (class in rev.qa_qc.summary)": [[171, "reV.qa_qc.summary.SupplyCurvePlot"]], "columns (supplycurveplot property)": [[171, "reV.qa_qc.summary.SupplyCurvePlot.columns"]], "data (supplycurveplot property)": [[171, "reV.qa_qc.summary.SupplyCurvePlot.data"]], "plot() (supplycurveplot class method)": [[171, "reV.qa_qc.summary.SupplyCurvePlot.plot"]], "sc_table (supplycurveplot property)": [[171, "reV.qa_qc.summary.SupplyCurvePlot.sc_table"]], "supply_curve_plot() (supplycurveplot method)": [[171, "reV.qa_qc.summary.SupplyCurvePlot.supply_curve_plot"]], "supply_curve_plotly() (supplycurveplot method)": [[171, "reV.qa_qc.summary.SupplyCurvePlot.supply_curve_plotly"]], "rev.rep_profiles": [[172, "module-reV.rep_profiles"]], "rev.rep_profiles.cli_rep_profiles": [[173, "module-reV.rep_profiles.cli_rep_profiles"]], "rev.rep_profiles.rep_profiles": 
[[174, "module-reV.rep_profiles.rep_profiles"]], "regionrepprofile (class in rev.rep_profiles.rep_profiles)": [[175, "reV.rep_profiles.rep_profiles.RegionRepProfile"]], "get_region_rep_profile() (regionrepprofile class method)": [[175, "reV.rep_profiles.rep_profiles.RegionRepProfile.get_region_rep_profile"]], "i_reps (regionrepprofile property)": [[175, "reV.rep_profiles.rep_profiles.RegionRepProfile.i_reps"]], "rep_gen_gids (regionrepprofile property)": [[175, "reV.rep_profiles.rep_profiles.RegionRepProfile.rep_gen_gids"]], "rep_profiles (regionrepprofile property)": [[175, "reV.rep_profiles.rep_profiles.RegionRepProfile.rep_profiles"]], "rep_res_gids (regionrepprofile property)": [[175, "reV.rep_profiles.rep_profiles.RegionRepProfile.rep_res_gids"]], "source_profiles (regionrepprofile property)": [[175, "reV.rep_profiles.rep_profiles.RegionRepProfile.source_profiles"]], "weights (regionrepprofile property)": [[175, "reV.rep_profiles.rep_profiles.RegionRepProfile.weights"]], "repprofiles (class in rev.rep_profiles.rep_profiles)": [[176, "reV.rep_profiles.rep_profiles.RepProfiles"]], "meta (repprofiles property)": [[176, "reV.rep_profiles.rep_profiles.RepProfiles.meta"]], "profiles (repprofiles property)": [[176, "reV.rep_profiles.rep_profiles.RepProfiles.profiles"]], "run() (repprofiles method)": [[176, "reV.rep_profiles.rep_profiles.RepProfiles.run"]], "save_profiles() (repprofiles method)": [[176, "reV.rep_profiles.rep_profiles.RepProfiles.save_profiles"]], "time_index (repprofiles property)": [[176, "reV.rep_profiles.rep_profiles.RepProfiles.time_index"]], "repprofilesbase (class in rev.rep_profiles.rep_profiles)": [[177, "reV.rep_profiles.rep_profiles.RepProfilesBase"]], "meta (repprofilesbase property)": [[177, "reV.rep_profiles.rep_profiles.RepProfilesBase.meta"]], "profiles (repprofilesbase property)": [[177, "reV.rep_profiles.rep_profiles.RepProfilesBase.profiles"]], "run() (repprofilesbase method)": [[177, 
"reV.rep_profiles.rep_profiles.RepProfilesBase.run"]], "save_profiles() (repprofilesbase method)": [[177, "reV.rep_profiles.rep_profiles.RepProfilesBase.save_profiles"]], "time_index (repprofilesbase property)": [[177, "reV.rep_profiles.rep_profiles.RepProfilesBase.time_index"]], "representativemethods (class in rev.rep_profiles.rep_profiles)": [[178, "reV.rep_profiles.rep_profiles.RepresentativeMethods"]], "err_methods (representativemethods property)": [[178, "reV.rep_profiles.rep_profiles.RepresentativeMethods.err_methods"]], "mae() (representativemethods class method)": [[178, "reV.rep_profiles.rep_profiles.RepresentativeMethods.mae"]], "mbe() (representativemethods class method)": [[178, "reV.rep_profiles.rep_profiles.RepresentativeMethods.mbe"]], "meanoid() (representativemethods static method)": [[178, "reV.rep_profiles.rep_profiles.RepresentativeMethods.meanoid"]], "medianoid() (representativemethods static method)": [[178, "reV.rep_profiles.rep_profiles.RepresentativeMethods.medianoid"]], "nargmin() (representativemethods static method)": [[178, "reV.rep_profiles.rep_profiles.RepresentativeMethods.nargmin"]], "rep_methods (representativemethods property)": [[178, "reV.rep_profiles.rep_profiles.RepresentativeMethods.rep_methods"]], "rmse() (representativemethods class method)": [[178, "reV.rep_profiles.rep_profiles.RepresentativeMethods.rmse"]], "run() (representativemethods class method)": [[178, "reV.rep_profiles.rep_profiles.RepresentativeMethods.run"]], "rev.supply_curve": [[179, "module-reV.supply_curve"]], "rev.supply_curve.aggregation": [[180, "module-reV.supply_curve.aggregation"]], "abstractaggfilehandler (class in rev.supply_curve.aggregation)": [[181, "reV.supply_curve.aggregation.AbstractAggFileHandler"]], "close() (abstractaggfilehandler method)": [[181, "reV.supply_curve.aggregation.AbstractAggFileHandler.close"]], "exclusions (abstractaggfilehandler property)": [[181, "reV.supply_curve.aggregation.AbstractAggFileHandler.exclusions"]], "h5 
(abstractaggfilehandler property)": [[181, "reV.supply_curve.aggregation.AbstractAggFileHandler.h5"]], "aggfilehandler (class in rev.supply_curve.aggregation)": [[182, "reV.supply_curve.aggregation.AggFileHandler"]], "default_h5_handler (aggfilehandler attribute)": [[182, "reV.supply_curve.aggregation.AggFileHandler.DEFAULT_H5_HANDLER"]], "close() (aggfilehandler method)": [[182, "reV.supply_curve.aggregation.AggFileHandler.close"]], "exclusions (aggfilehandler property)": [[182, "reV.supply_curve.aggregation.AggFileHandler.exclusions"]], "h5 (aggfilehandler property)": [[182, "reV.supply_curve.aggregation.AggFileHandler.h5"]], "aggregation (class in rev.supply_curve.aggregation)": [[183, "reV.supply_curve.aggregation.Aggregation"]], "aggregate() (aggregation method)": [[183, "reV.supply_curve.aggregation.Aggregation.aggregate"]], "gids (aggregation property)": [[183, "reV.supply_curve.aggregation.Aggregation.gids"]], "run() (aggregation class method)": [[183, "reV.supply_curve.aggregation.Aggregation.run"]], "run_parallel() (aggregation method)": [[183, "reV.supply_curve.aggregation.Aggregation.run_parallel"]], "run_serial() (aggregation class method)": [[183, "reV.supply_curve.aggregation.Aggregation.run_serial"]], "save_agg_to_h5() (aggregation static method)": [[183, "reV.supply_curve.aggregation.Aggregation.save_agg_to_h5"]], "shape (aggregation property)": [[183, "reV.supply_curve.aggregation.Aggregation.shape"]], "baseaggregation (class in rev.supply_curve.aggregation)": [[184, "reV.supply_curve.aggregation.BaseAggregation"]], "gids (baseaggregation property)": [[184, "reV.supply_curve.aggregation.BaseAggregation.gids"]], "shape (baseaggregation property)": [[184, "reV.supply_curve.aggregation.BaseAggregation.shape"]], "rev.supply_curve.cli_sc_aggregation": [[185, "module-reV.supply_curve.cli_sc_aggregation"]], "rev.supply_curve.cli_supply_curve": [[186, "module-reV.supply_curve.cli_supply_curve"]], "rev.supply_curve.competitive_wind_farms": [[187, 
"module-reV.supply_curve.competitive_wind_farms"]], "competitivewindfarms (class in rev.supply_curve.competitive_wind_farms)": [[188, "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms"]], "check_sc_gid() (competitivewindfarms method)": [[188, "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.check_sc_gid"]], "exclude_sc_point_gid() (competitivewindfarms method)": [[188, "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.exclude_sc_point_gid"]], "map_downwind() (competitivewindfarms method)": [[188, "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.map_downwind"]], "map_sc_gid_to_sc_point_gid() (competitivewindfarms method)": [[188, "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.map_sc_gid_to_sc_point_gid"]], "map_sc_point_gid_to_sc_gid() (competitivewindfarms method)": [[188, "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.map_sc_point_gid_to_sc_gid"]], "map_upwind() (competitivewindfarms method)": [[188, "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.map_upwind"]], "mask (competitivewindfarms property)": [[188, "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.mask"]], "remove_noncompetitive_farm() (competitivewindfarms method)": [[188, "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.remove_noncompetitive_farm"]], "run() (competitivewindfarms class method)": [[188, "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.run"]], "sc_gids (competitivewindfarms property)": [[188, "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.sc_gids"]], "sc_point_gids (competitivewindfarms property)": [[188, "reV.supply_curve.competitive_wind_farms.CompetitiveWindFarms.sc_point_gids"]], "rev.supply_curve.exclusions": [[189, "module-reV.supply_curve.exclusions"]], "exclusionmask (class in rev.supply_curve.exclusions)": [[190, "reV.supply_curve.exclusions.ExclusionMask"]], "add_layer() (exclusionmask method)": [[190, 
"reV.supply_curve.exclusions.ExclusionMask.add_layer"]], "close() (exclusionmask method)": [[190, "reV.supply_curve.exclusions.ExclusionMask.close"]], "excl_h5 (exclusionmask property)": [[190, "reV.supply_curve.exclusions.ExclusionMask.excl_h5"]], "excl_layers (exclusionmask property)": [[190, "reV.supply_curve.exclusions.ExclusionMask.excl_layers"]], "latitude (exclusionmask property)": [[190, "reV.supply_curve.exclusions.ExclusionMask.latitude"]], "layer_names (exclusionmask property)": [[190, "reV.supply_curve.exclusions.ExclusionMask.layer_names"]], "layers (exclusionmask property)": [[190, "reV.supply_curve.exclusions.ExclusionMask.layers"]], "longitude (exclusionmask property)": [[190, "reV.supply_curve.exclusions.ExclusionMask.longitude"]], "mask (exclusionmask property)": [[190, "reV.supply_curve.exclusions.ExclusionMask.mask"]], "nodata_lookup (exclusionmask property)": [[190, "reV.supply_curve.exclusions.ExclusionMask.nodata_lookup"]], "run() (exclusionmask class method)": [[190, "reV.supply_curve.exclusions.ExclusionMask.run"]], "shape (exclusionmask property)": [[190, "reV.supply_curve.exclusions.ExclusionMask.shape"]], "exclusionmaskfromdict (class in rev.supply_curve.exclusions)": [[191, "reV.supply_curve.exclusions.ExclusionMaskFromDict"]], "add_layer() (exclusionmaskfromdict method)": [[191, "reV.supply_curve.exclusions.ExclusionMaskFromDict.add_layer"]], "close() (exclusionmaskfromdict method)": [[191, "reV.supply_curve.exclusions.ExclusionMaskFromDict.close"]], "excl_h5 (exclusionmaskfromdict property)": [[191, "reV.supply_curve.exclusions.ExclusionMaskFromDict.excl_h5"]], "excl_layers (exclusionmaskfromdict property)": [[191, "reV.supply_curve.exclusions.ExclusionMaskFromDict.excl_layers"]], "extract_inclusion_mask() (exclusionmaskfromdict class method)": [[191, "reV.supply_curve.exclusions.ExclusionMaskFromDict.extract_inclusion_mask"]], "latitude (exclusionmaskfromdict property)": [[191, 
"reV.supply_curve.exclusions.ExclusionMaskFromDict.latitude"]], "layer_names (exclusionmaskfromdict property)": [[191, "reV.supply_curve.exclusions.ExclusionMaskFromDict.layer_names"]], "layers (exclusionmaskfromdict property)": [[191, "reV.supply_curve.exclusions.ExclusionMaskFromDict.layers"]], "longitude (exclusionmaskfromdict property)": [[191, "reV.supply_curve.exclusions.ExclusionMaskFromDict.longitude"]], "mask (exclusionmaskfromdict property)": [[191, "reV.supply_curve.exclusions.ExclusionMaskFromDict.mask"]], "nodata_lookup (exclusionmaskfromdict property)": [[191, "reV.supply_curve.exclusions.ExclusionMaskFromDict.nodata_lookup"]], "run() (exclusionmaskfromdict class method)": [[191, "reV.supply_curve.exclusions.ExclusionMaskFromDict.run"]], "shape (exclusionmaskfromdict property)": [[191, "reV.supply_curve.exclusions.ExclusionMaskFromDict.shape"]], "frictionmask (class in rev.supply_curve.exclusions)": [[192, "reV.supply_curve.exclusions.FrictionMask"]], "add_layer() (frictionmask method)": [[192, "reV.supply_curve.exclusions.FrictionMask.add_layer"]], "close() (frictionmask method)": [[192, "reV.supply_curve.exclusions.FrictionMask.close"]], "excl_h5 (frictionmask property)": [[192, "reV.supply_curve.exclusions.FrictionMask.excl_h5"]], "excl_layers (frictionmask property)": [[192, "reV.supply_curve.exclusions.FrictionMask.excl_layers"]], "latitude (frictionmask property)": [[192, "reV.supply_curve.exclusions.FrictionMask.latitude"]], "layer_names (frictionmask property)": [[192, "reV.supply_curve.exclusions.FrictionMask.layer_names"]], "layers (frictionmask property)": [[192, "reV.supply_curve.exclusions.FrictionMask.layers"]], "longitude (frictionmask property)": [[192, "reV.supply_curve.exclusions.FrictionMask.longitude"]], "mask (frictionmask property)": [[192, "reV.supply_curve.exclusions.FrictionMask.mask"]], "nodata_lookup (frictionmask property)": [[192, "reV.supply_curve.exclusions.FrictionMask.nodata_lookup"]], "run() (frictionmask class 
method)": [[192, "reV.supply_curve.exclusions.FrictionMask.run"]], "shape (frictionmask property)": [[192, "reV.supply_curve.exclusions.FrictionMask.shape"]], "layermask (class in rev.supply_curve.exclusions)": [[193, "reV.supply_curve.exclusions.LayerMask"]], "exclude_values (layermask property)": [[193, "reV.supply_curve.exclusions.LayerMask.exclude_values"]], "force_include (layermask property)": [[193, "reV.supply_curve.exclusions.LayerMask.force_include"]], "include_values (layermask property)": [[193, "reV.supply_curve.exclusions.LayerMask.include_values"]], "include_weights (layermask property)": [[193, "reV.supply_curve.exclusions.LayerMask.include_weights"]], "mask_type (layermask property)": [[193, "reV.supply_curve.exclusions.LayerMask.mask_type"]], "max_value (layermask property)": [[193, "reV.supply_curve.exclusions.LayerMask.max_value"]], "min_value (layermask property)": [[193, "reV.supply_curve.exclusions.LayerMask.min_value"]], "name (layermask property)": [[193, "reV.supply_curve.exclusions.LayerMask.name"]], "rev.supply_curve.extent": [[194, "module-reV.supply_curve.extent"]], "supplycurveextent (class in rev.supply_curve.extent)": [[195, "reV.supply_curve.extent.SupplyCurveExtent"]], "close() (supplycurveextent method)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.close"]], "col_indices (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.col_indices"]], "cols_of_excl (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.cols_of_excl"]], "excl_col_slices (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.excl_col_slices"]], "excl_cols (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.excl_cols"]], "excl_row_slices (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.excl_row_slices"]], "excl_rows (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.excl_rows"]], 
"excl_shape (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.excl_shape"]], "exclusions (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.exclusions"]], "get_coord() (supplycurveextent method)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.get_coord"]], "get_excl_points() (supplycurveextent method)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.get_excl_points"]], "get_excl_slices() (supplycurveextent method)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.get_excl_slices"]], "get_flat_excl_ind() (supplycurveextent method)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.get_flat_excl_ind"]], "get_sc_row_col_ind() (supplycurveextent method)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.get_sc_row_col_ind"]], "get_slice_lookup() (supplycurveextent method)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.get_slice_lookup"]], "lat_lon (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.lat_lon"]], "latitude (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.latitude"]], "longitude (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.longitude"]], "n_cols (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.n_cols"]], "n_rows (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.n_rows"]], "points (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.points"]], "resolution (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.resolution"]], "row_indices (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.row_indices"]], "rows_of_excl (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.rows_of_excl"]], "shape (supplycurveextent property)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.shape"]], "valid_sc_points() 
(supplycurveextent method)": [[195, "reV.supply_curve.extent.SupplyCurveExtent.valid_sc_points"]], "rev.supply_curve.points": [[196, "module-reV.supply_curve.points"]], "abstractsupplycurvepoint (class in rev.supply_curve.points)": [[197, "reV.supply_curve.points.AbstractSupplyCurvePoint"]], "cols (abstractsupplycurvepoint property)": [[197, "reV.supply_curve.points.AbstractSupplyCurvePoint.cols"]], "get_agg_slices() (abstractsupplycurvepoint static method)": [[197, "reV.supply_curve.points.AbstractSupplyCurvePoint.get_agg_slices"]], "gid (abstractsupplycurvepoint property)": [[197, "reV.supply_curve.points.AbstractSupplyCurvePoint.gid"]], "resolution (abstractsupplycurvepoint property)": [[197, "reV.supply_curve.points.AbstractSupplyCurvePoint.resolution"]], "rows (abstractsupplycurvepoint property)": [[197, "reV.supply_curve.points.AbstractSupplyCurvePoint.rows"]], "sc_point_gid (abstractsupplycurvepoint property)": [[197, "reV.supply_curve.points.AbstractSupplyCurvePoint.sc_point_gid"]], "aggregationsupplycurvepoint (class in rev.supply_curve.points)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint"]], "agg_data_layers() (aggregationsupplycurvepoint method)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.agg_data_layers"]], "aggregate() (aggregationsupplycurvepoint method)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.aggregate"]], "area (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.area"]], "bool_mask (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.bool_mask"]], "centroid (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.centroid"]], "close() (aggregationsupplycurvepoint method)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.close"]], "cols (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.cols"]], 
"country (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.country"]], "county (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.county"]], "elevation (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.elevation"]], "exclusion_weighted_mean() (aggregationsupplycurvepoint method)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.exclusion_weighted_mean"]], "exclusions (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.exclusions"]], "get_agg_slices() (aggregationsupplycurvepoint static method)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.get_agg_slices"]], "gid (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.gid"]], "gid_counts (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.gid_counts"]], "h5 (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.h5"]], "h5_gid_set (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.h5_gid_set"]], "include_mask (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.include_mask"]], "include_mask_flat (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.include_mask_flat"]], "latitude (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.latitude"]], "longitude (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.longitude"]], "mean_wind_dirs() (aggregationsupplycurvepoint method)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.mean_wind_dirs"]], "n_gids (aggregationsupplycurvepoint property)": [[198, 
"reV.supply_curve.points.AggregationSupplyCurvePoint.n_gids"]], "offshore (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.offshore"]], "pixel_area (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.pixel_area"]], "resolution (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.resolution"]], "rows (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.rows"]], "run() (aggregationsupplycurvepoint class method)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.run"]], "sc_mean() (aggregationsupplycurvepoint class method)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.sc_mean"]], "sc_point_gid (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.sc_point_gid"]], "sc_sum() (aggregationsupplycurvepoint class method)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.sc_sum"]], "state (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.state"]], "summary (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.summary"]], "timezone (aggregationsupplycurvepoint property)": [[198, "reV.supply_curve.points.AggregationSupplyCurvePoint.timezone"]], "generationsupplycurvepoint (class in rev.supply_curve.points)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint"]], "agg_data_layers() (generationsupplycurvepoint method)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.agg_data_layers"]], "aggregate() (generationsupplycurvepoint method)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.aggregate"]], "area (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.area"]], "bool_mask (generationsupplycurvepoint property)": [[199, 
"reV.supply_curve.points.GenerationSupplyCurvePoint.bool_mask"]], "capacity (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.capacity"]], "capacity_ac (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.capacity_ac"]], "centroid (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.centroid"]], "close() (generationsupplycurvepoint method)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.close"]], "cols (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.cols"]], "country (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.country"]], "county (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.county"]], "economies_of_scale() (generationsupplycurvepoint static method)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.economies_of_scale"]], "elevation (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.elevation"]], "exclusion_weighted_mean() (generationsupplycurvepoint method)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.exclusion_weighted_mean"]], "exclusions (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.exclusions"]], "friction_data (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.friction_data"]], "gen (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.gen"]], "gen_data (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.gen_data"]], "gen_gid_set (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.gen_gid_set"]], "get_agg_slices() (generationsupplycurvepoint 
static method)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.get_agg_slices"]], "gid (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.gid"]], "gid_counts (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.gid_counts"]], "h5 (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.h5"]], "h5_dsets_data (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.h5_dsets_data"]], "h5_gid_set (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.h5_gid_set"]], "include_mask (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.include_mask"]], "include_mask_flat (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.include_mask_flat"]], "latitude (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.latitude"]], "lcoe_data (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.lcoe_data"]], "longitude (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.longitude"]], "mean_cf (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.mean_cf"]], "mean_friction (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.mean_friction"]], "mean_h5_dsets_data (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.mean_h5_dsets_data"]], "mean_lcoe (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.mean_lcoe"]], "mean_lcoe_friction (generationsupplycurvepoint property)": [[199, 
"reV.supply_curve.points.GenerationSupplyCurvePoint.mean_lcoe_friction"]], "mean_res (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.mean_res"]], "mean_wind_dirs() (generationsupplycurvepoint method)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.mean_wind_dirs"]], "n_gids (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.n_gids"]], "offshore (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.offshore"]], "pixel_area (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.pixel_area"]], "point_summary() (generationsupplycurvepoint method)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.point_summary"]], "power_density (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.power_density"]], "power_density_ac (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.power_density_ac"]], "res_data (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.res_data"]], "res_gid_set (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.res_gid_set"]], "resolution (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.resolution"]], "rows (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.rows"]], "run() (generationsupplycurvepoint class method)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.run"]], "sc_mean() (generationsupplycurvepoint class method)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.sc_mean"]], "sc_point_gid (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.sc_point_gid"]], "sc_sum() 
(generationsupplycurvepoint class method)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.sc_sum"]], "state (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.state"]], "summarize() (generationsupplycurvepoint class method)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.summarize"]], "summary (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.summary"]], "timezone (generationsupplycurvepoint property)": [[199, "reV.supply_curve.points.GenerationSupplyCurvePoint.timezone"]], "supplycurvepoint (class in rev.supply_curve.points)": [[200, "reV.supply_curve.points.SupplyCurvePoint"]], "agg_data_layers() (supplycurvepoint method)": [[200, "reV.supply_curve.points.SupplyCurvePoint.agg_data_layers"]], "aggregate() (supplycurvepoint method)": [[200, "reV.supply_curve.points.SupplyCurvePoint.aggregate"]], "area (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.area"]], "bool_mask (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.bool_mask"]], "centroid (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.centroid"]], "close() (supplycurvepoint method)": [[200, "reV.supply_curve.points.SupplyCurvePoint.close"]], "cols (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.cols"]], "exclusion_weighted_mean() (supplycurvepoint method)": [[200, "reV.supply_curve.points.SupplyCurvePoint.exclusion_weighted_mean"]], "exclusions (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.exclusions"]], "get_agg_slices() (supplycurvepoint static method)": [[200, "reV.supply_curve.points.SupplyCurvePoint.get_agg_slices"]], "gid (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.gid"]], "h5 (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.h5"]], "include_mask (supplycurvepoint 
property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.include_mask"]], "include_mask_flat (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.include_mask_flat"]], "latitude (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.latitude"]], "longitude (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.longitude"]], "mean_wind_dirs() (supplycurvepoint method)": [[200, "reV.supply_curve.points.SupplyCurvePoint.mean_wind_dirs"]], "n_gids (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.n_gids"]], "pixel_area (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.pixel_area"]], "resolution (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.resolution"]], "rows (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.rows"]], "sc_mean() (supplycurvepoint class method)": [[200, "reV.supply_curve.points.SupplyCurvePoint.sc_mean"]], "sc_point_gid (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.sc_point_gid"]], "sc_sum() (supplycurvepoint class method)": [[200, "reV.supply_curve.points.SupplyCurvePoint.sc_sum"]], "summary (supplycurvepoint property)": [[200, "reV.supply_curve.points.SupplyCurvePoint.summary"]], "rev.supply_curve.sc_aggregation": [[201, "module-reV.supply_curve.sc_aggregation"]], "supplycurveaggfilehandler (class in rev.supply_curve.sc_aggregation)": [[202, "reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler"]], "close() (supplycurveaggfilehandler method)": [[202, "reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.close"]], "data_layers (supplycurveaggfilehandler property)": [[202, "reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.data_layers"]], "exclusions (supplycurveaggfilehandler property)": [[202, "reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.exclusions"]], "friction_layer 
(supplycurveaggfilehandler property)": [[202, "reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.friction_layer"]], "gen (supplycurveaggfilehandler property)": [[202, "reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.gen"]], "h5 (supplycurveaggfilehandler property)": [[202, "reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.h5"]], "power_density (supplycurveaggfilehandler property)": [[202, "reV.supply_curve.sc_aggregation.SupplyCurveAggFileHandler.power_density"]], "supplycurveaggregation (class in rev.supply_curve.sc_aggregation)": [[203, "reV.supply_curve.sc_aggregation.SupplyCurveAggregation"]], "gids (supplycurveaggregation property)": [[203, "reV.supply_curve.sc_aggregation.SupplyCurveAggregation.gids"]], "run() (supplycurveaggregation method)": [[203, "reV.supply_curve.sc_aggregation.SupplyCurveAggregation.run"]], "run_parallel() (supplycurveaggregation method)": [[203, "reV.supply_curve.sc_aggregation.SupplyCurveAggregation.run_parallel"]], "run_serial() (supplycurveaggregation class method)": [[203, "reV.supply_curve.sc_aggregation.SupplyCurveAggregation.run_serial"]], "shape (supplycurveaggregation property)": [[203, "reV.supply_curve.sc_aggregation.SupplyCurveAggregation.shape"]], "summarize() (supplycurveaggregation method)": [[203, "reV.supply_curve.sc_aggregation.SupplyCurveAggregation.summarize"]], "rev.supply_curve.supply_curve": [[204, "module-reV.supply_curve.supply_curve"]], "supplycurve (class in rev.supply_curve.supply_curve)": [[205, "reV.supply_curve.supply_curve.SupplyCurve"]], "add_sum_cols() (supplycurve static method)": [[205, "reV.supply_curve.supply_curve.SupplyCurve.add_sum_cols"]], "compute_total_lcoe() (supplycurve method)": [[205, "reV.supply_curve.supply_curve.SupplyCurve.compute_total_lcoe"]], "full_sort() (supplycurve method)": [[205, "reV.supply_curve.supply_curve.SupplyCurve.full_sort"]], "run() (supplycurve method)": [[205, "reV.supply_curve.supply_curve.SupplyCurve.run"]], "simple_sort() (supplycurve 
method)": [[205, "reV.supply_curve.supply_curve.SupplyCurve.simple_sort"]], "rev.supply_curve.tech_mapping": [[206, "module-reV.supply_curve.tech_mapping"]], "techmapping (class in rev.supply_curve.tech_mapping)": [[207, "reV.supply_curve.tech_mapping.TechMapping"]], "distance_threshold (techmapping property)": [[207, "reV.supply_curve.tech_mapping.TechMapping.distance_threshold"]], "map_resource() (techmapping method)": [[207, "reV.supply_curve.tech_mapping.TechMapping.map_resource"]], "map_resource_gids() (techmapping class method)": [[207, "reV.supply_curve.tech_mapping.TechMapping.map_resource_gids"]], "run() (techmapping class method)": [[207, "reV.supply_curve.tech_mapping.TechMapping.run"]], "save_tech_map() (techmapping static method)": [[207, "reV.supply_curve.tech_mapping.TechMapping.save_tech_map"]], "rev.utilities": [[208, "module-reV.utilities"]], "modulename (class in rev.utilities)": [[209, "reV.utilities.ModuleName"]], "all_names() (modulename class method)": [[209, "reV.utilities.ModuleName.all_names"]], "rev.utilities.cli_functions": [[210, "module-reV.utilities.cli_functions"]], "format_analysis_years() (in module rev.utilities.cli_functions)": [[211, "reV.utilities.cli_functions.format_analysis_years"]], "init_cli_logging() (in module rev.utilities.cli_functions)": [[212, "reV.utilities.cli_functions.init_cli_logging"]], "parse_from_pipeline() (in module rev.utilities.cli_functions)": [[213, "reV.utilities.cli_functions.parse_from_pipeline"]], "rev.utilities.curtailment": [[214, "module-reV.utilities.curtailment"]], "curtail() (in module rev.utilities.curtailment)": [[215, "reV.utilities.curtailment.curtail"]], "rev.utilities.exceptions": [[216, "module-reV.utilities.exceptions"]], "collectionruntimeerror": [[217, "reV.utilities.exceptions.CollectionRuntimeError"]], "collectionvalueerror": [[218, "reV.utilities.exceptions.CollectionValueError"]], "collectionwarning": [[219, "reV.utilities.exceptions.CollectionWarning"]], "configerror": [[220, 
"reV.utilities.exceptions.ConfigError"]], "configwarning": [[221, "reV.utilities.exceptions.ConfigWarning"]], "datashapeerror": [[222, "reV.utilities.exceptions.DataShapeError"]], "emptysupplycurvepointerror": [[223, "reV.utilities.exceptions.EmptySupplyCurvePointError"]], "exclusionlayererror": [[224, "reV.utilities.exceptions.ExclusionLayerError"]], "executionerror": [[225, "reV.utilities.exceptions.ExecutionError"]], "extrapolationwarning": [[226, "reV.utilities.exceptions.ExtrapolationWarning"]], "fileinputerror": [[227, "reV.utilities.exceptions.FileInputError"]], "fileinputwarning": [[228, "reV.utilities.exceptions.FileInputWarning"]], "handlerkeyerror": [[229, "reV.utilities.exceptions.HandlerKeyError"]], "handlerruntimeerror": [[230, "reV.utilities.exceptions.HandlerRuntimeError"]], "handlervalueerror": [[231, "reV.utilities.exceptions.HandlerValueError"]], "handlerwarning": [[232, "reV.utilities.exceptions.HandlerWarning"]], "inputerror": [[233, "reV.utilities.exceptions.InputError"]], "inputwarning": [[234, "reV.utilities.exceptions.InputWarning"]], "jsonerror": [[235, "reV.utilities.exceptions.JSONError"]], "multifileexclusionerror": [[236, "reV.utilities.exceptions.MultiFileExclusionError"]], "nearestneighborerror": [[237, "reV.utilities.exceptions.NearestNeighborError"]], "offshorewindinputerror": [[238, "reV.utilities.exceptions.OffshoreWindInputError"]], "offshorewindinputwarning": [[239, "reV.utilities.exceptions.OffshoreWindInputWarning"]], "outputwarning": [[240, "reV.utilities.exceptions.OutputWarning"]], "parallelexecutionwarning": [[241, "reV.utilities.exceptions.ParallelExecutionWarning"]], "pipelineerror": [[242, "reV.utilities.exceptions.PipelineError"]], "projectpointsvalueerror": [[243, "reV.utilities.exceptions.ProjectPointsValueError"]], "pysamversionerror": [[244, "reV.utilities.exceptions.PySAMVersionError"]], "pysamversionwarning": [[245, "reV.utilities.exceptions.PySAMVersionWarning"]], "resourceerror": [[246, 
"reV.utilities.exceptions.ResourceError"]], "samexecutionerror": [[247, "reV.utilities.exceptions.SAMExecutionError"]], "samexecutionwarning": [[248, "reV.utilities.exceptions.SAMExecutionWarning"]], "saminputerror": [[249, "reV.utilities.exceptions.SAMInputError"]], "saminputwarning": [[250, "reV.utilities.exceptions.SAMInputWarning"]], "slurmwarning": [[251, "reV.utilities.exceptions.SlurmWarning"]], "supplycurveerror": [[252, "reV.utilities.exceptions.SupplyCurveError"]], "supplycurveinputerror": [[253, "reV.utilities.exceptions.SupplyCurveInputError"]], "whilelooppackingerror": [[254, "reV.utilities.exceptions.WhileLoopPackingError"]], "revdeprecationwarning": [[255, "reV.utilities.exceptions.reVDeprecationWarning"]], "reverror": [[256, "reV.utilities.exceptions.reVError"]], "revlossesvalueerror": [[257, "reV.utilities.exceptions.reVLossesValueError"]], "revlosseswarning": [[258, "reV.utilities.exceptions.reVLossesWarning"]], "log_versions() (in module rev.utilities)": [[259, "reV.utilities.log_versions"]], "rev.utilities.pytest_utils": [[260, "module-reV.utilities.pytest_utils"]], "make_fake_h5_chunks() (in module rev.utilities.pytest_utils)": [[261, "reV.utilities.pytest_utils.make_fake_h5_chunks"]], "pd_date_range() (in module rev.utilities.pytest_utils)": [[262, "reV.utilities.pytest_utils.pd_date_range"]], "write_chunk() (in module rev.utilities.pytest_utils)": [[263, "reV.utilities.pytest_utils.write_chunk"]], "rev.utilities.slots": [[264, "module-reV.utilities.slots"]], "slotteddict (class in rev.utilities.slots)": [[265, "reV.utilities.slots.SlottedDict"]], "items() (slotteddict method)": [[265, "reV.utilities.slots.SlottedDict.items"]], "keys() (slotteddict method)": [[265, "reV.utilities.slots.SlottedDict.keys"]], "update() (slotteddict method)": [[265, "reV.utilities.slots.SlottedDict.update"]], "values() (slotteddict method)": [[265, "reV.utilities.slots.SlottedDict.values"]], "rev.version": [[266, "module-reV.version"]], "--verbose": [[268, 
"cmdoption-reV-v"], [278, "cmdoption-reV-project-points-v"]], "--version": [[268, "cmdoption-reV-version"], [278, "cmdoption-reV-project-points-version"]], "-v": [[268, "cmdoption-reV-v"], [278, "cmdoption-reV-project-points-v"]], "rev command line option": [[268, "cmdoption-reV-v"], [268, "cmdoption-reV-version"]], "--cancel": [[269, "cmdoption-reV-batch-cancel"], [277, "cmdoption-reV-pipeline-cancel"]], "--config_file": [[269, "cmdoption-reV-batch-c"], [270, "cmdoption-reV-bespoke-c"], [271, "cmdoption-reV-collect-c"], [272, "cmdoption-reV-econ-c"], [273, "cmdoption-reV-generation-c"], [274, "cmdoption-reV-hybrids-c"], [275, "cmdoption-reV-multiyear-c"], [276, "cmdoption-reV-nrwal-c"], [277, "cmdoption-reV-pipeline-c"], [279, "cmdoption-reV-qa-qc-c"], [280, "cmdoption-reV-rep-profiles-c"], [282, "cmdoption-reV-script-c"], [284, "cmdoption-reV-supply-curve-c"], [285, "cmdoption-reV-supply-curve-aggregation-c"]], "--delete": [[269, "cmdoption-reV-batch-delete"]], "--dry": [[269, "cmdoption-reV-batch-dry"]], "--monitor-background": [[269, "cmdoption-reV-batch-monitor-background"]], "-c": [[269, "cmdoption-reV-batch-c"], [270, "cmdoption-reV-bespoke-c"], [271, "cmdoption-reV-collect-c"], [272, "cmdoption-reV-econ-c"], [273, "cmdoption-reV-generation-c"], [274, "cmdoption-reV-hybrids-c"], [275, "cmdoption-reV-multiyear-c"], [276, "cmdoption-reV-nrwal-c"], [277, "cmdoption-reV-pipeline-c"], [279, "cmdoption-reV-qa-qc-c"], [280, "cmdoption-reV-rep-profiles-c"], [282, "cmdoption-reV-script-c"], [284, "cmdoption-reV-supply-curve-c"], [285, "cmdoption-reV-supply-curve-aggregation-c"]], "rev-batch command line option": [[269, "cmdoption-reV-batch-c"], [269, "cmdoption-reV-batch-cancel"], [269, "cmdoption-reV-batch-delete"], [269, "cmdoption-reV-batch-dry"], [269, "cmdoption-reV-batch-monitor-background"]], "rev-bespoke command line option": [[270, "cmdoption-reV-bespoke-c"]], "rev-collect command line option": [[271, "cmdoption-reV-collect-c"]], "rev-econ command line 
option": [[272, "cmdoption-reV-econ-c"]], "rev-generation command line option": [[273, "cmdoption-reV-generation-c"]], "rev-hybrids command line option": [[274, "cmdoption-reV-hybrids-c"]], "rev-multiyear command line option": [[275, "cmdoption-reV-multiyear-c"]], "rev-nrwal command line option": [[276, "cmdoption-reV-nrwal-c"]], "--background": [[277, "cmdoption-reV-pipeline-background"]], "--monitor": [[277, "cmdoption-reV-pipeline-monitor"]], "--recursive": [[277, "cmdoption-reV-pipeline-r"], [283, "cmdoption-reV-status-r"]], "-r": [[277, "cmdoption-reV-pipeline-r"], [278, "cmdoption-reV-project-points-from-regions-r"], [283, "cmdoption-reV-status-r"]], "rev-pipeline command line option": [[277, "cmdoption-reV-pipeline-background"], [277, "cmdoption-reV-pipeline-c"], [277, "cmdoption-reV-pipeline-cancel"], [277, "cmdoption-reV-pipeline-monitor"], [277, "cmdoption-reV-pipeline-r"]], "--fpath": [[278, "cmdoption-reV-project-points-f"]], "--lat_lon_coords": [[278, "cmdoption-reV-project-points-from-lat-lons-lat_lon_coords"]], "--lat_lon_fpath": [[278, "cmdoption-reV-project-points-from-lat-lons-llf"]], "--llc": [[278, "cmdoption-reV-project-points-from-lat-lons-lat_lon_coords"]], "--region": [[278, "cmdoption-reV-project-points-from-regions-r"]], "--region_col": [[278, "cmdoption-reV-project-points-from-regions-col"]], "--regions": [[278, "cmdoption-reV-project-points-from-regions-regs"]], "--res_file": [[278, "cmdoption-reV-project-points-rf"]], "--sam_file": [[278, "cmdoption-reV-project-points-sf"]], "-col": [[278, "cmdoption-reV-project-points-from-regions-col"]], "-f": [[278, "cmdoption-reV-project-points-f"], [281, "cmdoption-reV-reset-status-f"]], "-llf": [[278, "cmdoption-reV-project-points-from-lat-lons-llf"]], "-regs": [[278, "cmdoption-reV-project-points-from-regions-regs"]], "-rf": [[278, "cmdoption-reV-project-points-rf"]], "-sf": [[278, "cmdoption-reV-project-points-sf"]], "rev-project-points command line option": [[278, 
"cmdoption-reV-project-points-f"], [278, "cmdoption-reV-project-points-rf"], [278, "cmdoption-reV-project-points-sf"], [278, "cmdoption-reV-project-points-v"], [278, "cmdoption-reV-project-points-version"]], "rev-project-points-from-lat-lons command line option": [[278, "cmdoption-reV-project-points-from-lat-lons-lat_lon_coords"], [278, "cmdoption-reV-project-points-from-lat-lons-llf"]], "rev-project-points-from-regions command line option": [[278, "cmdoption-reV-project-points-from-regions-col"], [278, "cmdoption-reV-project-points-from-regions-r"], [278, "cmdoption-reV-project-points-from-regions-regs"]], "rev-qa-qc command line option": [[279, "cmdoption-reV-qa-qc-c"]], "rev-rep-profiles command line option": [[280, "cmdoption-reV-rep-profiles-c"]], "--after-step": [[281, "cmdoption-reV-reset-status-a"]], "--force": [[281, "cmdoption-reV-reset-status-f"]], "-a": [[281, "cmdoption-reV-reset-status-a"]], "directory": [[281, "cmdoption-reV-reset-status-arg-DIRECTORY"]], "rev-reset-status command line option": [[281, "cmdoption-reV-reset-status-a"], [281, "cmdoption-reV-reset-status-arg-DIRECTORY"], [281, "cmdoption-reV-reset-status-f"]], "rev-script command line option": [[282, "cmdoption-reV-script-c"]], "--include": [[283, "cmdoption-reV-status-i"]], "--pipe_steps": [[283, "cmdoption-reV-status-ps"]], "--status": [[283, "cmdoption-reV-status-s"]], "-i": [[283, "cmdoption-reV-status-i"]], "-ps": [[283, "cmdoption-reV-status-ps"]], "-s": [[283, "cmdoption-reV-status-s"]], "folder": [[283, "cmdoption-reV-status-arg-FOLDER"]], "rev-status command line option": [[283, "cmdoption-reV-status-arg-FOLDER"], [283, "cmdoption-reV-status-i"], [283, "cmdoption-reV-status-ps"], [283, "cmdoption-reV-status-r"], [283, "cmdoption-reV-status-s"]], "rev-supply-curve command line option": [[284, "cmdoption-reV-supply-curve-c"]], "rev-supply-curve-aggregation command line option": [[285, "cmdoption-reV-supply-curve-aggregation-c"]], "--type": [[286, 
"cmdoption-reV-template-configs-t"]], "-t": [[286, "cmdoption-reV-template-configs-t"]], "commands": [[286, "cmdoption-reV-template-configs-arg-COMMANDS"]], "rev-template-configs command line option": [[286, "cmdoption-reV-template-configs-arg-COMMANDS"], [286, "cmdoption-reV-template-configs-t"]]}}) \ No newline at end of file