From e80079347c839b8e02501a6bea251c0d302311d0 Mon Sep 17 00:00:00 2001 From: Johannes Kochems Date: Sat, 12 Mar 2022 17:15:07 +0100 Subject: [PATCH] Initial commit --- .gitignore | 24 + .readthedocs.yaml | 11 + CITATION.cff | 48 + LICENSE | 22 + README.md | 139 ++ cli.py | 65 + config.yml | 34 + environment.yml | 14 + pommesinvest/__init__.py | 18 + pommesinvest/__main__.py | 3 + pommesinvest/model/__init__.py | 0 pommesinvest/model/investment_model.py | 615 ++++++ pommesinvest/model_funcs/__init__.py | 0 pommesinvest/model_funcs/data_input.py | 1161 ++++++++++ pommesinvest/model_funcs/helpers.py | 482 ++++ pommesinvest/model_funcs/model_control.py | 1711 ++++++++++++++ pommesinvest/model_funcs/subroutines.py | 2449 +++++++++++++++++++++ pyproject.toml | 38 + setup.py | 83 + tests/test_cli.py | 0 tests/test_data_input.py | 0 tests/test_helpers.py | 0 tests/test_invesment_model.py | 0 tests/test_model_control.py | 0 tests/test_subroutines.py | 0 25 files changed, 6917 insertions(+) create mode 100644 .gitignore create mode 100644 .readthedocs.yaml create mode 100644 CITATION.cff create mode 100644 LICENSE create mode 100644 README.md create mode 100644 cli.py create mode 100644 config.yml create mode 100644 environment.yml create mode 100644 pommesinvest/__init__.py create mode 100644 pommesinvest/__main__.py create mode 100644 pommesinvest/model/__init__.py create mode 100644 pommesinvest/model/investment_model.py create mode 100644 pommesinvest/model_funcs/__init__.py create mode 100644 pommesinvest/model_funcs/data_input.py create mode 100644 pommesinvest/model_funcs/helpers.py create mode 100644 pommesinvest/model_funcs/model_control.py create mode 100644 pommesinvest/model_funcs/subroutines.py create mode 100644 pyproject.toml create mode 100644 setup.py create mode 100644 tests/test_cli.py create mode 100644 tests/test_data_input.py create mode 100644 tests/test_helpers.py create mode 100644 tests/test_invesment_model.py create mode 100644 
tests/test_model_control.py create mode 100644 tests/test_subroutines.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d75fa7a --- /dev/null +++ b/.gitignore @@ -0,0 +1,24 @@ +# Ignore (files stored at) a given path +/results +/inputs +/build +/dist +*.egg-info + +# Ignore files with a special ending (or content) +*__pycache__* +*.ipynb_checkpoints/* +*.idea +*start_jupyter* +*.aux +*.bcf +*.lof +*.out +*.run.xml +*.toc +*~ +*.gurobi.* +*.pyomo.* +*.tmp +*.log +*.lp \ No newline at end of file diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 0000000..f01f5df --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,11 @@ +version: 2 + +# Build from the docs/ directory with Sphinx +sphinx: + configuration: docs/conf.py + +# Explicitly set the version of Python and its requirements +python: + version: 3.7 + install: + - requirements: docs/requirements.txt \ No newline at end of file diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000..4890650 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,48 @@ +abstract: "pommesinvest. A bottom-up fundamental investment model for the German electricity sector. https://github.com/pommes-public/pommesinvest, accessed YYYY-MM-DD." 
+authors: + - family-names: Kochems + given-names: Johannes + orcid: "https://orcid.org/0000-0002-3461-3679" + - family-names: Giehl + given-names: Johannes + orcid: "https://orcid.org/0000-0002-1769-1907" + - family-names: Werner + given-names: Yannick + orcid: "https://orcid.org/0000-0002-6674-805X" + - family-names: Grosse + given-names: Benjamin + orcid: "https://orcid.org/0000-0002-3323-9734" + - family-names: Faist + given-names: Julien + - family-names: Kachel + given-names: Hannes + - family-names: Westphal + given-names: Sophie + - family-names: "Mikulicz-Radecki" + given-names: Flora + - family-names: Spiller + given-names: Carla + - family-names: "Büllesbach" + given-names: Fabian + - family-names: Ghosh + given-names: Timona + - family-names: Verwiebe + given-names: Paul + orcid: "https://orcid.org/0000-0002-6877-2846" + - family-names: "Encinas-Rosa" + given-names: Leticia + - family-names: "Müller-Kirchenbauer" + given-names: Joachim +cff-version: "1.1.0" +date-released: 2022-XX-XX +doi: https://doi.org/ +keywords: + - "multi-period model" + - "electricity sector" + - "investment" + - "capacity mix" + - "oemof.solph" +license: "MIT license" +message: "If you use this software, please cite as follows." +title: pommesinvest +version: "0.1.0" \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..96f051a --- /dev/null +++ b/LICENSE @@ -0,0 +1,22 @@ +# License +This software is licensed under MIT License. 
+ +Copyright 2021 pommes developer group + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..75ade88 --- /dev/null +++ b/README.md @@ -0,0 +1,139 @@ +![PyPI](https://img.shields.io/pypi/v/pommesdispatch) +![PyPI - Python Version](https://img.shields.io/pypi/pyversions/pommesdispatch) +![Documentation Status](https://readthedocs.org/projects/pommesdispatch/badge/?version=latest) +![PyPI - License](https://img.shields.io/pypi/l/pommesdispatch) + +# pommesdispatch + +**A bottom-up fundamental investment model for the German electricity sector** + +This is the **investment variant** of the fundamental power market model *POMMES* (**PO**wer **M**arket **M**odel of **E**nergy and re**S**ources). +Please navigate to the section of interest to find out more. 
+ +## Contents +* [Introduction](#introduction) +* [Documentation](#documentation) +* [Installation](#installation) + * [Setting up pommesdispatch](#setting-up-pommesdispatch) + * [Installing a solver](#installing-a-solver) +* [Contributing](#contributing) +* [Citing](#citing) +* [License](#license) + +## Introduction +*POMMES* itself is a cosmos consisting of a **dispatch model** (stored in this repository and described here), a **data preparation routine** and an **investment model** for the German wholesale power market. The model was originally developed by a group of researchers and students at the [chair of Energy and Resources Management of TU Berlin](https://www.er.tu-berlin.de/menue/home/) and is now maintained by a group of alumni and open for other contributions. + +If you are interested in the data preparation routines used or investment modeling, please find more information here: +- [pommesdata](https://github.com/pommes-public/pommesdata): A full-featured transparent data preparation routine from raw data to POMMES model inputs +- pommesinvest: A multi-period integrated investment and dispatch model for the German power sector (upcoming). + +### Purpose and model characterization +The **dispatch variant** of the power market model *POMMES* `pommesdispatch` enables the user to simulate the **dispatch of backup power plants, storages as well as demand response units for the Federal Republic of Germany** for an arbitrary year or timeframe between 2017 and 2030. The dispatch of renewable power plants is exogenously determined by normalized infeed time series and capacity values. The model's overall goal is to minimize power system costs occurring from wholesale markets whereby no network constraints are considered except for the existing bidding zone configuration used for modeling electricity exchange. Thus, the model purpose is to simulate **dispatch decisions** and the resulting **day-ahead market prices**. 
A brief categorization of the model is given in the following table. An extensive categorization can be found in the [model documentation](https://pommesdispatch.readthedocs.io/). + +| **criterion** | **manifestation** | +| ---- | ---- | +| Purpose | - simulation of power plant dispatch and day-ahead prices for DE (scenario analysis) | +| Spatial coverage | - Germany (DE-LU) + electrical neighbours (NTC approach) | +| Time horizon | - usually 1 year in hourly resolution | +| Technologies | - conventional power plants, storages, demand response (optimized)
- renewable generators (fixed)
- demand: exogenous time series | +| Data sources | - input data not shipped out, but can be obtained from [pommesdata](https://github.com/pommes-public/pommesdata); OPSD, BNetzA, ENTSO-E, others | +| Implementation | - graph representation & linear optimization: [oemof.solph](https://github.com/oemof/oemof-solph) / [pyomo](https://github.com/Pyomo/pyomo)
- data management: python / .csv | + +### Mathematical and technical implementation +The model's underlying mathematical method is a **linear programming** approach, seeking to minimize overall +power system costs under constraints such as satisfying power demand at all times and not violating power generation +capacity or storage limits. Thus, binary variables such as units' status, startups and shutdowns are not accounted for. + +The model builds on the framework **[oemof.solph](https://github.com/oemof/oemof-solph)** which allows modeling +energy systems in a graph-based representation with the underlying mathematical constraints and objective function +terms implemented in **[pyomo](https://pyomo.readthedocs.io/en/stable/)**. Some of the required oemof.solph features - such as demand response modeling - have been provided by the *POMMES* main developers, who are also active in the +oemof community. Users not familiar with oemof.solph may find further information in the +[oemof.solph documentation](https://oemof-solph.readthedocs.io/en/latest/readme.html). + +## Documentation +An extensive **[documentation of pommesdispatch](https://pommesdispatch.readthedocs.io/)** can be found on readthedocs. It contains a user's guide, a model categorization, some energy economic and technical background information, a complete model formulation as well as documentation of the model functions and classes. + +## Installation +To set up `pommesdispatch`, set up a virtual environment (e.g. using conda) or add the required packages to your python installation. Additionally, you have to install a solver in order to solve the mathematical optimization problem. + +### Setting up pommesdispatch +`pommesdispatch` is hosted on [PyPI](https://pypi.org/project/pommesdispatch/). 
+To install it, please use the following command +``` +pip install pommesdispatch +``` + +If you want to contribute as a developer, you first have to +[fork](https://docs.github.com/en/get-started/quickstart/fork-a-repo) +it and then clone the repository, in order to copy the files locally by typing +``` +git clone https://github.com/your-github-username/pommesdispatch.git +``` +After cloning the repository, you have to install the required dependencies. +Make sure you have conda installed as a package manager. +If not, you can download it [here](https://www.anaconda.com/). +Open a command shell and navigate to the folder +where you copied the environment to. + +Use the following command to install dependencies +``` +conda env create -f environment.yml +``` +Activate your environment by typing +``` +conda activate pommes_dispatch +``` + +### Installing a solver +In order to solve a `pommesdispatch` model instance, you need a solver installed. Please see [oemof.solph's information on solvers](https://github.com/oemof/oemof-solph#installing-a-solver). As a default, gurobi is used for `pommesdispatch` models. It is a commercial solver, but provides academic licenses if this applies to you. Otherwise, we recommend using CBC as the solver oemof recommends. To test your solver and oemof.solph installation, again see information from [oemof.solph](https://github.com/oemof/oemof-solph#installation-test). + +## Contributing +Every kind of contribution or feedback is warmly welcome.
+We use the [GitHub issue management](https://github.com/pommes-public/pommesdispatch/issues) as well as +[pull requests](https://github.com/pommes-public/pommesdispatch/pulls) for collaboration. We try to stick to the PEP8 coding standards. + +The following people have contributed in the following manner to `pommesdispatch`: + +| Name | Contribution | Status | +| ---- | ---- | ---- | +| Johannes Kochems | major development & conceptualization
conceptualization, core functionality (esp. dispatch, power prices, demand response, rolling horizon modeling), architecture, publishing process | coordinator & maintainer,
developer & corresponding author | +| Yannick Werner | major development & conceptualization
conceptualization, core functionality (esp. exchange, RES, CHP modeling), interface to pommesdata | developer & corresponding author | +| Johannes Giehl | development
early-stage core functionality | developer | +| Benjamin Grosse | development
support for conceptualization, early-stage contributions at the interface to pommesdata | developer | +| Sophie Westphal | development
early-stage contributions at the interface to pommesdata | former developer (student assistant) | +| Flora von Mikulicz-Radecki | testing
early-stage comprehensive testing | former tester (student assistant) | +| Carla Spiller | development
early-stage rolling horizon and cross-border exchange integration | former developer (student assistant) | +| Fabian Büllesbach | development
early-stage rolling horizon implementation | former developer (master's student) | +| Timona Ghosh | development
early-stage cross-border exchange implementation | former developer (master's student) | +| Paul Verwiebe | support
support of early-stage core functionality development | former supporter (research associate) | +| Leticia Encinas Rosa | support
support of early-stage core functionality development | former supporter (research associate) | +| Joachim Müller-Kirchenbauer | support & conceptualization
early-stage conceptualization, funding | supporter (university professor) | + +*Note: Not every single contribution is reflected in the current version of +`pommesdispatch`. This is especially true for those marked as early-stage +contributions that may have been extended, altered or sometimes discarded. +Nonetheless, all people listed have made valuable contributions. The ones +discarded might be re-integrated at some point in time. +Dedicated contributions to `pommesdata` and `pommesinvest` are not included +in the list, but listed individually for these projects.* + +## Citing +A publication using and introducing `pommesdispatch` is currently in preparation. + +If you are using `pommesdispatch` for your own analyses, we recommend citing as:
+*Kochems, J.; Werner, Y.; Giehl, J.; Grosse, B. et al. (2021): pommesdispatch. A bottom-up fundamental power market model for the German electricity sector. https://github.com/pommes-public/pommesdispatch, accessed YYYY-MM-DD.* + +We furthermore recommend naming the version tag or the commit hash used for the sake of transparency and reproducibility. + +Also see the *CITATION.cff* file for citation information. + +## License +This software is licensed under MIT License. + +Copyright 2021 pommes developer group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/cli.py b/cli.py new file mode 100644 index 0000000..cdcd6a4 --- /dev/null +++ b/cli.py @@ -0,0 +1,65 @@ +from pommesinvest.model.investment_model import run_investment_model, add_args + + +def create_default_config(): + content = """# Determine the model configuration + +# 1) Set overall workflow control parameters +control_parameters: + rolling_horizon: False + aggregate_input: False + countries: ['AT', 'BE', 'CH', 'CZ', 'DE', 'DK1', 'DK2', 'FR', 'NL', + 'NO1', 'NO2', 'NO3', 'NO4', 'NO5', 'PL', + 'SE1', 'SE2', 'SE3', 'SE4'] + solver: "gurobi" + fuel_cost_pathway: "middle" + activate_emissions_limit: False + emissions_pathway: "100_percent_linear" + activate_demand_response: False + demand_response_approach: "DLR" + demand_response_scenario: "50" + save_production_results: True + save_price_results: True + write_lp_file: False + +# 2) Set model optimization time and frequency +time_parameters: + start_time: "2017-01-01 00:00:00" + end_time: "2017-01-02 23:00:00" + freq: "60min" + +# 3) Set input and output data paths +input_output_parameters: + path_folder_input: "./inputs/" + path_folder_output: "./results/" + +# 4) Set rolling horizon parameters (optional) +rolling_horizon_parameters: + time_slice_length_wo_overlap_in_hours: 24 + overlap_in_hours: 12""" + with open("./config.yml", "w") as opf: + opf.write(content) + + +def run_pommes_invest(): + args = add_args() + if args.init: + create_default_config() + return + # Use multiple iterations to update market values + if args.iterations > 1: + print( + f"Running pommesdispatch ITERATIVELY in " + f"{args.iterations} iterations" + ) + for iteration in range(args.iterations): + print(f"STARTING ITERATION {iteration}") + run_investment_model(args.file) + print(f"ITERATION {iteration} completed") + print("-" * 60) + else: + run_investment_model() + + +if __name__ == "__main__": + run_pommes_invest() diff --git a/config.yml b/config.yml new file mode 100644 index 0000000..2d4a2bd --- /dev/null +++ 
b/config.yml @@ -0,0 +1,34 @@ +# Determine the model configuration + +# 1) Set overall workflow control parameters +control_parameters: + rolling_horizon: False + aggregate_input: True + interest_rate: 0.02 + solver: "gurobi" + fuel_cost_pathway: "NZE" + emissions_cost_pathway: "long-term" + activate_emissions_limit: False + emissions_pathway: "100_percent_linear" + activate_demand_response: False + demand_response_approach: "DLR" + demand_response_scenario: "50" + save_production_results: True + save_investment_results: True + write_lp_file: False + +# 2) Set model optimization time and frequency +time_parameters: + start_time: "2017-01-01 00:00:00" + end_time: "2017-01-02 23:00:00" + freq: "4H" + +# 3) Set input and output data paths +input_output_parameters: + path_folder_input: "./inputs/" + path_folder_output: "./results/" + +# 4) Set rolling horizon parameters (optional) +rolling_horizon_parameters: + time_slice_length_wo_overlap_in_hours: 24 + overlap_in_hours: 12 \ No newline at end of file diff --git a/environment.yml b/environment.yml new file mode 100644 index 0000000..d1bd6b4 --- /dev/null +++ b/environment.yml @@ -0,0 +1,14 @@ +name: pommesinvest +channels: + - anaconda + - conda-forge + - defaults +dependencies: + - python=3.8 + - pip + - numpy + - pandas + - pip: + # TODO: Replace with release when available + - git+https://github.com/oemof/oemof-solph.git@features/multi-period + diff --git a/pommesinvest/__init__.py b/pommesinvest/__init__.py new file mode 100644 index 0000000..32bc443 --- /dev/null +++ b/pommesinvest/__init__.py @@ -0,0 +1,18 @@ +""" +pommesinvest +------------ + +A bottom-up fundamental investment model for the German electricity sector +""" + +__version__ = "0.1.0" +__author__ = ( + "Johannes Kochems, Yannick Werner, " + "Johannes Giehl, Benjamin Grosse" +) +__credits__ = ( + "Julien Faist, Hannes Kachel, Sophie Westphal, " + "Flora von Mikulicz-Radecki, Carla Spiller, " + "Fabian Büllesbach, Timona Ghosh, Paul Verwiebe, " + 
"Leticia Encinas Rosa, Joachim Müller-Kirchenbauer" +) \ No newline at end of file diff --git a/pommesinvest/__main__.py b/pommesinvest/__main__.py new file mode 100644 index 0000000..883983d --- /dev/null +++ b/pommesinvest/__main__.py @@ -0,0 +1,3 @@ +from pommesinvest.model.investment_model import run_investment_model + +run_investment_model() diff --git a/pommesinvest/model/__init__.py b/pommesinvest/model/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/pommesinvest/model/investment_model.py b/pommesinvest/model/investment_model.py new file mode 100644 index 0000000..39c6e29 --- /dev/null +++ b/pommesinvest/model/investment_model.py @@ -0,0 +1,615 @@ +# coding: utf-8 +""" +General description +------------------- +This is the investment variant of POMMES, the POwer Market Model +of Energy and reSources Department at TU Berlin. +The fundamental power market model has been originally developed +at TU Berlin and is now maintained by a developer group of alumni. +The source code is freely available under MIT license. +Usage of the model is highly encouraged. Contributing is welcome as well. + +Repository, Documentation, Installation +--------------------------------------- +All founds are hosted on +`GitHub `_ + +To install, simply type ``pip install pommesinvest`` + +Please find the documentation `here `_ + +Licensing information and Disclaimer +------------------------------------ +This software is provided under MIT License (see licensing file). + +A special thank you goes out to all the developers creating, +maintaining, and expanding packages used in this model, +especially to the oemof and pyomo developer groups! + +In addition to that, a special thank you goes to all students +and student assistants which have contributed to the model itself +or its data inputs. + +Input Data +---------- +Input data can be compiled using the ``pommesdata`` package. +A precompiled version is distributed with the investment model. 
+ +Installation requirements +------------------------- +See `environments.yml` file + +@author: Johannes Kochems (*), Johannes Giehl (*), Yannick Werner, +Benjamin Grosse + +Contributors: +Julien Faist, Hannes Kachel, Sophie Westphal, Flora von Mikulicz-Radecki, +Carla Spiller, Fabian Büllesbach, Timona Ghosh, Paul Verwiebe, +Leticia Encinas Rosa, Joachim Müller-Kirchenbauer + +(*) Corresponding authors +""" + + +import time, calendar +import logging +import pandas as pd +from matplotlib import pyplot as plt + +import oemof.solph as solph +from oemof.solph import processing +from oemof.solph import views +from oemof.tools import logger + +# Import all functions necessary for a model run. +# A complete enumeration of functions is used here, so it can be seen which functions are imported +from functions_for_model_control_invest import determine_timeslices_RH, \ + add_further_constrs, \ + build_simple_model, build_RH_model, initial_states_RH, solve_RH_model, \ + dump_es, reconstruct_objective_value + +from helper_functions_invest import years_between + +from functions_for_processing_of_outputs_invest import \ + extract_model_results, create_aggregated_energy_source_results, create_aggregated_investment_results, \ + create_aggregated_investment_results_RH, create_results_to_save, \ + draw_production_plot, draw_investment_decisions_plot, draw_investment_decisions_plot_RH, \ + draw_exo_investment_decisions_plot, draw_decommissioning_plot + +# %% + +############################################################################### +### MODEL SETTINGS ############################################################ +############################################################################### + +# %% + +### 1) Determine model configuration through control variables + +# Set file version +file_version = '_2050' +lignite_phase_out = 2038 +hardcoal_phase_out = 2038 +MaxInvest = True + +# Determine major model configuration +RollingHorizon = True +AggregateInput = True +Europe = 
False +solver = 'gurobi' + +# Determine fuel and investment cost pathways (options: lower, middle, upper) +# as well emission limits (options: BAU, 80_percent_linear, 95_percent_linear, 100_percent_linear, customized) +fuel_cost_pathway = 'middle' +investment_cost_pathway = 'middle' + +# Interest rate and discounting +# Important Note: By default, exclusively real cost values are applied, so no discounting is needed. +# Hence, the interest rate is only effective if discounting is explicitly activated. +# Please make sure to be using nominal cost data if you do so (but best thing is, you don't use it at all). +IR = 0.04 +discount = False + +# Control Demand response modeling +# options for approach: ['DIW', 'DLR', 'IER', 'TUD'] +# options for scenario: ['25', '50', '75'] +ActivateDemandResponse = False +approach = 'DLR' +scenario = '50' + +# Control emissions limit (options: BAU, 80_percent_linear, +# 95_percent_linear, 100_percent_linear) +ActivateEmissionsLimit = False +emission_pathway = '100_percent_linear' + +ActivateInvestmentBudgetLimit = False + +# 11.05.2019, JK: Determine processing of outputs + +# Decide which grouping to apply on investment decision results; +# possibilities: "sources", "technologies", None +grouping = "sources" + +Dumps = False +PlotProductionResults = True +PlotInvestmentDecisionsResults = True + +SaveProductionPlot = False +SaveInvestmentDecisionsPlot = False + +SaveProductionResults = True +SaveInvestmentDecisionsResults = True + +# %% + +### 2) Set model optimization time and frequency for simple model runs + +# Define starting and end year of (overall) optimization run and define which +# frequency shall be used. (End year is included.) 
+startyear = 2016 +endyear = 2036 +freq = '48H' # for alternatives see dict below + +# Determine the number of timesteps per year (no leap year, leap year, multiplicator) +# Multiplicator is used to adapt input data given for hourly timesteps +freq_timesteps = {'60min': (8760, 8784, 1), + '4H': (2190, 2196, 4), + '8H': (1095, 1098, 8), + '24H': (365, 366, 24), + '36H': (244, 244, 36), + '48H': (182, 183, 48), + '72H': (122, 122, 72), + '96H': (92, 92, 96), + '120H': (73, 73, 120), + '240H': (37, 37, 240)}[freq] + +# Multiplicator used for getting frequency adjusted input data +# NOTE: Time shift (winter time CET / summer time CEST) is ignored +# Instead, UTC timestamps are used +multiplicator = freq_timesteps[2] + +# Set myopic optimization horizon here +# NOTE: Rolling horizon approach is not a typical rolling horizon optimization, +# but a rolling annual window used for optimization modelling with myopic foresight +if RollingHorizon: + # NOTE: Myopic horizon must be <= overall timeframe length and + # furthermore, amount of years must be a multiple of myopic horizon length. + # Else a KeyError is thrown and execution terminates and has to be started + # again with parameters set according to the prerequisites here. + myopic_horizon_in_years = 4 + + # Overlap is set to 0 in the first place. 
+ overlap_in_timesteps = 0 + +# %% + +# Values used for model control only (No need to change anything here) + +# Create date strings including day, month and hour (used for the simulation) +# Date strings respresent the start of the first year and the end of the last one +starttime = str(startyear) + '-01-01 00:00:00' +endtime = str(endyear) + '-12-31 23:00:00' + +# Get the current time and convert it to a human readable form +# 08.12.2018, JK: time.localtime() is actually the one to be used in combination with time.mktime() +# since here, there are no further arguments, it is identical to time.localtime() as well as time.time() +ts = time.gmtime() +# ts_2 = time.localtime() +timestamp = time.strftime("%Y-%m-%d_%H-%M-%S", ts) +# 27.11.2018, CS: Calculate overall time and overall solution time +overall_objective = 0 +overall_time = 0 +overall_solution_time = 0 + +# Get the length of the (overall) optimization timeframe in years +optimization_timeframe = years_between(starttime, endtime) + 1 + +# Create string amendments in such a way that strings can be used to be concatenated +# to filenames indicating which model configuration has been considered +basic_filename = 'invest_' + +# This formulation is not really nice. Dicts don't work because keys must be unique. 
+if not RollingHorizon: + RH = 'overall_' + horizon_overlap = '' +else: + RH = 'myopic_' + horizon_overlap = str(myopic_horizon_in_years) + "years_" + str(overlap_in_timesteps) + "overlap_" +if AggregateInput: + Agg = 'clustered_' +else: + Agg = 'complete_' +if not Europe: + EU = 'DE-only' +else: + EU = 'EU' +if MaxInvest: + max_inv = '' +else: + max_inv = '_wo_invest_limits' + +# Filename concatenates all infos relevant for the model run +# (timestamp of model creation is no longer included) + +# filename = basic_filename + freq +"_" + RH + horizon_overlap + Agg + str(lignite_phase_out) + '_endyear' +str(endyear) + max_inv +filename = basic_filename + "start-" + starttime[:10] + "_" + str(optimization_timeframe) + "-years_" + RH + Agg + EU + +# Initialize logger for logging information +# NOTE: Created an additional path to store .log files +logger.define_logging(logfile=filename + '.log') + +# %% + +### 3) Set input data + +path_folder_input = './inputs/' +path_folder_input_csv = '../data/Outputlisten_Test_Invest/' + +# Use aggregated input data if respective control parameter is +# set to True -> It is highly recommended to use aggregated input data +if AggregateInput: + filename_min_max_timeseries = '4transformers_clustered_min_max_timeseries_' + 'BK' + str( + lignite_phase_out) + '_SK' + str(hardcoal_phase_out) + file_version + '_JK.csv' + # filename_min_max_timeseries = 'transformers_min_max_timeseries_clustered_2019-10-13.csv' + if Europe: + filename_node_data = '4power_market_input_data_invest_new_annually_clustered_' + 'BK' + str( + lignite_phase_out) + '_SK' + str(hardcoal_phase_out) + file_version + '_JK.xlsx' + # filename_node_data= 'node_input_data_invest_clustered_2019-10-13.xlsx' + logging.info('Using the AGGREGATED POWER PLANT DATA SET for EUROPE') + else: + filename_node_data = '4power_market_input_data_invest_new_annually_clustered_' + 'BK' + str( + lignite_phase_out) + '_SK' + str(hardcoal_phase_out) + file_version + '_JK.xlsx' + # 
filename_node_data = 'node_input_data_invest_clustered_2019-10-13.xlsx' + logging.info('Using the AGGREGATED POWER PLANT DATA SET for Germany') +# NOTE: This will not be applicable for investment modelling and might as well be dropped +else: + filename_min_max_timeseries = '4transformers_min_max_timeseries_' + 'BK' + str( + lignite_phase_out) + '_SK' + str(hardcoal_phase_out) + file_version + '_JK.csv' + # filename_min_max_timeseries = 'transformers_min_max_timeseries_complete_2019-10-13.csv' + if Europe: + filename_node_data = '4power_market_input_data_invest_new_annually_' + 'BK' + str( + lignite_phase_out) + '_SK' + str(hardcoal_phase_out) + file_version + '_JK.xlsx' + # filename_node_data= 'node_input_data_invest_complete_2019-10-13.xlsx' + logging.info('Using the COMPLETE POWER PLANT DATA SET for EUROPE. \n' + 'Minimum power output constraint of (individual) \n' + 'transformers will be neglected.') + else: + filename_node_data = '4power_market_input_data_invest_new_annually_' + 'BK' + str( + lignite_phase_out) + '_SK' + str(hardcoal_phase_out) + file_version + '_JK.xlsx' + # filename_node_data = 'node_input_data_invest_complete_2019-10-13.xlsx' + logging.info('Using the COMPLETE POWER PLANT DATA SET for GERMANY. \n' + 'Minimum power output constraint of (individual) \n' + 'transformers will be neglected.') + +# Input data containing timeseries information for nodes (except for cost data) +filename_node_timeseries = '5node_timeseries_' + 'BK' + str(lignite_phase_out) + '_SK' + str( + hardcoal_phase_out) + file_version + '.csv' +# filename_node_timeseries = 'node_timeseries_2019-10-13.csv' + +# Input data containing costs data +filename_cost_data = '2power_market_input_data_complete_cost' + file_version + '.xlsx' +# filename_cost_data = 'cost_input_data_invest_2019-10-13.xlsx' +filename_cost_timeseries = '3cost_timeseries' + file_version + '_JK.csv' +# filename_cost_timeseries = 'cost_timeseries_2019-10-13.csv' + +# Initialize an investment budget. 
+investment_budget = 0 + +if ActivateInvestmentBudgetLimit: + + investment_budget_per_year = 1000000 + + if RollingHorizon: + investment_budget = investment_budget_per_year * myopic_horizon_in_years + else: + investment_budget = investment_budget_per_year * optimization_timeframe + +if ActivateDemandResponse: + logging.info('Using approach from {} for DEMAND RESPONSE modeling\n' + 'Considering a {}% scenario'.format(approach, scenario)) +else: + logging.info('Running a model WITHOUT DEMAND RESPONSE') + +# %% + +### Calculate timeslice and model control information for Rolling horizon model runs + +if RollingHorizon: + # Set amount of timeslices and determine timeslice lengths for every iteration + # timeslice_length_dict has tuples as values: (timeslice_length_wo_overlap, timeslice_length_with_overlap) + timeseries_start, amount_of_timeslices, timeslice_length_dict \ + = determine_timeslices_RH(starttime, + endtime, + freq, + freq_timesteps, + myopic_horizon_in_years, + overlap_in_timesteps) + +# %% + +############################################################################### +### MODEL RUN ################################################################# +############################################################################### + +# %% + +### Model run for simple model set up + +if not RollingHorizon: + # Build the mathematical optimization model and obtain var_costs + # In addition to that, calculate exo commissioning cost and total de/commissioned capacity + # See function definition for details + om, existing_storage_labels, new_built_storage_labels, \ + total_exo_com_costs_df, total_exo_com_capacity_df, total_exo_decom_capacity_df \ + = build_simple_model(path_folder_input, + filename_node_data, + filename_cost_data, + filename_node_timeseries, + filename_min_max_timeseries, + filename_cost_timeseries, + AggregateInput, + startyear, + endyear, + MaxInvest, + fuel_cost_pathway, + investment_cost_pathway, + starttime, + endtime, + freq, + 
multiplicator, + optimization_timeframe, + IR=IR, + discount=discount, + ActivateEmissionsLimit=ActivateEmissionsLimit, + emission_pathway=emission_pathway, + ActivateInvestmentBudgetLimit=ActivateInvestmentBudgetLimit, + investment_budget=investment_budget, + ActivateDemandResponse=ActivateDemandResponse, + approach=approach, + scenario=scenario) + + # Solve the problem using the given solver + om.solve(solver=solver, solve_kwargs={'tee': True}) + + # Calculate overall objective and optimization time + meta_results = processing.meta_results(om) + overall_solution_time += meta_results['solver']['Time'] + + # get the current time and calculate the overall time + ts_2 = time.gmtime() + # 08.12.2018, JK: See above at definition of ts... + # ts_2 = time.localtime() + overall_time = time.mktime(ts_2) - time.mktime(ts) + + print("********************************************************") + logging.info("Done!") + print('Overall solution time: {:.2f}'.format(overall_solution_time)) + print('Overall time: {:.2f}'.format(overall_time)) + +# %% + +### Myopic optimization with rolling window: Run invest model +# NOTE: This is a quasi rolling horizon approach + +if RollingHorizon: + + logging.info('Creating a LP optimization model for INVESTMENT optimization \n' + 'using a MYOPIC OPTIMIZATION ROLLING WINDOW approach for model solution.') + + # Initialization of RH model run + counter = 0 + transformers_init_df = pd.DataFrame() + storages_init_df = pd.DataFrame() + results_sequences = pd.DataFrame() + results_scalars = pd.DataFrame() + total_exo_com_costs_df_RH = pd.DataFrame() + total_exo_com_capacity_df_RH = pd.DataFrame() + total_exo_decom_capacity_df_RH = pd.DataFrame() + + # 14.10.2018, JK: For loop controls rolling horizon model run + for counter in range(amount_of_timeslices): + + # (re)build optimization model in every iteration + # Return the model, the energy system as well as the information on existing transformer and storage + # capacity for each iteration. 
+ # In addition to that, calculate exo commissioning cost and total de/commissioned capacity + # See function definitions for details + om, es, timeseries_start, new_built_transformer_labels, \ + new_built_storage_labels, datetime_index, endo_exo_exist_df, \ + endo_exo_exist_stor_df, existing_storage_labels, \ + total_exo_com_costs_df_RH, total_exo_com_capacity_df_RH, total_exo_decom_capacity_df_RH \ + = build_RH_model(path_folder_input, + filename_node_data, + filename_cost_data, + filename_node_timeseries, + filename_min_max_timeseries, + filename_cost_timeseries, + AggregateInput, + fuel_cost_pathway, + investment_cost_pathway, + endyear, + MaxInvest, + myopic_horizon_in_years, + timeseries_start, + timeslice_length_with_overlap=timeslice_length_dict[counter][2], + counter=counter, + transformers_init_df=transformers_init_df, + storages_init_df=storages_init_df, + freq=freq, + multiplicator=multiplicator, + overlap_in_timesteps=overlap_in_timesteps, + years_per_timeslice=timeslice_length_dict[counter][0], + total_exo_com_costs_df_RH=total_exo_com_costs_df_RH, + total_exo_com_capacity_df_RH=total_exo_com_capacity_df_RH, + total_exo_decom_capacity_df_RH=total_exo_decom_capacity_df_RH, + IR=IR, + discount=discount, + ActivateEmissionsLimit=ActivateEmissionsLimit, + emission_pathway=emission_pathway, + ActivateInvestmentBudgetLimit=ActivateInvestmentBudgetLimit, + investment_budget=investment_budget, + ActivateDemandResponse=ActivateDemandResponse, + approach=approach, + scenario=scenario + ) + + # 14.05.2019, JK: Solve model and return results + om, results_sequences, results_scalars, \ + overall_objective, overall_solution_time \ + = solve_RH_model(om, + datetime_index, + counter, + startyear, + myopic_horizon_in_years, + timeslice_length_wo_overlap_in_timesteps=timeslice_length_dict[counter][1], + results_sequences=results_sequences, + results_scalars=results_scalars, + overall_objective=overall_objective, + overall_solution_time=overall_solution_time, + 
new_built_storage_labels=new_built_storage_labels, + existing_storage_labels=existing_storage_labels, + solver=solver) + + # 14.05.2019, JK: Set initial states for the next model run + # See function definition for details + transformers_init_df, storages_init_df = \ + initial_states_RH(om, + timeslice_length_wo_overlap_in_timesteps=timeslice_length_dict[counter][1], + new_built_transformer_labels=new_built_transformer_labels, + new_built_storage_labels=new_built_storage_labels, + endo_exo_exist_df=endo_exo_exist_df, + endo_exo_exist_stor_df=endo_exo_exist_stor_df) + + # 05.06.2019, JK: To Do: Check whether this dump works properly or + # if objects are still kept in memory. + if Dumps: + # 17.05.2019, JK: Dump energy system including results + # (Pickling in order to reuse results later) + dump_es(om, es, "./dumps/", timestamp) + + # TODO: 20.08.2019, JK: Get overall objective value for model comparison + # overall_objective = reconstruct_objective_value(om) + + # TODO, JK: Get from experimental state to senseful integration + # Do some garbage collection in every iteration + gc.collect() + + # The following is carried out when all model runs are carried out + + # 27.11.2018, CS: get the current time and calculate the overall time + # 08.12.2018, JK: Seems to work here, although according to documentation of package time, the function time.mktime() + # should be used in combination with time.localtime(), not time.gmtime()... Commented this out below. 
+ ts_2 = time.gmtime() + # ts_2 = time.localtime() + overall_time = calendar.timegm(ts_2) - calendar.timegm(ts) + + print('*************************************FINALLY DONE*************************************') + print('Overall objective value: {:,.0f}'.format(overall_objective)) + print('Overall solution time: {:.2f}'.format(overall_solution_time)) + print('Overall time: {:.2f}'.format(overall_time)) + +# %% + +############################################################################### +### PROCESS MODEL RESULTS ##################################################### +############################################################################### + +# %% + +# 15.05.2019, JK: Store results +if not RollingHorizon: + results_scalars, results_sequences = extract_model_results(om, + new_built_storage_labels, + existing_storage_labels) + +# %% + +### Create and visualize production schedule + +# 16.05.2019, JK: Power contains the aggregated production results per energy source +# as well as information on storage infeed and outfeed and load; see function definition for details +Power = create_aggregated_energy_source_results(results_sequences) + +# Create a nice stackplot of power plant production schedule as well as load +if PlotProductionResults: + + # 16.05.2019, JK: Draw a stackplot of the production results + draw_production_plot(Power, + Europe) + + if SaveProductionPlot: + path = "./results/" + plt.savefig(path + filename + '_production.png', dpi=150, bbox_inches="tight") + + plt.show() + +if SaveProductionResults: + path = "./results/" + results_sequences.to_csv(path + filename + '_production.csv', sep=';', decimal=',', header=True) + +# %% + +### Create and visualize investment decisions taken + +if not RollingHorizon: + Invest = create_aggregated_investment_results(results_scalars, + starttime, + grouping=grouping) + + print('Total exogenous commissioning costs: {:,.0f}'.format(total_exo_com_costs_df.sum().sum())) + print('Objective + Total exogenous 
commissioning costs: {:,.0f}'.format( + total_exo_com_costs_df.sum().sum() + meta_results['objective'])) + + results_to_save = create_results_to_save(meta_results["objective"], + total_exo_com_costs_df, + total_exo_com_capacity_df, + total_exo_decom_capacity_df, + overall_solution_time, + overall_time, + Invest) + +else: + Cumulated_Invest, Invest \ + = create_aggregated_investment_results_RH(results_scalars, + amount_of_timeslices, + timeslice_length_dict, + starttime, + freq, + grouping=grouping) + + print('Total exogenous commissioning costs: {:,.0f}'.format(total_exo_com_costs_df_RH.sum().sum())) + print('Objective + Total exogenous commissioning costs: {:,.0f}'.format( + total_exo_com_costs_df_RH.sum().sum() + overall_objective)) + + results_to_save = create_results_to_save(overall_objective, + total_exo_com_costs_df_RH, + total_exo_com_capacity_df_RH, + total_exo_decom_capacity_df_RH, + overall_solution_time, + overall_time, + Invest) + +# In the first place, only new installations are shown. 
+# Later on, it seems reasonable to include decommissioning decisions as well +# on an annual basis using a stacked bar plot and two y-axis directions +if PlotInvestmentDecisionsResults: + + if not RollingHorizon: + draw_investment_decisions_plot(Invest) + draw_exo_investment_decisions_plot(total_exo_com_capacity_df) + draw_decommissioning_plot(total_exo_decom_capacity_df) + draw_investment_decisions_plot(results_to_save['net_commissioning']) + + else: + draw_investment_decisions_plot_RH(Cumulated_Invest, Invest) + draw_exo_investment_decisions_plot(total_exo_com_capacity_df_RH) + draw_decommissioning_plot(total_exo_decom_capacity_df_RH) + draw_investment_decisions_plot(results_to_save['net_commissioning']) + + if SaveInvestmentDecisionsPlot: + path = "./results/" + plt.savefig(path + filename + '_investments.png', dpi=150, bbox_inches="tight") + +if SaveInvestmentDecisionsResults: + path = "./results/" + results_to_save.to_csv(path + filename + '_investments.csv', sep=';', decimal=',', header=True) diff --git a/pommesinvest/model_funcs/__init__.py b/pommesinvest/model_funcs/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/pommesinvest/model_funcs/data_input.py b/pommesinvest/model_funcs/data_input.py new file mode 100644 index 0000000..c481764 --- /dev/null +++ b/pommesinvest/model_funcs/data_input.py @@ -0,0 +1,1161 @@ +# -*- coding: utf-8 -*- +""" +General description +------------------- +This file contains all function definitions for reading in input data +used for the investment variant of POMMES. 

@author: Johannes Kochems (*), Johannes Giehl (*), Yannick Werner,
Benjamin Grosse

Contributors:
Julien Faist, Hannes Kachel, Sophie Westphal, Flora von Mikulicz-Radecki,
Carla Spiller, Fabian Büllesbach, Timona Ghosh, Paul Verwiebe,
Leticia Encinas Rosa, Joachim Müller-Kirchenbauer

(*) Corresponding authors
"""

import pandas as pd
import logging

from helper_functions_invest import discount_values
from subroutines_for_data_input_invest import (
    parse_input_sheets, create_buses,
    create_sources, create_demand_response_units, create_demand, create_sinks,
    create_excess_sinks, create_transformers, create_storages,
    existing_transformers_exo_decom, new_transformers_exo, storages_exo,
    renewables_exo,
    load_input_data)
from helper_functions_invest import convert_annual_limit, resample_timeseries


# TODO: Adjust to investment model needs
def parse_input_data(path_folder_input,
                     AggregateInput,
                     countries,
                     fuel_cost_pathway='middle',
                     year=2017,
                     ActivateDemandResponse=False,
                     scenario='50'):
    """Read in csv files and build oemof components

    Parameters
    ----------
    path_folder_input : :obj:`str`
        The folder where the input data is stored

    AggregateInput: :obj:`boolean`
        boolean control variable indicating whether to use complete or
        aggregated transformer input data set

    countries : :obj:`list` of str
        List of countries to be simulated

    fuel_cost_pathway: :obj:`str`
        The chosen pathway for commodity cost scenarios (lower, middle, upper)

    year: :obj:`str`
        Reference year for pathways depending on starttime

    ActivateDemandResponse : :obj:`boolean`
        If True, demand response input data is read in

    scenario : :obj:`str`
        Demand response scenario to be modeled;
        must be one of ['25', '50', '75'] whereby '25' is the lower,
        i.e. rather pessimistic estimate

    Returns
    -------
    input_data: :obj:`dict` of :class:`pd.DataFrame`
        The input data given as a dict of DataFrames
        with component names as keys
    """
    # Save the input data in a dict; keys are names and values are DataFrames.
    # Entries commented out below are data sets not (yet) used by the
    # investment variant; kept for reference against the dispatch variant.
    files = {'buses': 'buses',
             # 'links': 'links',
             # 'links_ts': 'links_ts',
             'sinks_excess': 'sinks_excess',
             'sinks_demand_el': 'sinks_demand_el',
             'sinks_demand_el_ts': 'sinks_demand_el_ts' + '_complete',
             'sources_shortage': 'sources_shortage',
             'sources_commodity': 'sources_commodity',
             # 'sources_renewables_fluc': 'sources_renewables_fluc',
             # 'costs_fuel': 'costs_fuel_' + fuel_cost_pathway,
             # 'costs_ramping': 'costs_ramping',
             # 'costs_fixed',
             # 'costs_carbon': 'costs_carbon',
             # 'costs_market_values': 'costs_market_values',
             # 'costs_operation': 'costs_operation',
             # 'costs_operation_storages': 'costs_operation_storages',
             'emission_limits': 'emission_limits'
             }

    # Files that depend on the model configuration (year / scenario variants)
    add_files = {  # 'sources_renewables': 'sources_renewables',
                 # 'sources_renewables_ts': 'sources_renewables_ts',
                 # 'storages_el': 'storages_el',
                 # 'transformers': 'transformers',
                 # 'transformers_minload_ts': 'transformers_minload_ts',
                 # 'transformers_renewables': 'transformers_renewables',
                 # 'min_loads_dh': 'min_loads_dh',
                 # 'min_loads_ipp': 'min_loads_ipp',
                 # 'costs_operation_renewables': 'costs_operation_renewables'
                 }

    # Optionally use aggregated transformer data instead
    if AggregateInput:
        add_files['transformers'] = 'transformers_clustered'

    # Addition: demand response units
    if ActivateDemandResponse:
        add_files['sinks_dr_el'] = (
            'sinks_demand_response_el_' + scenario)
        add_files['sinks_dr_el_ts'] = (
            'sinks_demand_response_el_ts_' + scenario + '_complete')
        add_files['sinks_dr_el_ava_pos_ts'] = (
            'sinks_demand_response_el_ava_pos_ts_' + scenario + '_complete')
        add_files['sinks_dr_el_ava_neg_ts'] = (
            'sinks_demand_response_el_ava_neg_ts_' + scenario + '_complete')

    # Use dedicated 2030 data
    # NOTE(review): appending '' is a no-op placeholder — the dedicated
    # 2030 filename suffix still has to be filled in; confirm intention.
    if year == 2030:
        add_files = {k: v + '' for k, v in add_files.items()}

    files = {**files, **add_files}

    input_data = {key: load_input_data(filename=name,
                                       path_folder_input=path_folder_input,
                                       countries=countries)
                  for key, name in files.items()}

    return input_data


def add_limits(input_data,
               emission_pathway,
               starttime='2017-01-01 00:00:00',
               endtime='2017-01-01 23:00:00'):
    """Add further limits to the optimization model (emissions limit for now)

    Converts the annual limit of the chosen emissions pathway to the
    optimization timeframe given by starttime and endtime.

    Parameters
    ----------
    input_data: :obj:`dict` of :class:`pd.DataFrame`
        The input data given as a dict of DataFrames
        with component names as keys

    emission_pathway : str
        The pathway for emissions reduction to be used

    starttime : :obj:`str`
        The starttime of the optimization run

    endtime : :obj:`str`
        The endtime of the optimization run

    Returns
    -------
    emissions_limit : :obj:`float`
        The emissions limit to be used (converted)
    """
    emissions_limit = convert_annual_limit(
        input_data['emission_limits'][emission_pathway],
        starttime, endtime)

    return emissions_limit


# TODO: Replace by method nodes_from_csv
def nodes_from_excel(path_folder_input,
                     filename_node_data,
                     filename_cost_data,
                     filename_node_timeseries,
                     filename_min_max_timeseries,
                     filename_cost_timeseries,
                     AggregateInput,
                     startyear,
                     endyear,
                     MaxInvest,
                     fuel_cost_pathway='middle',
                     investment_cost_pathway='middle',
                     starttime='2016-01-01 00:00:00',
                     endtime='2017-01-01 00:00:00',
                     freq='24H',
                     multiplicator=24,
                     optimization_timeframe=2,
                     IR=0.02,
                     discount=False,
                     overlap_in_timesteps=0,
                     RollingHorizon=False,
                     ActivateEmissionsLimit=False,
                     emission_pathway='100_percent_linear',
                     ActivateDemandResponse=False,
                     approach='DIW',
                     scenario='50'):
    """ Reads in an Excel Workbook and builds the respective oemof components
    by going through the Worksheets of the file. The Worksheets are parsed
    to a pd.DataFrame. The DataFrames are iterated through using the
    function pd.DataFrame.iterrows(). The actual parameters are obtained
    from the entries in the respective columns. Method is used to build the
    nodes and flows for an investment optimization model.

    Parameters
    ----------
    path_folder_input : :obj:`str`
        The file path where input files are stored (common folder)

    filename_node_data : :obj:`str`
        Name of Excel Workbook containing all data
        for creating nodes (buses and oemof components)

    filename_cost_data : :obj:`str`
        Name of Excel Workbook containing cost pathways for oemof components

    filename_node_timeseries : :obj:`str`
        Filename of the node timeseries data, given in a separate .csv file

    filename_min_max_timeseries : :obj:`str`
        Filename of the min / max transformer data, given in a separate
        .csv file

    filename_cost_timeseries : :obj:`str`
        Filename of the cost timeseries data, given in a separate .csv file

    AggregateInput: :obj:`boolean`
        boolean control variable indicating whether to use complete or
        aggregated transformer input data set

    startyear : :obj:`int`
        The startyear of the optimization run

    endyear : :obj:`int`
        The endyear of the optimization run

    MaxInvest : :obj:`boolean`
        If True, investment limits per technology are applied

    fuel_cost_pathway: :obj:`str`
        The chosen pathway for commodity cost scenarios (lower, middle, upper)

    investment_cost_pathway: :obj:`str`
        The chosen pathway for investment cost scenarios
        (lower, middle, upper)

    starttime : :obj:`str`
        The starting timestamp of the optimization timeframe

    endtime : :obj:`str`
        The end timestamp of the optimization timeframe

    freq : :obj:`string`
        A string defining the timeseries target frequency; determined by the
        model configuration

    multiplicator : :obj:`int`
        A multiplicator to convert the input data given with an hourly
        resolution to another (usually a lower) one

    optimization_timeframe : :obj:`str`
        The length of the overall optimization timeframe in years
        (used for determining technology specific investment limits)

    IR : :obj:`float`
        The interest rate used for discounting

    discount : :obj:`boolean`
        Boolean parameter indicating whether or not to discount future
        investment costs

    overlap_in_timesteps : :obj:`int`
        the overlap in timesteps if a rolling horizon model is run
        (to prevent index out of bounds error)

    RollingHorizon: :obj:`boolean`
        If True a myopic (Rolling horizon) optimization run is carried out,
        otherwise a simple overall optimization approach is chosen

    ActivateEmissionsLimit : :obj:`boolean`
        If True, an emission limit is introduced

    emission_pathway : str
        The pathway for emissions reduction to be used

    ActivateDemandResponse : :obj:`boolean`
        Boolean control parameter indicating whether or not to introduce
        demand response units

    approach : :obj:`str`
        Demand response modeling approach to be used;
        must be one of ['DIW', 'DLR', 'IER', 'TUD']

    scenario : :obj:`str`
        Demand response scenario to be modeled;
        must be one of ['25', '50', '75'] whereby '25' is the lower,
        i.e. rather pessimistic estimate

    Returns
    -------
    node_dict : :obj:`dict` of :class:`nodes `
        Dictionary containing all nodes of the EnergySystem

    existing_storage_labels : :obj:`list`
        List of the labels of existing storages

    new_built_storage_labels : :obj:`list`
        List of the labels of new built storages

    total_exo_com_costs_df : :obj:`pd.DataFrame`
        A DataFrame containing the overall costs for exogenous investments

    total_exo_com_capacity_df : :obj:`pd.DataFrame`
        A DataFrame containing the overall capacity for exogenous investments

    total_exo_decom_capacity_df : :obj:`pd.DataFrame`
        A DataFrame containing the overall capacity for exogenous
        decommissioning decisions

    emissions_limit : :obj:`float` or None
        The converted emissions limit if ActivateEmissionsLimit is True,
        else None
    """

    # Parse all data needed from input data sheets
    buses_df, excess_df, shortage_df, commodity_sources_df, \
        existing_transformers_df, new_built_transformers_df, \
        renewables_df, demand_df, \
        existing_storages_df, new_built_storages_df, \
        existing_transformers_decom_df, new_transformers_de_com_df, \
        renewables_com_df, \
        node_timeseries_df, min_max_timeseries_df, fuel_costs_df, \
        operation_costs_df, \
        ramping_costs_df, startup_costs_df, storage_var_costs_df, \
        investment_costs_df, storage_investment_costs_df, \
        storage_pump_investment_costs_df, storage_turbine_investment_costs_df, \
        WACC_df, cost_timeseries_df \
        = parse_input_sheets(path_folder_input,
                             filename_node_data,
                             filename_cost_data,
                             filename_node_timeseries,
                             filename_min_max_timeseries,
                             filename_cost_timeseries,
                             fuel_cost_pathway,
                             investment_cost_pathway,
                             starttime,
                             endtime,
                             freq,
                             multiplicator,
                             overlap_in_timesteps=0)

    # node_dict is a dictionary containing all nodes
    # (i.e. all oemof elements except for Flows) of the model
    node_dict = {}

    # Create Buses objects from table 'buses'
    # See subroutines_for_data_input.py for details on all following
    # function calls
    node_dict = create_buses(buses_df, node_dict)

    # Create all source objects from tables 'commodity_sources',
    # 'shortage', 'renewables'
    node_dict = create_sources(node_dict, commodity_sources_df, fuel_costs_df,
                               fuel_cost_pathway,
                               shortage_df, renewables_df, node_timeseries_df,
                               cost_timeseries_df, starttime, endtime)

    # TODO: Replace this temporary solution by final one
    # In the investment model, we only consider Germany
    countries = ['DE']
    year = pd.to_datetime(starttime).year

    # NOTE(review): input path is hard-coded here and shadows the
    # path_folder_input parameter — confirm this is only a temporary bridge
    # to the csv-based data interface.
    input_data = parse_input_data(
        '../data/Outputlisten_Test_Invest/',
        AggregateInput,
        countries,
        fuel_cost_pathway,
        year,
        ActivateDemandResponse,
        scenario)

    if ActivateDemandResponse:

        ts_keys = ['sinks_dr_el_ts', 'sinks_dr_el_ava_pos_ts',
                   'sinks_dr_el_ava_neg_ts', 'sinks_demand_el_ts']

        # NOTE(review): resampling frequency is hard-coded to '48H' rather
        # than derived from the freq parameter — confirm intended.
        for key in ts_keys:
            input_data[key] = resample_timeseries(input_data[key],
                                                  freq='48H',
                                                  aggregation_rule='sum')

        node_dict, dr_overall_load_ts_df = create_demand_response_units(
            input_data['sinks_dr_el'],
            input_data['sinks_dr_el_ts'],
            input_data['sinks_dr_el_ava_pos_ts'],
            input_data['sinks_dr_el_ava_neg_ts'],
            approach,
            starttime, endtime,
            node_dict)

        node_dict = create_demand(
            input_data['sinks_demand_el'],
            input_data['sinks_demand_el_ts'],
            starttime, endtime,
            node_dict,
            # RollingHorizon,
            ActivateDemandResponse,
            dr_overall_load_ts_df)

    else:

        ts_keys = ['sinks_demand_el_ts']

        # NOTE(review): same hard-coded '48H' resampling as above.
        for key in ts_keys:
            input_data[key] = resample_timeseries(input_data[key],
                                                  freq='48H',
                                                  aggregation_rule='sum')

        node_dict = create_demand(
            input_data['sinks_demand_el'],
            input_data['sinks_demand_el_ts'],
            starttime, endtime,
            node_dict,
            # RollingHorizon
            )

    # Create all sink objects from tables 'demand', 'excess_sinks'
    # node_dict = create_sinks(node_dict, demand_df, node_timeseries_df,
    #                          starttime, endtime, excess_df,
    #                          ActivateDemandResponse,
    #                          dr_overall_load_ts_df)

    # Create excess sinks
    node_dict = create_excess_sinks(excess_df, node_dict)

    # Create Transformer objects from 'transformers' table
    node_dict = create_transformers(node_dict, existing_transformers_df,
                                    new_built_transformers_df, AggregateInput,
                                    RollingHorizon,
                                    operation_costs_df, ramping_costs_df,
                                    investment_costs_df, WACC_df,
                                    cost_timeseries_df,
                                    min_max_timeseries_df, MaxInvest,
                                    starttime, endtime, endyear,
                                    optimization_timeframe=optimization_timeframe)

    # Create Storage objects from 'storages' table
    node_dict = create_storages(node_dict,
                                existing_storages_df,
                                new_built_storages_df,
                                RollingHorizon,
                                MaxInvest,
                                storage_var_costs_df,
                                storage_investment_costs_df,
                                storage_pump_investment_costs_df,
                                storage_turbine_investment_costs_df,
                                WACC_df,
                                starttime,
                                endyear,
                                optimization_timeframe=optimization_timeframe)

    # Determine costs and capacities of exogenous (de)commissioning
    # NOTE(review): exo_com_costs is not among the imports visible at the
    # top of this module — confirm it is defined elsewhere in this file.
    total_exo_com_costs_df, total_exo_com_capacity_df, \
        total_exo_decom_capacity_df = exo_com_costs(startyear,
                                                    endyear,
                                                    existing_transformers_decom_df,
                                                    new_transformers_de_com_df,
                                                    investment_costs_df,
                                                    WACC_df,
                                                    new_built_storages_df,
                                                    storage_turbine_investment_costs_df,
                                                    storage_pump_investment_costs_df,
                                                    storage_investment_costs_df,
                                                    renewables_com_df,
                                                    IR=IR,
                                                    discount=discount)

    # Create existing_storage_labels and new_built_storage_labels
    # to iterate over them later to get investment information about capacity
    existing_storage_labels = list(existing_storages_df.index)
    new_built_storage_labels = list(new_built_storages_df.index)

    emissions_limit = None
    if ActivateEmissionsLimit:
        emissions_limit = add_limits(
            input_data,
            emission_pathway,
            starttime, endtime)

    return (node_dict, existing_storage_labels, new_built_storage_labels,
            total_exo_com_costs_df, total_exo_com_capacity_df,
            total_exo_decom_capacity_df,
            emissions_limit)


def nodes_from_excel_rh(path_folder_input,
                        filename_node_data,
                        filename_cost_data,
                        filename_node_timeseries,
                        filename_min_max_timeseries,
                        filename_cost_timeseries,
                        AggregateInput,
                        counter,
                        storages_init_df,
                        transformers_init_df,
                        timeslice_length_with_overlap,
                        RH_endyear,
                        MaxInvest,
                        timeseries_start,
                        total_exo_com_costs_df_RH,
                        total_exo_com_capacity_df_RH,
                        total_exo_decom_capacity_df_RH,
                        fuel_cost_pathway='middle',
                        investment_cost_pathway='middle',
                        freq='24H',
                        multiplicator=24,
                        overlap_in_timesteps=0,
                        years_per_timeslice=1,
                        IR=0.02,
                        discount=False,
                        RollingHorizon=True,
                        ActivateEmissionsLimit=False,
                        emission_pathway='100_percent_linear',
                        ActivateDemandResponse=False,
                        approach='DIW',
                        scenario='50'):

    """ Function builds up an energy system from a given excel file,
    preparing it for a rolling horizon dispatch optimization.
    Functionality is more or less the same as for the simple method.
    The main difference is that a counter is used and initial status
    from the last optimization run is used in the next one.
+ + + Parameters + ---------- + path_folder_input : :obj:`str` + The file path where input files are stored (common folder) + + filename_node_data : :obj:`str` + Name of Excel Workbook containing all data for creating nodes (oemof components) + + filename_cost_data : :obj:`str` + Name of Excel Workbook containing cost pathways for oemof components + + filename_node_timeseries : :obj:`str` + Filename of the node timeseries data, given in a separate .csv file + + filename_min_max_timeseries : :obj:`str` + Filename of the min / max transformer data, given in a separate .csv file + + filename_cost_timeseries : :obj:`str` + Filename of the cost timeseries data, given in a separate .csv file + + AggregateInput: :obj:`boolean` + boolean control variable indicating whether to use complete or aggregated + transformer input data set + + counter : :obj:`int` + An integer counter variable counting the number of the rolling horizon run + + storages_init_df : :obj:`pandas.DataFrame` + A pd.DataFrame containing the storage data from previous model runs + + transformers_init_df : :obj:`pandas.DataFrame` + A pd.DataFrame containing the transformer data from previous model runs + + timeslice_length_with_overlap : :obj:`int` + Overall length of optimization timeframe including overlap (t_step) + + RH_endyear : :obj:`int` + End year of the current rolling horizon model run + + MaxInvest : :obj:`boolean` + If True, investment limits per technology are applied + + timeseries_start : :obj:`pd.Timestamp` + The starting timestamp of the optimization timeframe + + total_exo_com_costs_df_RH : :obj:`pd.DataFrame` + A DataFrame containing the overall costs for exogeneous investements + + total_exo_com_capacity_df_RH : :obj:`pd.DataFrame` + A DataFrame containing the overall capacity for exogeneous investements + + total_exo_decom_capacity_df_RH : :obj:`pd.DataFrame` + A DataFrame containing the overall capacity for exogeneous decommissioning decisions + + fuel_cost_pathway: :obj:`str` + The 
chosen pathway for commodity cost scenarios (lower, middle, upper) + + investment_cost_pathway: :obj:`str` + The chosen pathway for investment cost scenarios (lower, middle, upper) + + freq : :obj:`string` + A string defining the timeseries target frequency; determined by the + model configuration + + multiplicator : :obj:`int` + A multiplicator to convert the input data given with an hourly resolution + to another (usually a lower) one + + overlap_in_timesteps : :obj:`int` + the overlap in timesteps if a rolling horizon model is run + (to prevent index out of bounds error) + + years_per_timeslice : :obj:`int` + Number of years of a timeslice (a given myopic iteration); may differ + for the last timesteps since it does not necessarily have to be + completely covered + + IR : :obj:`float` + The interest rate used for discounting + + discount : :obj:`boolean` + Boolean parameter indicating whether or not to discount future investment costs + + RollingHorizon: :obj:`boolean` + If True a myopic (Rolling horizon) optimization run is carried out, + elsewhise a simple overall optimization approach is chosen + + ActivateEmissionsLimit : :obj:`boolean` + If True, an emission limit is introduced + + emission_pathway : str + The pathway for emissions reduction to be used + + ActivateDemandResponse : :obj:`boolean` + Boolean control parameter indicating whether or not to introduce + demand response units + + approach : :obj:`str` + Demand response modeling approach to be used; + must be one of ['DIW', 'DLR', 'IER', 'TUD'] + + scenario : :obj:`str` + Demand response scenario to be modeled; + must be one of ['25', '50', '75'] whereby '25' is the lower, + i.e. 
rather pessimistic estimate + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + new_built_transformer_labels : :obj:`list` of :class:`str` + A list of the labels of all transformer elements included in the model + used for assessing these and assigning existing capacities (via the + function initial_states_RH from functions_for_model_control_invest) + + new_built_storage_labels : :obj:`list` of :class:`str` + A list of the labels of all storage elements included in the model + used for assessing these and assigning initial states as well + as existing caoacities (via the + function initial_states_RH from functions_for_model_control_invest) + + endo_exo_exist_df : :obj:`pd.DataFrame` + A DataFrame containing the endogeneous and exogeneous transformers commissioning + information for setting initial states + + endo_exo_exist_stor_df : :obj:`pd.DataFrame` + A DataFrame containing the endogeneous and exogeneous storages commissioning + information for setting initial states + + total_exo_com_costs_df_RH : :obj:`pd.DataFrame` + A DataFrame containing the overall costs for exogeneous investements + + total_exo_com_capacity_df_RH : :obj:`pd.DataFrame` + A DataFrame containing the overall capacity for exogeneous investements + + total_exo_decom_capacity_df_RH : :obj:`pd.DataFrame` + A DataFrame containing the overall capacity for exogeneous decommissioning decisions + + """ + + # The rolling horizon approach differs by choosing + # starttime as well as endtime and initial capacities for transformers. 
+ starttime_RH = timeseries_start.strftime("%Y-%m-%d %H:%M:%S") + endtime_RH = (timeseries_start + + timeslice_length_with_overlap * timeseries_start.freq).strftime("%Y-%m-%d %H:%M:%S") + + logging.info("Start of iteration {} : {}".format(counter, starttime_RH)) + logging.info("End of iteration {} : {}".format(counter, endtime_RH)) + + # Parse all data needed from input data sheets + # NOTE: Parsing from starttime to endtime of overall optimization horizon + # would be needed if constraints were introduced which cover more than one + # timeslice (such as expected power plant rentability constraints) + buses_df, excess_df, shortage_df, commodity_sources_df, \ + existing_transformers_df, new_built_transformers_df, \ + renewables_df, demand_df, \ + existing_storages_df, new_built_storages_df, \ + existing_transformers_decom_df, new_transformers_de_com_df, renewables_com_df, \ + node_timeseries_df, min_max_timeseries_df, fuel_costs_df, operation_costs_df, \ + ramping_costs_df, startup_costs_df, storage_var_costs_df, \ + investment_costs_df, storage_investment_costs_df, \ + storage_pump_investment_costs_df, storage_turbine_investment_costs_df, \ + WACC_df, cost_timeseries_df \ + = parse_input_sheets(path_folder_input, + filename_node_data, + filename_cost_data, + filename_node_timeseries, + filename_min_max_timeseries, + filename_cost_timeseries, + fuel_cost_pathway, + investment_cost_pathway, + starttime=starttime_RH, + endtime=endtime_RH, + freq=freq, + multiplicator=multiplicator, + overlap_in_timesteps=overlap_in_timesteps) + + node_dict = {} + + node_dict = create_buses(buses_df, node_dict) + + node_dict = create_sources(node_dict, commodity_sources_df, fuel_costs_df, fuel_cost_pathway, + shortage_df, renewables_df, node_timeseries_df, + cost_timeseries_df, + starttime = starttime_RH, + endtime = endtime_RH) + + # TODO: Replace this temporary solution by final one + # In the investment model, we only consider Germany + countries = ['DE'] + year = 
pd.to_datetime(starttime_RH).year + + input_data = parse_input_data( + '../data/Outputlisten_Test_Invest/', + AggregateInput, + countries, + fuel_cost_pathway, + year, + ActivateDemandResponse, + scenario) + + if ActivateDemandResponse: + + ts_keys = ['sinks_dr_el_ts', 'sinks_dr_el_ava_pos_ts', + 'sinks_dr_el_ava_neg_ts', 'sinks_demand_el_ts'] + + for key in ts_keys: + input_data[key] = resample_timeseries(input_data[key], + freq='48H', + aggregation_rule='sum') + + node_dict, dr_overall_load_ts_df = create_demand_response_units( + input_data['sinks_dr_el'], + input_data['sinks_dr_el_ts'], + input_data['sinks_dr_el_ava_pos_ts'], + input_data['sinks_dr_el_ava_neg_ts'], + approach, + starttime_RH, endtime_RH, + node_dict) + + node_dict = create_demand( + input_data['sinks_demand_el'], + input_data['sinks_demand_el_ts'], + starttime_RH, endtime_RH, + node_dict, + # RollingHorizon, + ActivateDemandResponse, + dr_overall_load_ts_df) + + else: + + ts_keys = ['sinks_demand_el_ts'] + + for key in ts_keys: + input_data[key] = resample_timeseries(input_data[key], + freq='48H', + aggregation_rule='sum') + + node_dict = create_demand( + input_data['sinks_demand_el'], + input_data['sinks_demand_el_ts'], + starttime_RH, endtime_RH, + node_dict, + # RollingHorizon) + ) + + # node_dict = create_sinks(node_dict, demand_df, node_timeseries_df, + # starttime = starttime_RH, + # endtime = endtime_RH, + # excess_df = excess_df, + # RollingHorizon = True) + + # Create excess sinks + node_dict = create_excess_sinks(excess_df, node_dict) + + node_dict, new_built_transformer_labels, endo_exo_exist_df = \ + create_transformers(node_dict, + existing_transformers_df, + new_built_transformers_df, + AggregateInput, + RollingHorizon, + operation_costs_df, ramping_costs_df, + investment_costs_df, WACC_df, + cost_timeseries_df, + min_max_timeseries_df, + MaxInvest, + starttime=starttime_RH, + endtime=endtime_RH, + counter=counter, + transformers_init_df=transformers_init_df, + 
years_per_timeslice=years_per_timeslice, + endyear=RH_endyear) + + node_dict, new_built_storage_labels, endo_exo_exist_stor_df = \ + create_storages(node_dict, + existing_storages_df, + new_built_storages_df, + RollingHorizon, + MaxInvest, + storage_var_costs_df, + storage_investment_costs_df, + storage_pump_investment_costs_df, + storage_turbine_investment_costs_df, + WACC_df, + starttime=starttime_RH, + counter=counter, + storages_init_df=storages_init_df, + years_per_timeslice=years_per_timeslice, + endyear=RH_endyear) + + total_exo_com_costs_df_RH, total_exo_com_capacity_df_RH, \ + total_exo_decom_capacity_df_RH = exo_com_costs_RH(timeseries_start.year, + RH_endyear, + counter, + years_per_timeslice, + total_exo_com_costs_df_RH, + total_exo_com_capacity_df_RH, + total_exo_decom_capacity_df_RH, + existing_transformers_decom_df, + new_transformers_de_com_df, + investment_costs_df, + WACC_df, + new_built_storages_df, + storage_turbine_investment_costs_df, + storage_pump_investment_costs_df, + storage_investment_costs_df, + renewables_com_df, + IR=IR, + discount=discount) + + # Create existing_storage_labels to iterate over them later to get investment information about capacity + existing_storage_labels = list(existing_storages_df.index) + + emissions_limit = None + if ActivateEmissionsLimit: + emissions_limit = add_limits( + input_data, + emission_pathway, + starttime_RH, endtime_RH) + + return (node_dict, new_built_transformer_labels, new_built_storage_labels, + endo_exo_exist_df, endo_exo_exist_stor_df, existing_storage_labels, + total_exo_com_costs_df_RH, total_exo_com_capacity_df_RH, total_exo_decom_capacity_df_RH, + emissions_limit) + + +def exo_com_costs(startyear, + endyear, + existing_transformers_decom_df, + new_transformers_de_com_df, + investment_costs_df, + WACC_df, + new_built_storages_df, + storage_turbine_investment_costs_df, + storage_pump_investment_costs_df, + storage_investment_costs_df, + renewables_com_df, + IR=0.02, + discount=False): + """ 
Function takes the dataframes from the functions total_exo_decommissioning, + transformers_exo_commissioning, storages_exo_commissioning and + renewables_exo_commissioning and returns them + + Parameters + ---------- + startyear : :obj:`int` + Starting year of the overall optimization run + + endyear : :obj:`int` + End year of the overall optimization run + + renewables_com_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the renewables data + + storage_investment_costs_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the storage capacity investment costs data + + storage_pump_investment_costs_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the storage infeed investment costs data + + storage_turbine_investment_costs_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the storage outfeed investment costs data + + new_built_storages_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the new built storage units data + + WACC_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the WACC data + + investment_costs_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the transformers investment costs data + + new_transformers_de_com_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the new built transformers (exogeneous) + commissioning and decommissioning data + + existing_transformers_decom_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the existing transformers (exogeneous) + decommissioning data + + Returns + ------- + total_exo_com_costs_df : :obj:`pandas.DataFrame` + A pd.DataFrame containing the exogenous commissioning costs + + exo_commissioned_capacity_df : :obj:`pandas.DataFrame` + A pd.DataFrame containing the exogenous commissioning capacity and year + + exo_decommissioned_capacity_df : :obj:`pandas.DataFrame` + A pd.DataFrame containing the exogenous decommissioning capacity and year + + """ + + existing_transformers_decom_capacity_df = existing_transformers_exo_decom(existing_transformers_decom_df, + 
startyear, + endyear) + + new_transformers_exo_com_costs_df, new_transformers_exo_com_capacity_df, \ + new_transformers_exo_decom_capacity_df = new_transformers_exo(new_transformers_de_com_df, + investment_costs_df, + WACC_df, + startyear, + endyear, + IR=IR, + discount=discount) + + logging.info('Exogenous transformers costs: {:,.0f}'.format( + new_transformers_exo_com_costs_df.sum().sum())) + + storages_exo_com_costs_df, storages_exo_com_capacity_df, \ + storages_exo_decom_capacity_df = storages_exo(new_built_storages_df, + storage_turbine_investment_costs_df, + storage_pump_investment_costs_df, + storage_investment_costs_df, + WACC_df, + startyear, + endyear, + IR=IR, + discount=discount) + + logging.info('Exogenous storages costs: {:,.0f}'.format( + storages_exo_com_costs_df.sum().sum())) + + renewables_exo_com_costs_df, renewables_exo_com_capacity_df, \ + renewables_exo_decom_capacity_df = renewables_exo(renewables_com_df, + investment_costs_df, + WACC_df, + startyear, + endyear, + IR=IR, + discount=discount) + + logging.info('Exogenous renewables costs: {:,.0f}'.format( + renewables_exo_com_costs_df.sum().sum())) + + total_exo_com_costs_df = pd.concat([new_transformers_exo_com_costs_df, + storages_exo_com_costs_df, + renewables_exo_com_costs_df], axis=0, sort=False) + + total_exo_com_capacity_df = pd.concat([new_transformers_exo_com_capacity_df, + storages_exo_com_capacity_df, + renewables_exo_com_capacity_df], axis=0, sort=True) + + total_exo_decom_capacity_df = pd.concat([existing_transformers_decom_capacity_df, + new_transformers_exo_decom_capacity_df, + storages_exo_decom_capacity_df, + renewables_exo_decom_capacity_df], axis=0, sort=True) + + return total_exo_com_costs_df, total_exo_com_capacity_df, total_exo_decom_capacity_df + + +def exo_com_costs_RH(startyear, + endyear, + counter, + years_per_timeslice, + total_exo_com_costs_df_RH, + total_exo_com_capacity_df_RH, + total_exo_decom_capacity_df_RH, + existing_transformers_decom_df, + 
new_transformers_de_com_df, + investment_costs_df, + WACC_df, + new_built_storages_df, + storage_turbine_investment_costs_df, + storage_pump_investment_costs_df, + storage_investment_costs_df, + renewables_com_df, + IR=0.02, + discount=False): + """ Function takes the dataframes from the functions total_exo_decommissioning, + transformers_exo_commissioning, storages_exo_commissioning and + renewables_exo_commissioning and returns them + + Parameters + ---------- + startyear : :obj:`int` + Starting year of the overall optimization run + + endyear : :obj:`int` + End year of the overall optimization run + + counter : :obj:`int` + An integer counter variable counting the number of the rolling horizon run + + years_per_timeslice : :obj:`int` + Useful length of optimization timeframe (t_intervall) + + IR : :obj:`pandas.DataFrame` + A pd.DataFrame carrying the WACC information by technology / energy carrier + + total_exo_com_costs_df_RH : :obj:`pandas.DataFrame` + A pd.DataFrame containing the exogenous commissioning costs + + total_exo_com_capacity_df_RH : :obj:`pandas.DataFrame` + A pd.DataFrame containing the exogenous commissioning capacity and year + + total_exo_decom_capacity_df_RH : :obj:`pandas.DataFrame` + A pd.DataFrame containing the exogenous decommissioning capacity and year + + discount : :obj:`boolean` + If True, nominal values will be dicounted + If False, real values have to be used as model inputs (default) + + renewables_com_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the renewables data + + storage_investment_costs_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the storage capacity investment costs data + + storage_pump_investment_costs_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the storage infeed investment costs data + + storage_turbine_investment_costs_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the storage outfeed investment costs data + + new_built_storages_df: :obj:`pandas.DataFrame` + A pd.DataFrame 
containing the new built storage units data + + WACC_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the WACC data + + investment_costs_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the transformers investment costs data + + new_transformers_de_com_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the new built transformers (exogeneous) + commissioning and decommissioning data + + existing_transformers_decom_df: :obj:`pandas.DataFrame` + A pd.DataFrame containing the existing transformers (exogeneous) + decommissioning data + Returns + ------- + exo_com_cost_df_total_RH : :obj:`pandas.DataFrame` + A pd.DataFrame containing the exogenous commissioning costs including the + data from the actual myopic run + + exo_commissioned_capacity_df_total_RH : :obj:`pandas.DataFrame` + A pd.DataFrame containing the exogenous commissioning capacity and year + including the data from the actual myopic run + + exo_decommissioned_capacity_df_total_RH : :obj:`pandas.DataFrame` + A pd.DataFrame containing the exogenous decommissioning capacity and year + including the data from the actual myopic run + """ + + RH_startyear = startyear + (counter * years_per_timeslice) + if (startyear + ((counter + 1) * years_per_timeslice) - 1) > endyear: + RH_endyear = endyear + else: + RH_endyear = startyear + (((counter + 1) * years_per_timeslice) - 1) + + existing_transformers_decom_capacity_df_RH = existing_transformers_exo_decom( + existing_transformers_decom_df, + startyear=RH_startyear, + endyear=RH_endyear) + + new_transformers_exo_com_costs_df_RH, new_transformers_exo_com_capacity_df_RH, \ + new_transformers_exo_decom_capacity_df_RH = new_transformers_exo(new_transformers_de_com_df, + investment_costs_df, + WACC_df, + startyear=RH_startyear, + endyear=RH_endyear, + IR=IR, + discount=discount) + + # Discount annuities to overall_startyear + if discount: + new_transformers_exo_com_costs_df_RH = discount_values( + new_transformers_exo_com_costs_df_RH, + IR, + 
RH_startyear, + startyear) + + logging.info('Exogenous transformers costs for run {:d}: {:,.0f}'.format( + counter, new_transformers_exo_com_costs_df_RH.sum().sum())) + + storages_exo_com_costs_df_RH, storages_exo_com_capacity_df_RH, \ + storages_exo_decom_capacity_df_RH = storages_exo(new_built_storages_df, + storage_turbine_investment_costs_df, + storage_pump_investment_costs_df, + storage_investment_costs_df, + WACC_df, + startyear=RH_startyear, + endyear=RH_endyear, + IR=IR, + discount=discount) + + # Discount annuities to overall_startyear + if discount: + storages_exo_com_costs_df_RH = discount_values( + storages_exo_com_costs_df_RH, + IR, + RH_startyear, + startyear) + + logging.info('Exogenous storages costs for run {:d}: {:,.0f}'.format( + counter, storages_exo_com_costs_df_RH.sum().sum())) + + renewables_exo_com_costs_df_total_RH, renewables_exo_com_capacity_renewables_df_RH, \ + renewables_exo_decom_capacity_df_RH = renewables_exo(renewables_com_df, + investment_costs_df, WACC_df, + startyear=RH_startyear, endyear=RH_endyear, + IR=IR, + discount=discount) + + # Discount annuities to overall_startyear + if discount: + renewables_exo_com_costs_df_total_RH = discount_values( + renewables_exo_com_costs_df_total_RH, + IR, + RH_startyear, + startyear) + + logging.info('Exogenous renewables costs for run {:d}: {:,.0f}'.format( + counter, renewables_exo_com_costs_df_total_RH.sum().sum())) + + total_exo_com_costs_df_RH_iteration = pd.concat( + [new_transformers_exo_com_costs_df_RH, + storages_exo_com_costs_df_RH, + renewables_exo_com_costs_df_total_RH], axis=0, sort=True) + + total_exo_com_capacity_df_RH_iteration = pd.concat( + [new_transformers_exo_com_capacity_df_RH, + storages_exo_com_capacity_df_RH, + renewables_exo_com_capacity_renewables_df_RH], axis=0, sort=True) + + total_exo_decom_capacity_df_RH_iteration = pd.concat( + [existing_transformers_decom_capacity_df_RH, + new_transformers_exo_decom_capacity_df_RH, + storages_exo_decom_capacity_df_RH, + 
renewables_exo_decom_capacity_df_RH], axis=0, sort=True) + + # Combine the results from previous iterations with the one of the current iteration + total_exo_com_costs_df_RH = pd.concat([total_exo_com_costs_df_RH, + total_exo_com_costs_df_RH_iteration], + axis=1, sort=True) + total_exo_com_capacity_df_RH = pd.concat([total_exo_com_capacity_df_RH, + total_exo_com_capacity_df_RH_iteration], + axis=1, sort=True) + total_exo_decom_capacity_df_RH = pd.concat([total_exo_decom_capacity_df_RH, + total_exo_decom_capacity_df_RH_iteration], + axis=1, sort=True) + + return total_exo_com_costs_df_RH, total_exo_com_capacity_df_RH, total_exo_decom_capacity_df_RH diff --git a/pommesinvest/model_funcs/helpers.py b/pommesinvest/model_funcs/helpers.py new file mode 100644 index 0000000..514d66a --- /dev/null +++ b/pommesinvest/model_funcs/helpers.py @@ -0,0 +1,482 @@ +# -*- coding: utf-8 -*- +""" +General description +------------------- +These are supplementary routines used in the power market model POMMES. + +Installation requirements +------------------------- +Python version >= 3.8 + +@author: Johannes Kochems +""" + +# Import necessary packages for function definitions +import pandas as pd +import math +# Import datetime for conversion between date string and its element (year, month, ...) 
+from datetime import datetime +from dateutil.relativedelta import relativedelta +from pandas.tseries.frequencies import to_offset +# Compress can be used to slice lists using another list of boolean values +# -> used by method MultipleLeapYears +from itertools import compress +# Imports for creating holidays in pandas +# See: https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html, accessed 03.09.2019 +# Pandas on git hub: https://github.com/pandas-dev/pandas/blob/master/pandas/tseries/holiday.py, accessed 03.09.2019 +from pandas.tseries.holiday import AbstractHolidayCalendar, Holiday, EasterMonday, GoodFriday +from pandas.tseries.offsets import Day, Easter + +### NOTE: SOME OF THE FUNCTIONS ARE NOT USED, BUT MAY AT SOME POINT BE USEFUL (OR NOT) + +# JK: FUNCTION NOT TESTED YET! +def csv_to_excel(path_from, path_to = './inputs/', filename_to = 'power_market_input_data', *args): + + """ Reads in several csv-datasheets and puts them together to an Excel workbook. + + Parameters + ---------- + path_from: :obj:`str` + The path the csv files are stored at + + path_to: :obj:`str` + The path where the resulting .xlsx file shall be stored at + + filename_to: :obj:`str` + The filename for the resulting .xlsx file + + *args: :obj:`str` + A list of the name of csv-files that will be put together + + Returns + ------- + workbook: :obj:`file` + An Excel Workbook containing all data in separate Worksheets + """ + + # Create a list of sheets (i.e. pd.DataFrames) + sheets = [pd.read_csv(path_from + el, sep=';', decimal = ',') for el in args] + + # Create a Pandas Excel writer using XlsxWriter as the engine. 
+ writer = pd.ExcelWriter(path_to + filename_to, engine='xlsxwriter') + + for sheet in sheets: + sheet.to_excel(writer) + + # Save the excel file and save it as variable workbook which is returned + writer.save() + workbook = writer.book + + return workbook + + +# 27.06.2019, FM: tested +def datetime_index_from_demand_data(path, file): + + """ Separately read in demand data from a .csv file. + The first column of the demand file must be the index column containing entries + that can be interpreted as dates. + + Parameters + ---------- + path: :obj:`str` + The path the csv file is stored at with/ in the end in order to connect the path + file: :obj:`str` + The name of the demand data file + + Returns + ------- + datetime: :obj:`datetime index` + """ + + demand_df = pd.read_csv(path+file, sep=';', decimal=',', parse_dates=True, index_col=0) + #27.06.2019, FM: old code: datetime= demand.df.index() + datetime = demand_df.index + + return datetime + + +# 28.09.2019, JK: Function taken from Stack overflow issue, see: https://stackoverflow.com/questions/4436957/pythonic-difference-between-two-dates-in-years, accessed 28.08.2019 +def years_between(y1, y2): + + """ Calculate the difference in years between two dates using the dateutil.relativedelta package + + Parameters: + ---------- + y1: :obj:`str` + The first date string + y2: :obj:`str` + The second date string + + Returns + ------- + year_diff: :obj:`int` + The difference between the two dates in years + + """ + + y1 = datetime.strptime(y1, '%Y-%m-%d %H:%M:%S') + y2 = datetime.strptime(y2, '%Y-%m-%d %H:%M:%S') + year_diff = abs(relativedelta(y1, y2).years) + + return year_diff + + + +# 09.12.2018, JK: Function taken from Stack overflow issue, see: https://stackoverflow.com/questions/8419564/difference-between-two-dates-in-python, accessed 09.12.2018 +#27.06.2019, FM: tested +def days_between(d1, d2): + + """ Calculate the difference in days between two days using the datetime package + + Parameters: + ---------- + d1: 
:obj:`str` + The first date string + d2: :obj:`str` + The second date string + + Returns + ------- + day_diff: :obj:`int` + The difference between the two dates in days + + + """ + + d1 = datetime.strptime(d1, '%Y-%m-%d %H:%M:%S') + d2 =datetime.strptime(d2, '%Y-%m-%d %H:%M:%S') + day_diff = abs((d2 - d1).days) + + return day_diff + + +# 16.12.2018, JK: Function inspired from Stack overflow issue, see: https://stackoverflow.com/questions/24217641/how-to-get-the-difference-between-two-dates-in-hours-minutes-and-seconds, accessed 16.12.2018 +# 05.07.2019, FM: tested +# TODO: YW: Search for pandas predefined function +def hours_between(h1, h2): + """ Calculate the difference in days between two days using the datetime package + + Parameters: + ---------- + h1: :obj:`str` + The first date string + h2: :obj:`str` + The second date string + + Returns + ------- + hour_diff: :obj:`int` + The difference between the two dates in hours + + + """ + + h1 = datetime.strptime(h1, '%Y-%m-%d %H:%M:%S') + h2 = datetime.strptime(h2, '%Y-%m-%d %H:%M:%S') + diff = abs((h2 - h1)) + days, seconds = diff.days, diff.seconds + # 16.12.2018, JK: Double slash here is used for floor division (rounded down to nearest number) + # Actually not really needed here, since only full hours are simulated, at least so far... + hour_diff = days * 24 + math.floor(seconds / 3600) + print("hour_diff: " + str(hour_diff)) + + return hour_diff + + +def timesteps_between_timestamps(ts1, ts2, freq): + """ Calculate the difference in days between two days using simple arithmetics. + + Parameters: + ---------- + ts1: :obj:`pd.Timestamp` + The first timestamp + ts2: :obj:`pd.Timestamp` + The first timestamp + freq: :obj:`str + The frequency information, e.g. 
'60min', '15min' + + Returns + ------- + hour_diff: :obj:`int` + The difference between the two dates in hours + + + """ + timesteps_seconds = {'60min': (24, 3600), '15min': (96, 900)} + + diff = ts2 - ts1 + + timestep_diff = diff.days * timesteps_seconds[freq][0] + math.floor(diff.seconds / timesteps_seconds[freq][1]) + + return timestep_diff + + +# 09.12.2018, JK: Routine to check whether a given year is a leapyear or not. +# Could be useful for adapting the number of days resp. timesteps per year. +# 05.07.2019, FM: tested +def IsLeapYear(year): + + """ Check whether given year is a leap year or not. + + Parameters: + ----------- + year: :obj:`int` + year which shall be checked + + Returns: + -------- + LeapYear: :obj:`boolean` + True if year is a leap year and False else + + """ + + # Underlying rules: + # every fourth year is a leap year unless it can be divided by 100. + # every fourhundredth year in turn is a leap year. + + LeapYear = False + + if year % 4 == 0: + LeapYear = True + if year % 100 == 0: + LeapYear = False + if year % 400 == 0: + LeapYear = True + + return LeapYear + +#05.07.2019,FM: tested +def MultipleLeapYears(years): + + """ Checks a list of multiple years to find out which one of them is a leap year. + In turn calls function IsLeapYear. + + Parameters: + ----------- + years: :obj:`list` + list of years which shall be checked + + Returns: + -------- + LeapYears: + list of LeapYears + + """ + LeapYearsBoolean = [IsLeapYear(el) for el in years] + # 11.12.2018, JK: To improve functionality, a set could be used to remove duplicates which may occur in input list + # Since this is more or less a throwaway function, there is no need to do that here. + # 11.12.2018, JK: compress is used here to be able to filter list years using LeapYearsBoolean, i.e. 
a list with boolean filter values + LeapYears = list(compress(years, LeapYearsBoolean)) + return LeapYears + + +# TODO: Resume with resampled timeseries (JK) +# -> There probably exist some routines from demand regio which may be used... +#05.07.2019, FM:Not yet tested +def resample_timeseries(timeseries, + freq, + aggregation_rule = 'sum', + interpolation_rule = 'linear'): + + """ Resample a timeseries to the frequency provided. + The frequency of the given timeseries is determined at first and upsampling + resp. downsampling are carried out afterwards. For upsampling linear + interpolation (default) is used, but another method may be chosen. + + Parameters + ---------- + timeseries : :obj:`pd.DataFrame` + The timeseries to be resampled stored in a pd.DataFrame + + freq : :obj:`str` + The target frequency + + interpolation_rule : :obj:`str` + Method used for interpolation in upsampling + + Returns + ------- + resampled_timeseries : :obj:`pd.DataFrame` + The resampled timeseries stored in a pd.DataFrame + + """ + + # 16.05.2019, JK: determine frequency of timeseries given + # For an alternative approach see: https://stackoverflow.com/questions/31517728/python-pandas-detecting-frequency-of-time-series + try: + original_freq = pd.infer_freq(timeseries.index, warn=True) + + # Error occurs when only two neigbouring timesteps are simulated which + # might be a potential use case for the model + # Function needs at least 3 values to obtain freq of a timeseries + except ValueError: + original_freq = 'AS' + + # 16.05.2019, JK: Determine whether upsampling or downsampling is needed + # 16.05.2019, JK: Workaround needed here + if to_offset(freq) > to_offset(original_freq): + # do downsampling + resampled_timeseries = timeseries.resample(freq).agg(aggregation_rule) + + else: + # do upsampling + resampled_timeseries = timeseries.resample(freq).interpolate(method=interpolation_rule) + + return resampled_timeseries + +# 07.06.2019, JK: Generic function for combining functions 
taken from Stack Overflow: +# https://stackoverflow.com/questions/13865009/have-multiple-commands-when-button-is-pressed +# accessed 07.06.2019 +def combine_funcs(*funcs): + """ Take an arbitrary number of functions as argument and combine them, + i.e. carry out the functions successively. + + Parameters + ---------- + *funcs: + The functions to be combined + + Returns + ------- + combined_func: + A method that successively runs the functions that shall be combined. + """ + def combined_func(*args, **kwargs): + for f in funcs: + f(*args, **kwargs) + + return combined_func + +def convert_annual_limit(annual_limit, starttime, endtime): + """Convert an annual limit to a sub- or multi-annual one + + Parameters + ---------- + annual_limit: :obj:`float` or :obj:`pd.Series`of :class:`float` + An annual limit (e.g. for emissions, investment budgets) + if starttime and endtime are within the same year + or a pd.Series of annual limits indexed by years if starttime and + endtime are not within one year + + starttime: :obj:`str` + The first date string; starttime of the optimization run + + endtime: :obj:`str` + The second date string; endtime of the optimization run + + Returns + ------- + new_limit: :obj:`float` + A sub-annual / multi-annual limit for the optimization timeframe + + """ + dt_start = datetime.strptime(starttime, '%Y-%m-%d %H:%M:%S') + dt_end = datetime.strptime(endtime, '%Y-%m-%d %H:%M:%S') + start_year = dt_start.year + end_year = dt_end.year + + new_limit = 0 + + if start_year == end_year: + day_diff = days_between(starttime, endtime) + year_fraction = day_diff / float(365) + if isinstance(annual_limit, float): + new_limit = annual_limit * year_fraction + else: + new_limit = annual_limit[start_year] * year_fraction + + else: + start_year_begin = str(start_year) + '-01-01 00:00:00' + end_year_end = str(end_year) + '-12-31 23:59:59' + day_diff_start = days_between(starttime, start_year_begin) + day_diff_end = days_between(end_year_end, endtime) + + 
start_year_fraction = (365 - day_diff_start) / float(365) + end_year_fraction = (365 - day_diff_end) / float(365) + full_years = end_year - start_year - 1 + + # Add annual limits for full years within optimization time frame + for i in range(full_years): + new_limit += annual_limit[start_year + i + 1] + + # Add limits for fractions of the start year and end year + new_limit += (annual_limit[start_year] * start_year_fraction + + annual_limit[end_year] * end_year_fraction) + + return new_limit + + +# Taken from oemof.tools.economics, but removed check for infeasible values +def calc_annuity(capex, n, wacc, u=None, cost_decrease=0): + """Calculates the annuity of an initial investment 'capex', considering the + cost of capital 'wacc' during a project horizon 'n' + + In case of a single initial investment, the employed formula reads: + + .. math:: + annuity = capex \cdot \frac{(wacc \cdot (1+wacc)^n)} + {((1 + wacc)^n - 1)} + + In case of repeated investments (due to replacements) at fixed intervals + 'u', the formula yields: + + .. math:: + annuity = capex \cdot \frac{(wacc \cdot (1+wacc)^n)} + {((1 + wacc)^n - 1)} \cdot \left( + \frac{1 - \left( \frac{(1-cost\_decrease)} + {(1+wacc)} \right)^n} + {1 - \left( \frac{(1-cost\_decrease)}{(1+wacc)} + \right)^u} \right) + + Parameters + ---------- + capex : float + Capital expenditure for first investment. Net Present Value (NPV) or + Net Present Cost (NPC) of investment + n : int + Horizon of the analysis, or number of years the annuity wants to be + obtained for (n>=1) + wacc : float + Weighted average cost of capital (0=1) + cost_decrease : float + Annual rate of cost decrease (due to, e.g., price experience curve). + This only influences the result for investments corresponding to + replacements, whenever u adjust amount of timeslices if year is a leap year. 
def add_further_constrs(om,
                        ActivateEmissionsLimit,
                        ActivateInvestmentBudgetLimit,
                        emissions_limit,
                        investment_budget,
                        countries=None,
                        fuels=None):
    """Integrate further constraints into the optimization model

    For now, an additional overall emissions limit and an overall
    investment budget limit can be imposed.

    Note that setting an emissions limit may conflict with high minimum
    loads from conventional transformers. This may lead to model
    infeasibility if commodity bus balances cannot be met.

    Parameters
    ----------
    om : :class:`oemof.solph.models.Model`
        The original mathematical optimisation model to be solved

    ActivateEmissionsLimit : :obj:`boolean`
        If True, an emission limit is introduced

    ActivateInvestmentBudgetLimit : :obj:`boolean`
        If True, an overall investment budget limit is introduced

    emissions_limit : float
        The actual emissions limit to be used

    investment_budget : float
        The overall investment budget limit to be used

    countries : :obj:`list` of `str`
        The countries for which an emissions limit shall be imposed
        (Usually only Germany)

    fuels : :obj:`list` of `str`
        The fuels for which an emissions limit shall be imposed

    Returns
    -------
    om : :class:`oemof.solph.models.Model`
        The optimisation model with the requested constraints attached
    """
    if ActivateEmissionsLimit:
        # Resolve mutable defaults here; they are only relevant for the
        # emissions limit anyway
        if countries is None:
            countries = ['DE']

        if fuels is None:
            fuels = ['biomass', 'hardcoal', 'lignite',
                     'natgas', 'uranium', 'oil',
                     'otherfossil', 'waste', 'mixedfuels']

        # Emissions limit is imposed for flows from commodity source to
        # commodity bus, identified via their label pattern
        emission_flow_labels = [country + '_bus_' + fuel
                                for country in countries
                                for fuel in fuels]

        # Collect the affected flows only when the limit is actually active;
        # the original version scanned all model flows unconditionally
        emission_flows = {
            (i, o): om.flows[(i, o)]
            for (i, o) in om.flows
            if any(x in o.label for x in emission_flow_labels)
        }

        solph.constraints.emission_limit(om, flows=emission_flows,
                                         limit=emissions_limit)
        logging.info(f"Adding an EMISSIONS LIMIT of {emissions_limit} t CO2")

    # TODO: Revise!
    if ActivateInvestmentBudgetLimit:
        om = solph.constraints.investment_limit(om, limit=investment_budget)
        # Log only when the budget constraint has actually been added
        logging.info(
            f"Adding an INVESTMENT BUDGET LIMIT of {investment_budget} €")

    return om
def build_simple_model(path_folder_input,
                       filename_node_data,
                       filename_cost_data,
                       filename_node_timeseries,
                       filename_min_max_timeseries,
                       filename_cost_timeseries,
                       AggregateInput,
                       startyear,
                       endyear,
                       MaxInvest,
                       fuel_cost_pathway,
                       investment_cost_pathway,
                       starttime,
                       endtime,
                       freq,
                       multiplicator,
                       optimization_timeframe,
                       IR=0.02,
                       discount=False,
                       ActivateEmissionsLimit=False,
                       emission_pathway='100_percent_linear',
                       ActivateInvestmentBudgetLimit=False,
                       investment_budget=None,
                       ActivateDemandResponse=False,
                       approach='DIW',
                       scenario='50'):
    """Set up and return a simple model, i.e. one overall optimization run
    without any measures for complexity reduction.

    Builds the full datetime index, creates an oemof EnergySystem, reads
    all nodes from the input workbooks / csv files and assembles a solph
    Model including optional emissions and investment budget constraints.

    Parameters
    ----------
    path_folder_input : :obj:`str`
        The file path where input files are stored (common folder)

    filename_node_data : :obj:`str`
        Name of Excel Workbook containing all data for creating nodes
        (buses and oemof components)

    filename_cost_data : :obj:`str`
        Name of Excel Workbook containing cost pathways for oemof components

    filename_node_timeseries : :obj:`str`
        Filename of the node timeseries data, given in a separate .csv file

    filename_min_max_timeseries : :obj:`str`
        Filename of the min / max transformer data, given in a separate
        .csv file

    filename_cost_timeseries : :obj:`str`
        Filename of the cost timeseries data, given in a separate .csv file

    AggregateInput : :obj:`boolean`
        If True an aggregated transformers input data set is used, elsewhise
        the full transformers input data set is used

    startyear : :obj:`int`
        The startyear of the optimization run

    endyear : :obj:`int`
        The endyear of the optimization run

    MaxInvest : :obj:`boolean`
        If True, investment limits per technology are applied

    fuel_cost_pathway : :obj:`str`
        Variable indicating which fuel cost pathway to use;
        possible values 'lower', 'middle', 'upper'

    investment_cost_pathway : :obj:`str`
        Variable indicating which investment cost pathway to use;
        possible values 'lower', 'middle', 'upper'

    starttime : :obj:`str`
        The starttime of the optimization run

    endtime : :obj:`str`
        The endtime of the optimization run

    freq : :obj:`str`
        The frequency used for the datetimeindex of the optimization run

    multiplicator : :obj:`int`
        A multiplicator to convert the input data given with an hourly
        resolution to another (usually a lower) one

    optimization_timeframe : :obj:`str`
        The length of the overall optimization timeframe in years
        (used for determining technology specific investment limits)

    IR : :obj:`float`
        The interest rate used for discounting

    discount : :obj:`boolean`
        Boolean parameter indicating whether or not to discount future
        investment costs

    ActivateEmissionsLimit : :obj:`boolean`
        If True, an emission limit is introduced

    emission_pathway : str
        The pathway for emissions reduction to be used

    ActivateInvestmentBudgetLimit : :obj:`boolean`
        If True, an overall investment budget limit is introduced

    investment_budget : float
        The overall investment budget limit to be used

    ActivateDemandResponse : :obj:`boolean`
        Boolean control parameter indicating whether or not to introduce
        demand response units

    approach : :obj:`str`
        Demand response modeling approach to be used;
        must be one of ['DIW', 'DLR', 'IER', 'TUD']

    scenario : :obj:`str`
        Demand response scenario to be modeled;
        must be one of ['25', '50', '75'] whereby '25' is the lower,
        i.e. rather pessimistic estimate

    Returns
    -------
    om : :class:`oemof.solph.models.Model`
        The mathematical optimisation model solved including the results

    existing_storage_labels : :obj:`list`
        List of the labels of existing storages

    new_built_storage_labels : :obj:`list`
        List of the labels of new built storages

    total_exo_com_costs_df : :obj:`pd.DataFrame`
        A DataFrame containing the overall costs for exogeneous investements

    total_exo_com_capacity_df : :obj:`pd.DataFrame`
        A DataFrame containing the overall capacity for exogeneous
        investements

    total_exo_decom_capacity_df : :obj:`pd.DataFrame`
        A DataFrame containing the overall capacity for exogeneous
        decommissioning decisions
    """
    datetime_index = pd.date_range(
        starttime, endtime, freq=freq)

    logging.info('Starting optimization')

    # initialisation of the energy system with its timeindex attribute
    es = solph.EnergySystem(timeindex=datetime_index)

    logging.info('Running an integrated INVESTMENT and dispatch OPTIMIZATION')
    logging.info('Time frequency used is {}'.format(freq))

    # Create all nodes from excel sheet and store costs data;
    # emissions_limit is derived from the input data as well
    (nodes_dict, existing_storage_labels, new_built_storage_labels,
     total_exo_com_costs_df, total_exo_com_capacity_df,
     total_exo_decom_capacity_df,
     emissions_limit) \
        = nodes_from_excel(path_folder_input,
                           filename_node_data,
                           filename_cost_data,
                           filename_node_timeseries,
                           filename_min_max_timeseries,
                           filename_cost_timeseries,
                           AggregateInput,
                           startyear,
                           endyear,
                           MaxInvest,
                           fuel_cost_pathway,
                           investment_cost_pathway,
                           starttime,
                           endtime,
                           freq,
                           multiplicator,
                           optimization_timeframe,
                           IR=IR,
                           discount=discount,
                           ActivateEmissionsLimit=ActivateEmissionsLimit,
                           emission_pathway=emission_pathway,
                           ActivateDemandResponse=ActivateDemandResponse,
                           approach=approach,
                           scenario=scenario)

    logging.info('Creating a LP optimization model for integrated\n'
                 'INVESTMENT and dispatch OPTIMIZATION.')

    # Add all nodes to the EnergySystem
    es.add(*nodes_dict.values())

    # Create a least cost model from the energy system
    # (builds mathematical model)
    om = solph.Model(es)

    om = add_further_constrs(om,
                             ActivateEmissionsLimit,
                             ActivateInvestmentBudgetLimit,
                             emissions_limit,
                             investment_budget)

    return om, existing_storage_labels, new_built_storage_labels, \
        total_exo_com_costs_df, total_exo_com_capacity_df, \
        total_exo_decom_capacity_df
def initial_states_RH(om,
                      timeslice_length_wo_overlap_in_timesteps,
                      new_built_transformer_labels,
                      new_built_storage_labels,
                      endo_exo_exist_df,
                      endo_exo_exist_stor_df):
    """Obtain initial states / existing capacities for the next myopic window.

    For a LP INVESTMENT MODEL configuration, iterate over all new built
    transformers and storages of the energy system and collect how much
    capacity was installed in the previous rolling horizon window, so the
    next window can start from consistent existing capacities. Initial
    states (storage contents) only have to be set for storage units since
    there are none for transformer units in a pure LP investment model.

    Parameters
    ----------
    om : :class:`oemof.solph.models.Model`
        The solved mathematical optimisation model including its results

    timeslice_length_wo_overlap_in_timesteps : :obj:`int`
        Length of a rolling horizon timeslice excluding overlap

    new_built_transformer_labels : :obj:`list` of :class:`str`
        List of transformer labels (obtained from input data)

    new_built_storage_labels : :obj:`list` of :class:`str`
        List of storage labels (obtained from input data)

    endo_exo_exist_df : :obj:`pd.DataFrame`
        Endogeneous and exogeneous transformers commissioning information
        for setting initial states

    endo_exo_exist_stor_df : :obj:`pd.DataFrame`
        Endogeneous and exogeneous storages commissioning information
        for setting initial states

    Returns
    -------
    transformers_init_df : :obj:`pd.DataFrame`
        Transformer capacity data for the last timestep of the
        optimization window (excluding overlap)

    storages_init_df : :obj:`pd.DataFrame`
        Storage capacity and state data for the last timestep of the
        optimization window (excluding overlap)
    """
    # DataFrames for storing transformer resp. storage initial timestep data
    transformers_init_df = pd.DataFrame(
        columns=['Existing_Capacity_Transformer', 'Existing_Capacity_endo',
                 'endo_cumulated', 'old_exo'],
        index=new_built_transformer_labels)

    storages_init_df = pd.DataFrame(
        columns=['Capacity_Last_Timestep', 'Existing_Inflow_Power',
                 'Existing_Outflow_Power', 'Existing_Capacity_Storage'],
        index=new_built_storage_labels)

    model_results = solph.processing.results(om)

    for i in transformers_init_df.index:

        transformer = solph.views.node(model_results, i)
        # TODO, JK: Find a more elegant solution than probing with
        # try/except here -> obtain country info before.
        # A unit not connected to the German bus raises KeyError on the
        # first lookup; such units are assumed to feed the Austrian bus.
        try:
            transformers_init_df.loc[i, 'Existing_Capacity_Transformer'] = \
                transformer['scalars'][((i, 'DE_bus_el'), 'invest')]
            transformers_init_df.loc[i, 'Existing_Capacity_endo'] = \
                endo_exo_exist_df.loc[i, 'Existing_Capacity_endo']
            transformers_init_df.loc[i, 'endo_cumulated'] = \
                transformers_init_df.loc[i, 'Existing_Capacity_Transformer'] \
                + transformers_init_df.loc[i, 'Existing_Capacity_endo']
            transformers_init_df.loc[i, 'old_exo'] = \
                endo_exo_exist_df.loc[i, 'old_exo']

        except KeyError:
            transformers_init_df.loc[i, 'Existing_Capacity_Transformer'] = \
                transformer['scalars'][((i, 'AT_bus_el'), 'invest')]

    # Iterate over all storages and set parameters for the initial
    # timestep of the next timeslice
    for i in storages_init_df.index:

        storage = solph.views.node(model_results, i)

        try:
            # Obtain data for last timestep of the storage unit during
            # the optimization run (excluding overlap)
            storages_init_df.loc[i, 'Capacity_Last_Timestep'] = \
                storage['sequences'][((i, 'None'), 'storage_content')][
                    timeslice_length_wo_overlap_in_timesteps - 1]
            storages_init_df.loc[i, 'Existing_Inflow_Power'] = \
                storage['scalars'][(('DE_bus_el', i), 'invest')]
            storages_init_df.loc[i, 'Existing_Outflow_Power'] = \
                storage['scalars'][((i, 'DE_bus_el'), 'invest')]
            storages_init_df.loc[i, 'Existing_Capacity_Storage'] = \
                storage['scalars'][((i, 'None'), 'invest')]

            storages_init_df.loc[i, 'Existing_Capacity_endo'] = \
                endo_exo_exist_stor_df.loc[i, 'capacity_endo']
            storages_init_df.loc[i, 'Existing_turbine_endo'] = \
                endo_exo_exist_stor_df.loc[i, 'turbine_endo']
            storages_init_df.loc[i, 'Existing_pump_endo'] = \
                endo_exo_exist_stor_df.loc[i, 'pump_endo']

            storages_init_df.loc[i, 'capacity_endo_cumulated'] = \
                storages_init_df.loc[i, 'Existing_Capacity_Storage'] \
                + storages_init_df.loc[i, 'Existing_Capacity_endo']
            storages_init_df.loc[i, 'turbine_endo_cumulated'] = \
                storages_init_df.loc[i, 'Existing_Outflow_Power'] \
                + storages_init_df.loc[i, 'Existing_turbine_endo']
            # Fixed: original added 'Existing_Capacity_endo' here, breaking
            # the symmetry with the capacity and turbine accumulations
            storages_init_df.loc[i, 'pump_endo_cumulated'] = \
                storages_init_df.loc[i, 'Existing_Inflow_Power'] \
                + storages_init_df.loc[i, 'Existing_pump_endo']

            storages_init_df.loc[i, 'old_exo_cap'] = \
                endo_exo_exist_stor_df.loc[i, 'old_exo_cap']
            storages_init_df.loc[i, 'old_exo_turbine'] = \
                endo_exo_exist_stor_df.loc[i, 'old_exo_turbine']
            storages_init_df.loc[i, 'old_exo_pump'] = \
                endo_exo_exist_stor_df.loc[i, 'old_exo_pump']

        except KeyError:
            # Fallback for units connected to the Austrian bus instead
            storages_init_df.loc[i, 'Capacity_Last_Timestep'] = \
                storage['sequences'][((i, 'None'), 'storage_content')][
                    timeslice_length_wo_overlap_in_timesteps - 1]
            storages_init_df.loc[i, 'Existing_Inflow_Power'] = \
                storage['scalars'][(('AT_bus_el', i), 'invest')]
            storages_init_df.loc[i, 'Existing_Outflow_Power'] = \
                storage['scalars'][((i, 'AT_bus_el'), 'invest')]
            storages_init_df.loc[i, 'Existing_Capacity_Storage'] = \
                storage['scalars'][((i, 'None'), 'invest')]

    return transformers_init_df, storages_init_df
def build_RH_model(path_folder_input,
                   filename_node_data,
                   filename_cost_data,
                   filename_node_timeseries,
                   filename_min_max_timeseries,
                   filename_cost_timeseries,
                   AggregateInput,
                   fuel_cost_pathway,
                   investment_cost_pathway,
                   endyear,
                   MaxInvest,
                   myopic_horizon_in_years,
                   timeseries_start,
                   timeslice_length_with_overlap,
                   counter,
                   transformers_init_df,
                   storages_init_df,
                   freq,
                   multiplicator,
                   overlap_in_timesteps,
                   years_per_timeslice,
                   total_exo_com_costs_df_RH,
                   total_exo_com_capacity_df_RH,
                   total_exo_decom_capacity_df_RH,
                   IR=0.02,
                   discount=False,
                   ActivateEmissionsLimit=False,
                   emission_pathway='100_percent_linear',
                   ActivateInvestmentBudgetLimit=False,
                   investment_budget=None,
                   ActivateDemandResponse=False,
                   approach='DIW',
                   scenario='50'
                   ):
    """Set up and return a rolling horizon LP dispatch model.

    Builds the datetime index for the current myopic iteration, derives
    the window's end year, creates the energy system nodes via
    ``nodes_from_excel_rh`` (handing over initial states from the previous
    window) and assembles a solph Model including optional constraints.

    Parameters
    ----------
    path_folder_input : :obj:`str`
        The file path where input files are stored (common folder)

    filename_node_data : :obj:`str`
        Name of Excel Workbook containing all data for creating nodes
        (buses and oemof components)

    filename_cost_data : :obj:`str`
        Name of Excel Workbook containing cost pathways for oemof components

    filename_node_timeseries : :obj:`str`
        Filename of the node timeseries data, given in a separate .csv file

    filename_min_max_timeseries : :obj:`str`
        Filename of the min / max transformer data, given in a separate
        .csv file

    filename_cost_timeseries : :obj:`str`
        Filename of the cost timeseries data, given in a separate .csv file

    AggregateInput : :obj:`boolean`
        If True an aggregated transformers input data set is used, elsewhise
        the full transformers input data set is used

    fuel_cost_pathway : :obj:`str`
        Variable indicating which fuel cost pathway to use;
        possible values 'lower', 'middle', 'upper'

    investment_cost_pathway : :obj:`str`
        Variable indicating which investment cost pathway to use;
        possible values 'lower', 'middle', 'upper'

    endyear : :obj:`int`
        The endyear of the optimization run

    MaxInvest : :obj:`boolean`
        If True, investment limits per technology are applied

    myopic_horizon_in_years : :obj:`int`
        The length of a myopic iteration in years

    timeseries_start : :obj:`pd.Timestamp`
        The starting timestep used for the iteration

    timeslice_length_with_overlap : :obj:`int`
        The duration of a timeslice in timesteps including overlap
        (Usually no overlap used in investment model formulation)

    counter : :obj:`int`
        A counter for the myopic iteration

    transformers_init_df : :obj:`pandas.DataFrame`
        Transformer data from previous model runs

    storages_init_df : :obj:`pandas.DataFrame`
        Storage data from previous model runs

    freq : :obj:`str`
        The frequency used for the datetimeindex of the optimization run

    multiplicator : :obj:`int`
        A multiplicator to convert the input data given with an hourly
        resolution to another (usually a lower) one

    overlap_in_timesteps : :obj:`int`
        The overlap in timesteps if a rolling horizon model is run
        (to prevent index out of bounds error)

    years_per_timeslice : :obj:`int`
        Number of years of a timeslice (a given myopic iteration); may
        differ for the last timesteps since it does not necessarily have
        to be completely covered

    total_exo_com_costs_df_RH : :obj:`pd.DataFrame`
        Overall costs for exogeneous investements

    total_exo_com_capacity_df_RH : :obj:`pd.DataFrame`
        Overall capacity for exogeneous investements

    total_exo_decom_capacity_df_RH : :obj:`pd.DataFrame`
        Overall capacity for exogeneous decommissioning decisions

    IR : :obj:`float`
        The interest rate used for discounting

    discount : :obj:`boolean`
        Whether or not to discount future investment costs

    ActivateEmissionsLimit : :obj:`boolean`
        If True, an emission limit is introduced

    emission_pathway : str
        The pathway for emissions reduction to be used

    ActivateInvestmentBudgetLimit : :obj:`boolean`
        If True, an overall investment budget limit is introduced

    investment_budget : float
        The overall investment budget limit to be used

    ActivateDemandResponse : :obj:`boolean`
        Whether or not to introduce demand response units

    approach : :obj:`str`
        Demand response modeling approach to be used;
        must be one of ['DIW', 'DLR', 'IER', 'TUD']

    scenario : :obj:`str`
        Demand response scenario to be modeled;
        must be one of ['25', '50', '75'] whereby '25' is the lower,
        i.e. rather pessimistic estimate

    Returns
    -------
    om : :class:`oemof.solph.models.Model`
        The mathematical optimisation model to be solved

    es : :class:`oemof.solph.network.EnergySystem`
        The energy system itself (used for determining initial states for
        the next rolling horizon iteration)

    timeseries_start : :obj:`pd.Timestamp`
        The adjusted starting timestep used for the next iteration

    new_built_transformer_labels : :obj:`list` of `str`
        List of transformer labels (used for next iteration)

    new_built_storage_labels : :obj:`list` of `str`
        List of storages labels (used for next iteration)

    datetime_index : :obj:`pd.DateRange`
        Datetime index for the current iteration

    endo_exo_exist_df : :obj:`pd.DataFrame`
        Endogeneous and exogeneous transformers commissioning information
        for setting initial states

    endo_exo_exist_stor_df : :obj:`pd.DataFrame`
        Endogeneous and exogeneous storages commissioning information
        for setting initial states

    total_exo_com_costs_df_RH : :obj:`pd.DataFrame`
        Overall costs for exogeneous investements

    total_exo_com_capacity_df_RH : :obj:`pd.DataFrame`
        Overall capacity for exogeneous investements

    total_exo_decom_capacity_df_RH : :obj:`pd.DataFrame`
        Overall capacity for exogeneous decommissioning decisions
    """
    # Set date_range object from start to endtime of each iteration
    datetime_index = pd.date_range(start=timeseries_start,
                                   periods=timeslice_length_with_overlap,
                                   freq=freq)

    startyear = timeseries_start.year

    # Clip the window's end year to the overall optimization horizon
    if (startyear + myopic_horizon_in_years - 1) >= endyear:
        RH_endyear = endyear
    else:
        RH_endyear = (startyear + myopic_horizon_in_years - 1)

    logging.info('Starting optimization for optimization run '
                 + str(counter))

    es = solph.EnergySystem(timeindex=datetime_index)

    # Create all nodes of the energy system and return labels
    # as well as information on existing capacities
    (node_dict, new_built_transformer_labels, new_built_storage_labels,
     endo_exo_exist_df, endo_exo_exist_stor_df, existing_storage_labels,
     total_exo_com_costs_df_RH, total_exo_com_capacity_df_RH,
     total_exo_decom_capacity_df_RH,
     emissions_limit) \
        = nodes_from_excel_rh(path_folder_input,
                              filename_node_data,
                              filename_cost_data,
                              filename_node_timeseries,
                              filename_min_max_timeseries,
                              filename_cost_timeseries,
                              AggregateInput,
                              counter,
                              storages_init_df,
                              transformers_init_df,
                              timeslice_length_with_overlap,
                              RH_endyear,
                              MaxInvest,
                              timeseries_start,
                              total_exo_com_costs_df_RH,
                              total_exo_com_capacity_df_RH,
                              total_exo_decom_capacity_df_RH,
                              fuel_cost_pathway,
                              investment_cost_pathway,
                              freq,
                              multiplicator,
                              overlap_in_timesteps,
                              years_per_timeslice,
                              IR=IR,
                              discount=discount,
                              ActivateEmissionsLimit=ActivateEmissionsLimit,
                              emission_pathway=emission_pathway,
                              ActivateDemandResponse=ActivateDemandResponse,
                              approach=approach,
                              scenario=scenario)

    # Increase starting point for the next model run (do not lose time
    # information). Instead of incrementing, set as start point of the next
    # full year (needed for frequencies > 24 H)
    timeseries_start = pd.Timestamp(year=RH_endyear + 1, month=1, day=1,
                                    hour=0, freq=freq)

    es.add(*node_dict.values())
    logging.info("Sucessfully set up energy system for iteration "
                 + str(counter))

    om = solph.Model(es)

    om = add_further_constrs(om,
                             ActivateEmissionsLimit,
                             ActivateInvestmentBudgetLimit,
                             emissions_limit,
                             investment_budget)

    return om, es, timeseries_start, new_built_transformer_labels, \
        new_built_storage_labels, datetime_index, endo_exo_exist_df, \
        endo_exo_exist_stor_df, existing_storage_labels, \
        total_exo_com_costs_df_RH, total_exo_com_capacity_df_RH, \
        total_exo_decom_capacity_df_RH
def solve_RH_model(om,
                   datetime_index,
                   counter,
                   startyear,
                   myopic_horizon_in_years,
                   timeslice_length_wo_overlap_in_timesteps,
                   results_sequences,
                   results_scalars,
                   overall_objective,
                   overall_solution_time,
                   new_built_storage_labels,
                   existing_storage_labels,
                   IR=0.02,
                   discount=False,
                   solver='gurobi'):
    """Solve the rolling horizon optimization for a given window.

    Solves the model, extracts sequences and investment scalars for the
    German electricity bus and all storages (excluding the overlap), and
    accumulates the overall objective value and solution time.

    Parameters
    ----------
    om : :class:`oemof.solph.models.Model`
        The mathematical optimisation model to be solved

    datetime_index : :obj:`pd.date_range`
        The datetime index of the energy system

    counter : :obj:`int`
        A counter for rolling horizon optimization windows (iterations)

    startyear : :obj:`int`
        The start year of the overall optimization

    myopic_horizon_in_years : :obj:`int`
        The length of a myopic iteration in years

    timeslice_length_wo_overlap_in_timesteps : :obj:`int`
        Length of a single (rolling horizon) optimization window
        (excluding overlap)

    results_sequences : :obj:`pd.DataFrame`
        Stores the sequence results of every optimization window

    results_scalars : :obj:`pd.DataFrame`
        Stores the investment decisions taken

    overall_objective : :obj:`float`
        The overall objective value accumulated so far

    overall_solution_time : :obj:`float`
        The overall solution time accumulated so far

    new_built_storage_labels : :obj:`list` of `str`
        Labels of new built storages

    existing_storage_labels : :obj:`list` of `str`
        Labels of existing storages

    IR : :obj:`float`
        The interest rate used for discounting

    discount : :obj:`boolean`
        If True, nominal values will be discounted;
        if False, real values have to be used as model inputs (default)

    solver : :obj:`str`
        The solver to be used (defaults to 'gurobi')

    Returns
    -------
    om : :class:`oemof.solph.models.Model`
        The solved optimisation model

    results_sequences : :obj:`pd.DataFrame`
        Updated sequence results including the current window

    results_scalars : :obj:`pd.DataFrame`
        Updated investment decision results

    overall_objective : :obj:`float`
        The updated overall objective value

    overall_solution_time : :obj:`float`
        The updated overall solution time
    """
    RH_startyear = startyear + (counter * myopic_horizon_in_years)

    # Solve the mathematical optimization model using the given solver
    om.solve(solver=solver, solve_kwargs={'tee': True})
    print("********************************************************")

    logging.info("Model run %s done!" % (str(counter)))

    model_results = solph.processing.results(om)

    # Obtain electricity results
    electricity_bus = solph.views.node(model_results, 'DE_bus_el')
    electricity_bus_scalars = electricity_bus['scalars']
    # Save results (excluding overlap)
    df_rcut = pd.DataFrame(
        data=electricity_bus['sequences'][
            0:timeslice_length_wo_overlap_in_timesteps])

    # Iterate over all storage labels to get the capacity scalars
    # and sequences
    storage_labels = new_built_storage_labels + existing_storage_labels

    storage_results_dict = {}
    for i in storage_labels:
        storage_results_dict[i] = solph.views.node(model_results, i)
        storage_capacity_results = pd.DataFrame(
            data=storage_results_dict[i]['sequences'][
                0:timeslice_length_wo_overlap_in_timesteps][
                [((i, 'None'), 'storage_content')]])
        df_rcut = pd.concat([df_rcut, storage_capacity_results],
                            axis=1, sort=True)
        # Investment scalars only exist for new built units
        if i in new_built_storage_labels:
            capacity_invest_results = \
                storage_results_dict[i]['scalars'][[((i, 'None'), 'invest')]]
            electricity_bus_scalars = pd.concat(
                [electricity_bus_scalars, capacity_invest_results],
                axis=0, sort=True)

    results_scalars = pd.concat(
        [results_scalars, electricity_bus_scalars, ], axis=1, sort=True)
    # Fixed: DataFrame.append was removed in pandas 2.0; use pd.concat
    results_sequences = pd.concat([results_sequences, df_rcut], sort=True)

    meta_results = solph.processing.meta_results(om)

    # NOTE: objective is calculated including overlap
    # -> No slicing possible here
    if not discount:
        overall_objective += int(meta_results['objective'])
    else:
        overall_objective += int(
            (meta_results['objective'])
            / ((1 + IR) ** (RH_startyear - startyear)))

    overall_solution_time += meta_results['solver']['Time']

    return om, results_sequences, results_scalars, \
        overall_objective, overall_solution_time
def load_input_data(filename=None,
                    path_folder_input='../data/Outputlisten/',
                    countries=None):
    """Load input data from csv files.

    Files whose name contains 'ts' are treated as time series and parsed
    with a DatetimeIndex; all other files use plain column 0 as index.
    If the file has a 'country' column and ``countries`` is given, rows
    are filtered to those countries.

    Parameters
    ----------
    filename : :obj:`str`
        Name of CSV file containing data (without the '.csv' suffix)

    path_folder_input : :obj:`str`
        The path_folder_output where the input data is stored

    countries : :obj:`list` of str
        List of countries to be simulated

    Returns
    -------
    df :pandas:`pandas.DataFrame`
        DataFrame containing information about nodes or time series.
    """
    # TODO: include read in from binary files to save space with timeseries
    # TODO: implement checkers for input data validity, formats and such

    if 'ts' in filename:
        df = pd.read_csv(path_folder_input + filename + '.csv',
                         parse_dates=True, index_col=0)
    else:
        df = pd.read_csv(path_folder_input + filename + '.csv', index_col=0)

    if 'country' in df.columns and countries is not None:
        df = df[df['country'].isin(countries)]

    if df.isna().any().any() and 'ts' in filename:
        # Fixed: the original f-string had no placeholder and therefore
        # could not report which file actually contains NaNs
        print(f'Attention! Time series input data file {filename} '
              f'contains NaNs.')
        print(df.loc[df.isna().any(axis=1)])

    return df
+ + Parameters + ---------- + path_folder_input : :obj:`str` + The file path where input files are stored (common folder) + + filename_node_data : :obj:`str` + Name of Excel Workbook containing all data for creating nodes (buses and oemof components) + + filename_cost_data : :obj:`str` + Name of Excel Workbook containing cost pathways for oemof components + + filename_node_timeseries : :obj:`str` + Filename of the node timeseries data, given in a separate .csv file + + filename_min_max_timeseries : :obj:`str` + Filename of the min / max transformer data, given in a separate .csv file + + filename_cost_timeseries : :obj:`str` + Filename of the cost timeseries data, given in a separate .csv file + + fuel_cost_pathway : :obj:`str` + variable indicating which fuel cost pathway to use + Possible values 'lower', 'middle', 'upper' + + investment_cost_pathway : :obj:`str` + variable indicating which investment cost pathway to use + Possible values 'lower', 'middle', 'upper' + + starttime : :obj:`str` + The starting timestamp of the optimization timeframe + + endtime : :obj:`str` + The end timestamp of the optimization timeframe + + freq : :obj:`string` + A string defining the timeseries target frequency; determined by the + model configuration + + multiplicator : :obj:`int` + A multiplicator to convert the input data given with an hourly resolution + to another (usually a lower) one + + overlap_in_timesteps : :obj:`int` + the overlap in timesteps if a rolling horizon model is run + (to prevent index out of bounds error) + + Returns + ------- + All component DataFrames (i.e. 
buses, transformerns, renewables, demand, + storages, commodity_sources and timeseries data) with its parameters + obtained from the input Excel workbook + + """ + + #manually set input path for change to csv files + path_folder_input_csv = '../data/Outputlisten_Test_Invest/' + + + # Read in node Excel file and parse its spreadsheets to pd.DataFrames + xls_node = pd.ExcelFile(path_folder_input + filename_node_data) + 'here change for buses to from csv!!!' + buses_df = pd.read_csv(path_folder_input_csv + 'buses' + '.csv', index_col=0) + #buses_df = xls_node.parse('buses', index_col=0) + excess_df = pd.read_csv(path_folder_input_csv + 'sinks_excess' + '.csv', index_col=0) + #excess_df = xls_node.parse('excess', index_col=0) + shortage_df = pd.read_csv(path_folder_input_csv + 'sources_shortage' + '.csv', index_col=0) + #shortage_df = xls_node.parse('shortage', index_col=0) + commodity_sources_df = pd.read_csv(path_folder_input_csv + 'sources_commodity' + '.csv', index_col=0) + #commodity_sources_df = xls_node.parse('commodity_sources', index_col=0) + renewables_df = pd.read_csv(path_folder_input_csv + 'sources_renewables' + '.csv', index_col=0) + #renewables_df = xls_node.parse('renewables', index_col=0) + demand_df = pd.read_csv(path_folder_input_csv + 'sinks_demand_el' + '.csv', index_col=0) + #demand_df = xls_node.parse('demand', index_col=0) + + existing_transformers_df = pd.read_csv(path_folder_input_csv + 'transformers_clustered' + '.csv', index_col=0) + #existing_transformers_df = xls_node.parse('existing_transformers', index_col=0) + new_built_transformers_df = pd.read_csv(path_folder_input_csv + 'transformers_new' + '.csv', index_col=0) + #new_built_transformers_df = xls_node.parse('new_built_transformers', index_col=0) + + # Adapt transformer information to frequency information + # For the sake of interpretation, not the capacities itselfs are multiplied + # with the multiplicator, but min as well as max outputs + # NOTE: gradients are not (yet) included in 
the investment mode + existing_transformers_df.loc[:, ['grad_pos', 'grad_neg', + 'max_load_factor', 'min_load_factor']] = \ + existing_transformers_df.loc[:, ['grad_pos', 'grad_neg', + 'max_load_factor', 'min_load_factor']].mul(multiplicator) + + new_built_transformers_df.loc[:, ['grad_pos', 'grad_neg', + 'max_load_factor', 'min_load_factor']] = \ + new_built_transformers_df.loc[:, ['grad_pos', 'grad_neg', + 'max_load_factor', 'min_load_factor']].mul(multiplicator) + + existing_storages_df = pd.read_csv(path_folder_input_csv + 'storages_el' + '.csv', index_col=0) + #existing_storages_df = xls_node.parse('existing_storages', index_col=0) + new_built_storages_df = pd.read_csv(path_folder_input_csv + 'storages_new' + '.csv', index_col=0) + #new_built_storages_df = xls_node.parse('new_built_storages', index_col=0) + + # Set empty entries to 'None' (pandas default 'NaN' cannot be handled by Pyomo) + # Furthermore adapt storage information to frequency information + new_built_storages_df = new_built_storages_df.where(pd.notnull(new_built_storages_df), None) + + existing_storages_df.loc[:, ['max_storage_level', 'min_storage_level', + 'nominal_storable_energy']] = \ + existing_storages_df.loc[:, ['max_storage_level', 'min_storage_level', + 'nominal_storable_energy']].mul(multiplicator) + + new_built_storages_df.loc[:, ['max_storage_level', 'min_storage_level', + 'nominal_storable_energy']] = \ + new_built_storages_df.loc[:, ['max_storage_level', 'min_storage_level', + 'nominal_storable_energy']].mul(multiplicator) + + # Parse sheets for exogeneous commissioning and decommissioning + existing_transformers_decom_df = pd.read_csv(path_folder_input_csv + 'transformers_decommissioning' + '.csv', index_col=0) + #existing_transformers_decom_df = xls_node.parse('transformers_decommissioning', index_col=0) + new_transformers_de_com_df = pd.read_csv(path_folder_input_csv + 'transformers_new_decommissioning' + '.csv', index_col=0) + #new_transformers_de_com_df = 
xls_node.parse('new_transformers_de_comm', index_col=0) + renewables_com_df = pd.read_csv(path_folder_input_csv + 'renewables_commissioning' + '.csv', index_col=0) + #renewables_com_df = xls_node.parse('renewables_commissioning', index_col=0) + + # Parse index column only in order to determine start and end point for values to parse (via index) + node_start = pd.read_csv(path_folder_input_csv + filename_node_timeseries, + parse_dates=True, index_col=0, usecols=[0]) + start = pd.Index(node_start.index).get_loc(starttime) + timeseries_end = pd.Timestamp(endtime, freq) + end = pd.Index(node_start.index).get_loc((timeseries_end + overlap_in_timesteps + * timeseries_end.freq).strftime("%Y-%m-%d %H:%M:%S")) + + node_timeseries_df = resample_timeseries(pd.read_csv(path_folder_input_csv + filename_node_timeseries, + parse_dates=True, index_col=0, + skiprows=range(1, start + 1), nrows=end - start + 1), + freq, + aggregation_rule='sum') + + # Commissions / decommissions only happen on an annual basis + # (slice year, month and date (20XX-01-01)) + min_max_start = pd.read_csv(path_folder_input_csv + filename_min_max_timeseries, + parse_dates=True, index_col=0, usecols=[0]) + start = pd.Index(min_max_start.index).get_loc(starttime[:10])[0] + timeseries_end = pd.Timestamp(endtime, freq) + end = pd.Index(min_max_start.index).get_loc( + str((timeseries_end + overlap_in_timesteps * timeseries_end.freq).year + 1) + "-01-01")[0] + + # Workaround used here: Fillna using ffill and delete last value not needed anymore + min_max_timeseries_df = \ + resample_timeseries(pd.read_csv(path_folder_input_csv + filename_min_max_timeseries, + parse_dates=True, index_col=0, + header=[0, 1], + skiprows=range(2, start + 1), + nrows=end - start + 1).resample('H').fillna(method='ffill')[:-1], + freq, + aggregation_rule='sum') + + # Read in cost Excel file and parse its spreadsheets to pd.DataFrames + '''new import of cost via csv''' + + #xls_cost = pd.ExcelFile(path_folder_input + filename_cost_data) 
+ + fuel_costs_df = pd.read_csv(path_folder_input_csv + 'costs_fuel' + '.csv', index_col=0) + #fuel_costs_df = xls_cost.parse(fuel_cost_pathway + '_fuel_costs', index_col=0) + operation_costs_df = pd.read_csv(path_folder_input_csv + 'costs_operation' + '.csv', index_col=0) + #operation_costs_df = xls_cost.parse('operation_costs', index_col=0) + ramping_costs_df = pd.read_csv(path_folder_input_csv + 'costs_ramping' + '.csv', index_col=0) + #ramping_costs_df = xls_cost.parse('ramping_costs', index_col=0) + startup_costs_df = pd.read_csv(path_folder_input_csv + 'costs_startup' + '.csv', index_col=0) + startup_costs_df.columns[1:len((startup_costs_df.columns))].astype(int) + #startup_costs_df = xls_cost.parse('startup_costs', index_col=0) + storage_var_costs_df = pd.read_csv(path_folder_input_csv + 'costs_operation_storages' + '.csv', index_col=0) + storage_var_costs_df.columns = storage_var_costs_df.columns.astype(int) + #storage_var_costs_df = xls_cost.parse('storage_var_costs', index_col=0) + + investment_costs_df = pd.read_csv(path_folder_input_csv + 'costs_invest' + '.csv', index_col=0) + investment_costs_df.columns = investment_costs_df.columns.astype(int) + #investment_costs_df = xls_cost.parse(investment_cost_pathway + '_investment_costs', index_col=0) + storage_investment_costs_df = pd.read_csv(path_folder_input_csv + 'costs_invest_storage' + '.csv', index_col=0) + storage_investment_costs_df.columns = storage_investment_costs_df.columns.astype(int) + #storage_investment_costs_df = xls_cost.parse('storage_inv_costs', index_col=0) + storage_pump_investment_costs_df = pd.read_csv(path_folder_input_csv + 'costs_invest_storage_pump' + '.csv', index_col=0) + storage_pump_investment_costs_df.columns = storage_pump_investment_costs_df.columns.astype(int) + #storage_pump_investment_costs_df = xls_cost.parse('storage_pump_inv_costs', index_col=0) + storage_turbine_investment_costs_df = pd.read_csv(path_folder_input_csv + 'costs_invest_storage_pump' + '.csv', 
index_col=0) + storage_turbine_investment_costs_df.columns = storage_turbine_investment_costs_df.columns.astype(int) + #storage_turbine_investment_costs_df = xls_cost.parse('storage_turbine_inv_costs', index_col=0) + WACC_df = pd.read_csv(path_folder_input_csv + 'WACC' + '.csv', index_col=0) + WACC_df.columns = WACC_df.columns.astype(int) + #WACC_df = xls_cost.parse('WACC', index_col=0) + + # Parse index column only in order to determine start and end point for values to parse (via index) + # NOTE: If node and cost data have the same starting point and frequency, + # this becomes obsolete and fastens up parsing of input data + cost_start = pd.read_csv(path_folder_input_csv + filename_cost_timeseries, + parse_dates=True, index_col=0, usecols=[0]) + start = pd.Index(cost_start.index).get_loc(starttime[:10]) + timeseries_end = pd.Timestamp(endtime, freq) + end = pd.Index(cost_start.index).get_loc( + str((timeseries_end + overlap_in_timesteps * timeseries_end.freq).year + 1) + "-01-01") + + # Workaround used here: Fillna using ffill and + # delete last value not needed anymore + cost_timeseries_df = pd.read_csv(path_folder_input_csv + filename_cost_timeseries, + parse_dates=True, index_col=0, header=[0, 1], + skiprows=range(2, start + 1), nrows=end - start + 1).resample( + freq).fillna(method='ffill')[:-1] + + # TODO, JK/YW: Introduce a dict of DataFrames for better handling + return buses_df, excess_df, shortage_df, commodity_sources_df, \ + existing_transformers_df, new_built_transformers_df, \ + renewables_df, demand_df, \ + existing_storages_df, new_built_storages_df, \ + existing_transformers_decom_df, new_transformers_de_com_df, renewables_com_df, \ + node_timeseries_df, min_max_timeseries_df, fuel_costs_df, operation_costs_df, \ + ramping_costs_df, startup_costs_df, storage_var_costs_df, \ + investment_costs_df, storage_investment_costs_df, \ + storage_pump_investment_costs_df, storage_turbine_investment_costs_df, \ + WACC_df, cost_timeseries_df + + +def 
create_buses(buses_df, node_dict): + """ Function to read in data from buses table and to create the respective bus elements + by adding them to the dictionary of nodes. + + Parameters + ---------- + buses_df : :obj:`pd.DataFrame` + pd.DataFrame containing the bus elements to be created + + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including the buses elements + + """ + + # Create Buses objects from table 'buses' + for i, b in buses_df.iterrows(): + # 02.05.2019, JK: Take care: First column must include labels to be used as identifierss + node_dict[i] = solph.Bus(label=i) + + return node_dict + + +def create_sources(node_dict, commodity_sources_df, fuel_costs_df, fuel_cost_pathway, + shortage_df, renewables_df, node_timeseries_df, + cost_timeseries_df, starttime, endtime): + """ Function calls all methods that creates source elements. + See individual function definitions below for further information. + + Parameters + ---------- + TBA + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including + all source elements needed for the model run + """ + node_dict = create_commodity_sources(commodity_sources_df, cost_timeseries_df, + fuel_costs_df, fuel_cost_pathway, + node_dict, starttime, endtime) + node_dict = create_shortage_sources(shortage_df, node_dict) + node_dict = create_renewables(renewables_df, node_timeseries_df, node_dict, + starttime, endtime) + + return node_dict + + +def create_commodity_sources(commodity_sources_df, cost_timeseries_df, + fuel_costs_df, fuel_cost_pathway, + node_dict, starttime, endtime): + """ Function to create the commodity source elements + by adding them to the dictionary of nodes. + + NOTE: Start year of cost time series is hard coded! 
+ + Parameters + ---------- + commodity_sources_df : :obj:`pd.DataFrame` + pd.DataFrame containing the commodity source elements to be created + + cost_timeseries_df : :obj:`pd.DataFrame` + pd.DataFrame containing the cost timeseries data + + fuel_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing the fuel costs data + + fuel_cost_pathway : :obj:`str` + variable indicating which fuel cost pathway to use + Possible values 'lower', 'middle', 'upper' + + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + starttime : :obj:`str` + The starting timestamp of the optimization timeframe + + endtime : :obj:`str` + The starting timestamp of the optimization timeframe + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including + the commodity source elements + + """ + + for i, cs in commodity_sources_df.iterrows(): + # Variable costs, i.e. fuel costs, are obtained through a normalized + # timeseries and the nominal value for 2015 which has been used for + # normalization + node_dict[i] = solph.Source( + label=i, + outputs={node_dict[cs['to']]: solph.Flow( + variable_costs=(cost_timeseries_df.loc[ + starttime:endtime, (fuel_cost_pathway + '_fuel_costs', i)] + * fuel_costs_df.loc[i, '2015']).to_numpy(), + emission_factor=cs['emission_factors'])}) + + return node_dict + + +# TODO: SHOW A WARNING IF SHORTAGE OR EXCESS ARE ACTIVE +def create_shortage_sources(shortage_df, node_dict): + """ Function to create the shortage sources (needed for model feasibility) + by adding them to the dictionary of nodes. 
+ + Parameters + ---------- + shortage_df : :obj:`pd.DataFrame` + pd.DataFrame containing the shortage source elements to be created + + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including + the shortage source elements + """ + + # From a mathematical point of view needed for model feaseability. + # In the first place, shortage costs are fixed for all timesteps + for i, s in shortage_df.iterrows(): + node_dict[i] = solph.Source(label=i, outputs={ + node_dict[s['to']]: solph.Flow( + variable_costs=s['shortage_costs'])}) + + return node_dict + + + +# TODO: Include variable costs for RES +def create_renewables(renewables_df, node_timeseries_df, node_dict, starttime, endtime): + """ Function to create the renewables source elements + by adding them to the dictionary of nodes. (No variable costs for RES are assumed yet.) + The capacity for 2016 is used for indexation of the other time series values. 
+ + Parameters + ---------- + renewables_df : :obj:`pd.DataFrame` + pd.DataFrame containing the renewables source elements to be created + + node_timeseries_df : :obj:`pd.DataFrame` + pd.DataFrame containing the timeseries data + + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + starttime : :obj:`str` + The starting timestamp of the optimization timeframe + + endtime : :obj:`str` + The starting timestamp of the optimization timeframe + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including the renewavles source elements + + """ + + for i, re in renewables_df.iterrows(): + node_dict[i] = solph.Source(label=i, outputs={node_dict[re['to']]: solph.Flow( + fix=(node_timeseries_df[i][starttime:endtime]).to_numpy(), + nominal_value=re['capacity_2016'] + )}) + + return node_dict + + +# NOTE: As long as renewables capacity is given model endogeneously, there is no reason why +# another approach as for transformers is applied. Methods could be combined to one. +def renewables_exo(renewables_com_df, + investment_costs_df, + WACC_df, + startyear, + endyear, + IR=0.02, + discount=False): + """ Function calculates dicounted investment costs for exogenously + new built renewables from renewables_exo_commissioning_df + which are built after the model-endogeneous investment year. 
Furthermore the commissioned + and decommissioned capacity from startyear until endyear for all new renewables + are stored in a seperate DataFrame + + Parameters + ---------- + renewables_com_df : :obj:`pandas.DataFrame` + A pd.DataFrame containing the renewables' data + + investment_costs_df: :obj:`pandas.DataFrame` + pd.DataFrame containing the Investment costs for new built renewables + + WACC_df : :obj:`pd.DataFrame` + pd.DataFrame containing the WACC data + + startyear : :obj:`int` + Starting year of the overall optimization run + + endyear : :obj:`int` + End year of the overall optimization run + + IR : :obj:`float` + The interest rate to be applied for discounting (only if discount is True) + + discount : :obj:`boolean` + If True, nominal values will be dicounted + If False, real values have to be used as model inputs (default) + + Returns + ------- + renewables_exo_com_costs_df : :obj:`pandas.core.frame.DataFrame` + Dataframe containing total discounted cost for new built renewables + from startyear until endyear + + renewables_exo_com_capacity_df : :obj:`pandas.core.frame.DataFrame` + Dataframe containing the commissioned capacity of all exogenously new built + renewables in between startyear and endyear + + renewables_exo_decom_capacity_df : :obj:`pandas.core.frame.DataFrame` + Dataframe containing the decommissioned capacity of all exogenously new built + renewables in between startyear and endyear + """ + + renewables_exo_com_costs_df = pd.DataFrame(index=renewables_com_df.index) + renewables_exo_com_capacity_df = pd.DataFrame(index=renewables_com_df.index) + renewables_exo_decom_capacity_df = pd.DataFrame(index=renewables_com_df.index) + + # Use temporary DataFrames with transformer type as index, + # merge these together and apply function calc_annuity + # implementation of oemof.economics.annuity is not applicable for pd.Series + costs_df = pd.merge(investment_costs_df, WACC_df, on="transformer_type", suffixes=["_invest", "_WACC"]) + all_cols_df = 
pd.merge(renewables_com_df, costs_df, + left_on=renewables_com_df.index, right_on="transformer_type").set_index("transformer_type") + + for year in range(startyear, endyear + 1): + year_str = str(year) + renewables_exo_com_costs_df['exo_cost_' + year_str] = \ + renewables_com_df['commissioned_' + year_str] * calc_annuity( + capex=all_cols_df[year_str + '_invest'], + n=all_cols_df['unit_lifetime'], + wacc=all_cols_df[year_str + '_WACC']) + + if discount: + renewables_exo_com_costs_df['exo_cost_' + year_str] = \ + renewables_exo_com_costs_df['exo_cost_' + year_str].div( + ((1 + IR) ** (year - startyear))) + + # extract commissioning resp. decommissioning data + renewables_exo_com_capacity_df['commissioned_' + year_str] = renewables_com_df['commissioned_' + year_str] + renewables_exo_decom_capacity_df['decommissioned_' + year_str] = renewables_com_df['decommissioned_' + year_str] + + return renewables_exo_com_costs_df, renewables_exo_com_capacity_df, renewables_exo_decom_capacity_df + + +def create_sinks(node_dict, demand_df, node_timeseries_df, + starttime, endtime, excess_df, + RollingHorizon=True, + ActivateDemandResponse=False, + dr_overall_load_ts_df=None): + """ Function calls all methods that creates source elements. + See individual function definitions below for further information. 
+ + Parameters + ---------- + TBA + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including + all source elements needed for the model run + """ + node_dict = create_demand(demand_df, node_timeseries_df, + starttime, endtime, node_dict, + RollingHorizon, + ActivateDemandResponse, + dr_overall_load_ts_df) + node_dict = create_excess_sinks(excess_df, node_dict) + + return node_dict + + +def create_demand(demand_df, node_timeseries_df, + starttime, endtime, node_dict, + ActivateDemandResponse=False, + dr_overall_load_ts_df=None): + """ Function to create the demand sink elements + by adding them to the dictionary of nodes. + The maximum value of 2016 is used for indexation. + + Parameters + ---------- + demand_df : :obj:`pd.DataFrame` + pd.DataFrame containing the demand sink elements to be created + + node_timeseries_df : :obj:`pd.DataFrame` + pd.DataFrame containing the timeseries data + + starttime : :obj:`str` + The starting timestamp of the optimization timeframe + + endtime : :obj:`str` + The end timestamp of the optimization timeframe + + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + starttime : :obj:`str` + The starting timestamp of the optimization timeframe + + endtime : :obj:`str` + The starting timestamp of the optimization timeframe + + ActivateDemandResponse : :obj:`boolean` + Boolean control parameter indicating whether or not to introduce + demand response units + + dr_overall_load_ts_df : :pandas:`pandas.Series` + The overall load time series from demand response units which is + used to decrement overall electrical load for Germany + NOTE: This shall be substituted through a version which already + includes this in the data preparation + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including the demand sink elements + + """ + + # 
Create Sink objects with fixed time series from 'demand' table + for i, d in demand_df.iterrows(): + # node_dict[i] = solph.Sink(label=i, inputs={node_dict[d['from']]: solph.Flow( + # fix=(node_timeseries_df[i][starttime:endtime]).to_numpy(), + # nominal_value=d['maximum_2016'] + # )}) + kwargs_dict = { + 'label': i, + 'inputs': {node_dict[d['from']]: solph.Flow( + fix=(node_timeseries_df[i][starttime:endtime]).to_numpy(), + nominal_value=d['maximum_2016'])}} + + # TODO: Include into data preparation and write adjusted demand + # Adjusted demand here means the difference between overall demand + # and default load profile for demand response units + if ActivateDemandResponse: + if i == 'DE_sink_el_load': + kwargs_dict['inputs'] = {node_dict[d['from']]: solph.Flow( + fix=np.array(node_timeseries_df[i][starttime:endtime] + .mul(d['maximum_2016']) + .sub( + dr_overall_load_ts_df[starttime:endtime])), + nominal_value=1)} + + node_dict[i] = solph.Sink(**kwargs_dict) + + return node_dict + + +# TODO: Resume and include modeling approaches into project +def create_demand_response_units(demand_response_df, load_timeseries_df, + availability_timeseries_pos_df, + availability_timeseries_neg_df, + approach, starttime, endtime, + node_dict): + """Create demand response units and add them to the dict of nodes. + + The demand response modeling approach can be chosen from different + approaches that have been implemented. 
+ + Parameters + ---------- + demand_response_df : :pandas:`pandas.DataFrame` + pd.DataFrame containing the demand response sink elements to be created + + load_timeseries_df : :pandas:`pandas.DataFrame` + pd.DataFrame containing the load timeseries for the demand response + clusters to be modeled + + availability_timeseries_pos_df : :pandas:`pandas.DataFrame` + pd.DataFrame containing the availability timeseries for the demand + response clusters for downwards shifts (positive direction) + + availability_timeseries_neg_df : :pandas:`pandas.DataFrame` + pd.DataFrame containing the availability timeseries for the demand + response clusters for upwards shifts (negative direction) + + approach : :obj:`str` + Demand response modeling approach to be used; + must be one of ['DIW', 'DLR', 'IER', 'TUD'] + + starttime : :obj:`str` + The starting timestamp of the optimization timeframe + + endtime : :obj:`str` + The end timestamp of the optimization timeframe + + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including + the demand response sink elements + + dr_overall_load_ts : :pandas:`pandas.Series` + The overall load time series from demand response units which is + used to decrement overall electrical load for Germany + NOTE: This shall be substituted through a version which already + includes this in the data preparation + """ + + for i, d in demand_response_df.iterrows(): + # Use kwargs dict for easier assignment of parameters + # kwargs for all DR modeling approaches + kwargs_all = { + 'demand': np.array(load_timeseries_df[i].loc[starttime:endtime]), + 'max_demand': d['max_cap'], + 'capacity_up': np.array(availability_timeseries_neg_df[i] + .loc[starttime:endtime]), + 'max_capacity_up': d['potential_neg_overall'], + 'capacity_down': np.array(availability_timeseries_pos_df[i] + 
.loc[starttime:endtime]), + 'max_capacity_down': d['potential_pos_overall'], + 'delay_time': math.ceil(d['shifting_duration']), + 'shed_time': 1, + 'recovery_time_shift': math.ceil(d['regeneration_duration']), + 'recovery_time_shed': 0, + 'cost_dsm_up': d['variable_costs'] / 2, + 'cost_dsm_down_shift': d['variable_costs'] / 2, + 'cost_dsm_down_shed': 10000, + 'efficiency': 1, + 'shed_eligibility': False, + 'shift_eligibility': True, + } + + # kwargs dependent on DR modeling approach chosen + kwargs_dict = { + 'DIW': {'method': 'delay', + 'shift_interval': 24}, + + 'DLR': {'shift_time': d['interference_duration_pos'], + 'ActivateYearLimit': True, + 'ActivateDayLimit': False, + 'n_yearLimit_shift': np.max( + [round(d['maximum_activations_year']), 1]), + 'n_yearLimit_shed': 1, + 't_dayLimit': 24, + 'addition': False, + 'fixes': True}, + + 'IER': {'shift_time_up': d['interference_duration_neg'], + 'shift_time_down': d['interference_duration_pos'], + 'start_times': + range(0, + len(np.array(load_timeseries_df[i] + .loc[starttime:endtime])), + math.ceil(d['shifting_duration'])), + 'cumulative_shift_time': + (np.max([round(d['maximum_activations_year']), 1]) + * d['interference_duration_pos']), + 'cumulative_shed_time': + len(np.array(load_timeseries_df[i] + .loc[starttime:endtime])), + 'addition': False, + 'fixes': True}, + + 'TUD': {'shift_time_down': d['interference_duration_pos'], + 'postpone_time': d['interference_duration_neg'], + 'annual_frequency_shift': + np.max([round(d['maximum_activations_year']), 1]), + # Hard-coded limit to prevent IndexError for small simulation timeframe + # 'annual_frequency_shift': 1, + 'annual_frequency_shed': len(np.array(load_timeseries_df[i] + .loc[starttime:endtime])), + 'daily_frequency_shift': 24, + 'addition': False} + } + + # Note: label and inputs have to be separately assigned here + # in order to prevent a KeyError that has something to do with constraint grouping + approach_dict = { + "DIW": DSM_DIW.SinkDSM( + label=i, + 
inputs={node_dict[d['from']]: solph.Flow(variable_costs=0)}, + **kwargs_all, + **kwargs_dict["DIW"], + invest=solph.options.Investment( + # Memo: Critically check min and max invest params + minimum=0, + maximum=min(d['max_cap'] + d['potential_neg_overall'], + d['installed_cap']), + ep_costs=d['specific_investments'] * 1e3 + )), + "DLR": DSM_DLR.SinkDR( + label=i, + inputs={node_dict[d['from']]: solph.Flow(variable_costs=0)}, + **kwargs_all, + **kwargs_dict["DLR"], + invest=solph.options.Investment( + # Memo: Critically check min and max invest params + minimum=0, + maximum=min(d['max_cap'] + d['potential_neg_overall'], + d['installed_cap']), + ep_costs=d['specific_investments'] * 1e3 + )), + "IER": DSM_IER.SinkDSI( + label=i, + inputs={node_dict[d['from']]: solph.Flow(variable_costs=0)}, + **kwargs_all, + **kwargs_dict["IER"], + invest=solph.options.Investment( + # Memo: Critically check min and max invest params + minimum=0, + maximum=min(d['max_cap'] + d['potential_neg_overall'], + d['installed_cap']), + ep_costs=d['specific_investments'] * 1e3 + )), + "TUD": DSM_TUD.SinkDSM( + label=i, + inputs={node_dict[d['from']]: solph.Flow(variable_costs=0)}, + **kwargs_all, + **kwargs_dict["TUD"], + invest=solph.options.Investment( + # Memo: Critically check min and max invest params + minimum=0, + maximum=min(d['max_cap'] + d['potential_neg_overall'], + d['installed_cap']), + ep_costs=d['specific_investments'] * 1e3 + )) + } + + node_dict[i] = approach_dict[approach] + + # Calculate overall electrical load from demand response units + # which is substracted from electrical load + dr_overall_load_ts_df = load_timeseries_df.mul( + demand_response_df['max_cap']).sum(axis=1) + + return node_dict, dr_overall_load_ts_df + + +def create_excess_sinks(excess_df, node_dict): + """ Function to create the excess sinks (needed for model feasibility) + by adding them to the dictionary of nodes. 
+ + Parameters + ---------- + excess_df : :obj:`pd.DataFrame` + pd.DataFrame containing the excess sink elements to be created + + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including + the excess sink elements + """ + + for i, e in excess_df.iterrows(): + node_dict[i] = solph.Sink(label=i, inputs={node_dict[e['from']]: solph.Flow( + variable_costs=e['excess_costs'])}) + + return node_dict + + +def build_CHP_transformer(i, t, node_dict, outflow_args_el, outflow_args_th): + """ Function used to actually build transformer elements and include a distinction between + CHP, var_CHP and condensing power plants. Function is called by create_dispatch_transformers + as well as by create_invest_transformers. + + Parameters + ---------- + i : :obj:`str` + label of current transformer (within iteration) + + t : :obj:`pd.Series` + pd.Series containing attributes for transformer component (row-wise data entries) + + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + outflow_args_el: :obj:`dict` + Dictionary holding the values for electrical outflow arguments + + outflow_args_th: :obj:`dict` + Dictionary holding the values for thermal outflow arguments + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including the demand sink elements + + """ + + # Build actual transformer element using the correct efficiency parameters + node_dict[i] = solph.Transformer( + label=i, + inputs={node_dict[t['from']]: solph.Flow()}, + outputs={ + node_dict[t['to_el']]: solph.Flow(**outflow_args_el), + node_dict[t['to_th']]: solph.Flow(**outflow_args_th)}, + + # 05.10.2018, JK: add conversion factors for electrical and thermal conversion for CHP plant + # to outflow_args + 
conversion_factors={ + node_dict[t['to_el']]: t['efficiency_el_CC'], + node_dict[t['to_th']]: t['efficiency_th_CC']}) + + return node_dict[i] + + +# TODO: Substitue this by ExtractionTurbine to model "real" flexible CHP plants +def build_var_CHP_transformer(i, t, node_dict, outflow_args_el, outflow_args_th): + """ Function used to actually build transformer elements and include a distinction between + CHP, var_CHP and condensing power plants. Function is called by create_dispatch_transformers + as well as by create_invest_transformers. + + Parameters + ---------- + i : :obj:`str` + label of current transformer (within iteration) + + t : :obj:`pd.Series` + pd.Series containing attributes for transformer component (row-wise data entries) + + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + outflow_args_el: :obj:`dict` + Dictionary holding the values for electrical outflow arguments + + outflow_args_th: :obj:`dict` + Dictionary holding the values for thermal outflow arguments + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including the demand sink elements + + """ + + # Build actual transformer element using the correct efficiency parameters + node_dict[i] = solph.Transformer( + label=i, + inputs={node_dict[t['from']]: solph.Flow()}, + outputs={ + node_dict[t['to_el']]: solph.Flow(**outflow_args_el), + node_dict[t['to_th']]: solph.Flow(**outflow_args_th)}, + + # Add conversion factors for electrical and thermal conversion for CHP plant to outflow_args. + # For variable CHP plants parameters are dependent on the operation mode (coupled or full condensation). + conversion_factors={ + node_dict[t['to_el']]: t['efficiency_el_CC'], + node_dict[t['to_th']]: t['efficiency_th_CC']}, + conversion_factor_full_condensation={node_dict[t['to_el']]: t['efficiency_el']}) + + return node_dict[i] + + +# TODO: Continue implementation. 
-> See documentation for the component +# which in addition to simple transformers includes a power loss index as +# well as additional constraints for a flexible unit operation. +# https://oemof.readthedocs.io/en/stable/api/oemof.solph.html#oemof.solph.components.ExtractionTurbineCHP +def build_ExtractionTurbineCHP_transformer(i, t, node_dict, outflow_args_el, outflow_args_th): + """ UNDER DEVERLOPMENT + """ + node_dict[i] = solph.components.ExtractionTurbineCHP( + label=i, + inputs={node_dict[t['from']]: solph.Flow()}, + outputs={ + node_dict[t['to_el']]: solph.Flow(**outflow_args_el), + node_dict[t['to_th']]: solph.Flow(**outflow_args_th)}, + + # 05.10.2018, JK: add conversion factors for electrical and thermal conversion for CHP plant to + # outflow_args. + # For variable CHP plants parameters are dependent on the operation mode (coupled or full condensation). + conversion_factors={ + node_dict[t['to_el']]: t['efficiency_el_CC'], + node_dict[t['to_th']]: t['efficiency_th_CC']}, + conversion_factor_full_condensation={node_dict[t['to_el']]: t['efficiency_el']}) + + +def build_condensing_transformer(i, t, node_dict, outflow_args_el): + """ Function used to actually build transformer elements and include a distinction between + CHP, var_CHP and condensing power plants. Function is called by create_dispatch_transformers + as well as by create_invest_transformers. 
+ + Parameters + ---------- + i : :obj:`str` + label of current transformer (within iteration) + + t : :obj:`pd.Series` + pd.Series containing attributes for transformer component (row-wise data entries) + + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + outflow_args_el: :obj:`dict` + Dictionary holding the values for electrical outflow arguments + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including the demand sink elements + + """ + + # 05.10.2018, JK: build actual transformer element using the correct efficiency parameters + node_dict[i] = solph.Transformer( + label=i, + inputs={node_dict[t['from']]: solph.Flow()}, + outputs={ + node_dict[t['to_el']]: solph.Flow(**outflow_args_el)}, + conversion_factors={node_dict[t['to_el']]: t['efficiency_el']}) + + return node_dict[i] + + +def create_transformers(node_dict, existing_transformers_df, + new_built_transformers_df, AggregateInput, + RollingHorizon, + operation_costs_df, ramping_costs_df, + investment_costs_df, WACC_df, cost_timeseries_df, + min_max_timeseries_df, MaxInvest, + starttime, endtime, endyear, + optimization_timeframe=1, + counter=0, + transformers_init_df=pd.DataFrame(), + years_per_timeslice=0): + """ Function to create all transformer elements. Calls functions for + creating existing resp. new built transformers (fleets). + + Parameters + ---------- + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem (not including + transformers) + + existing_transformers_df : :obj:`pd.DataFrame` + pd.DataFrame containing the existing transformer elements to be created + (i.e. existing plants for which no investments occur) + + new_built_transformers_df : :obj:`pd.DataFrame` + pd.DataFrame containing the potentially new built transformer elements + to be created (i.e. 
investment alternatives which may be invested in) + + AggregateInput: :obj:`boolean` + If True an aggregated transformers input data set is used, elsewhise + the full transformers input data set is used + + RollingHorizon: :obj:`boolean` + If True a myopic (Rolling horizon) optimization run is carried out, + elsewhise a simple overall optimization approach is chosen + + operation_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing the operation costs data + + ramping_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing the ramping costs data + + investment_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing the operation costs data + + WACC_df : :obj:`pd.DataFrame` + pd.DataFrame containing the WACC data + + cost_timeseries_df : :obj:`pd.DataFrame` + pd.DataFrame containing the cost timeseries data + + min_max_timeseries_df : :obj:`pd.DataFrame` + pd.DataFrame containing min resp. max output of a transformer as a + timeseries in order to deal with (exogeneously determined) commissions + or decommissions during runtime + + starttime : :obj:`str` + The starting timestamp of the optimization timeframe + + endtime : :obj:`str` + The starting timestamp of the optimization timeframe + + optimization_timeframe : :obj:`str` + The length of the overall optimization timeframe in years + (used for determining technology specific investment limits) + + counter : :obj:`int` + number of rolling horizon iteration + + transformers_init_df : :obj:`pd.DataFrame` + pd.DataFrame containing the existing capacities for the transformer + elements from prior myopic (Rolling horizon) iteration + + years_per_timeslice : :obj:`int` + Number of years of a timeslice (a given myopic iteration); may differ + for the last timesteps since it does not necessarily have to be + completely covered + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including + all source elements needed for the model run + + 
new_built_transformer_labels : :obj:`list` of :class:`str` + A list containing the labels for potentially new built transformers; + only returned if myopic approach is chosen (RollingHorizon = True) + + """ + node_dict = create_existing_transformers(existing_transformers_df, + AggregateInput, + node_dict, + operation_costs_df, + ramping_costs_df, + cost_timeseries_df, + min_max_timeseries_df, + starttime, + endtime) + + if not RollingHorizon: + + node_dict = create_new_built_transformers(new_built_transformers_df, + node_dict, + operation_costs_df, + investment_costs_df, + WACC_df, + cost_timeseries_df, + min_max_timeseries_df, + MaxInvest, + starttime, + endtime, + endyear, + optimization_timeframe) + + return node_dict + + else: + + node_dict, new_built_transformer_labels, endo_exo_exist_df = \ + create_new_built_transformers_RH(counter, new_built_transformers_df, + transformers_init_df, + node_dict, operation_costs_df, + investment_costs_df, + WACC_df, cost_timeseries_df, + min_max_timeseries_df, + MaxInvest, + starttime, endtime, + years_per_timeslice, + endyear) + + return node_dict, new_built_transformer_labels, endo_exo_exist_df + + +def create_existing_transformers(existing_transformers_df, AggregateInput, + node_dict, operation_costs_df, ramping_costs_df, + cost_timeseries_df, min_max_timeseries_df, + starttime, endtime): + """ Function to create the existing transformers elements (i.e. power plants) + by adding them to the dictionary of nodes. + + Only existing transformers (fleets) are created for which no investments + are considered. Considering investments in the future might be possible + via including "retrofit fleets". + + Parameters + ---------- + existing_transformers_df : :obj:`pd.DataFrame` + pd.DataFrame containing the existing transformer elements to be created + (i.e. 
existing plants for which no investments occur) + + AggregateInput: :obj:`boolean` + If True an aggregated transformers input data set is used, elsewhise + the full transformers input data set is used + + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + operation_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing the operation costs data + + ramping_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing the ramping costs data + + cost_timeseries_df : :obj:`pd.DataFrame` + pd.DataFrame containing the cost timeseries data + + min_max_timeseries_df : :obj:`pd.DataFrame` + pd.DataFrame containing min resp. max output of a transformer as a + timeseries in order to deal with (exogeneously determined) commissions + or decommissions during runtime + + starttime : :obj:`str` + The starting timestamp of the optimization timeframe + + endtime : :obj:`str` + The starting timestamp of the optimization timeframe + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including the demand sink elements + + """ + startyear_str = str(pd.to_datetime(starttime).year) + + for i, t in existing_transformers_df.iterrows(): + + # Don't build units whose lifetime is reached and whose cap is 0 + # This would cause an error in bounds if it were not included + if t['existing_' + startyear_str] == 0: + continue + + # Checks if it is an electricity unit. (Model could on principle be extended to other energy carriers, e.g. 
heat) + if t['Electricity']: + minimum_el_load = min_max_timeseries_df.loc[starttime:endtime, (i, 'min')].to_numpy() + maximum_el_load = min_max_timeseries_df.loc[starttime:endtime, (i, 'max')].to_numpy() + + outflow_args_el = { + 'nominal_value': t['existing_' + startyear_str], + # Operation costs per energy carrier / transformer type (not fuel costs itself) + 'variable_costs': (cost_timeseries_df.loc[starttime:endtime, ('operation_costs', t['bus_technology'])] + * operation_costs_df.loc[t['bus_technology'], '2015']).to_numpy(), + 'min': minimum_el_load, + 'max': maximum_el_load, + # There seems to be some trouble with + # setting gradients properly if investments are included as well. + # Thus, gradients are neglected here for testing purposes. + # TODO when time, JK/YW: Introduce a fix in oemof itself for this... + # 'positive_gradient': {'ub': t['grad_pos'], + # 'costs': np.array(cost_timeseries_df.loc[starttime:endtime, + # ('ramping_costs', t['from'])] * ramping_costs_df.loc[t['from'], 2015])}, + # 'negative_gradient': {'ub': t['grad_neg'], + # 'costs': np.array(cost_timeseries_df.loc[starttime:endtime, + # ('ramping_costs', t['from'])] * ramping_costs_df.loc[t['from'], 2015])}, + # outages are a historical relict from a prior version (v0.0.9) + # 'outages': {t['outages'], 'period', 'output'} + } + + # dict outflow_args_th contains all keyword arguments that are + # identical for the thermal output of all CHP (and var_CHP) transformers. + outflow_args_th = { + # TODO, Substitute this through THERMAL capacity! + 'nominal_value': t['capacity'], + # TODO: Check if variable costs are doubled for CHP plants! 
+ 'variable_costs': (cost_timeseries_df.loc[starttime:endtime, ('operation_costs', t['bus_technology'])] * + operation_costs_df.loc[t['bus_technology'], '2015']).to_numpy(), + 'min': minimum_el_load, + 'max': maximum_el_load, + } + + # Do not include minimum load values if full input data set is used + if not AggregateInput: + outflow_args_el.pop('min') + + # Check if plant is a CHP plant (CHP = 1) in order to set efficiency parameters accordingly. + if t['CHP']: + node_dict[i] = build_CHP_transformer(i, t, node_dict, outflow_args_el, outflow_args_th) + + # Check if plant is a variable CHP plant (i.e. an extraction turbine) and + # set efficiency parameters accordingly. + elif t['var_CHP']: + node_dict[i] = build_var_CHP_transformer(i, t, node_dict, outflow_args_el, outflow_args_th) + + # Else, i.e. no CHP plant + else: + node_dict[i] = build_condensing_transformer(i, t, node_dict, outflow_args_el) + + return node_dict + + +def existing_transformers_exo_decom(transformers_decommissioning_df, + startyear, + endyear): + """ Function takes the decommissioned capacity of transformers and groups + them by bus_technology + + Parameters + ---------- + transformers_decommissioning_df : :obj:`pandas.DataFrame` + A pd.DataFrame containing the transformers' decommissioning data + + startyear : :obj:`int` + Starting year of the optimization run + + endyear : :obj:`int` + End year of the optimization run + + Returns + ------- + existing_transformers_exo_decom_capacity_df : :obj:`pandas.DataFrame + DataFrame containing the decommissioned capacity of each bus_technology + from the existing transformers from startyear until endyear. 
+ """ + existing_transformers_exo_decom_capacity_df = transformers_decommissioning_df.groupby("bus_technology").sum() + existing_transformers_exo_decom_capacity_df = \ + existing_transformers_exo_decom_capacity_df.loc[:, + 'decommissioned_' + str(startyear):'decommissioned_' + str(endyear)] + + return existing_transformers_exo_decom_capacity_df + + +def calc_exist_cap(t, startyear, endyear, col_name): + """ Calculate existing capacity for a given oemof element / column name """ + # JF 10.01.2020 initialize existing_capacity + existing_capacity = t[col_name + str(startyear)] + # JF 10.01.2020 iterate over all upcoming years and increase increase exisiting_capaacity by the exisiting capacity of each year + for year in range(startyear + 1, endyear + 1): + existing_capacity += t[col_name + str(year)] - t[col_name + str(year - 1)] + if existing_capacity <= t[col_name + str(startyear)]: + existing_capacity = t[col_name + str(startyear)] + return existing_capacity + + +def create_new_built_transformers(new_built_transformers_df, + node_dict, operation_costs_df, investment_costs_df, + WACC_df, cost_timeseries_df, min_max_timeseries_df, + MaxInvest, starttime, endtime, endyear, + optimization_timeframe): + """ Function to create the potentially new built transformers elements + (i.e. investment alternatives for new built power plants) + by adding them to the dictionary of nodes. + + New built units are modelled as power plant fleets per energy carrier / + technology. + + Parameters + ---------- + new_built_transformers_df : :obj:`pd.DataFrame` + pd.DataFrame containing the potentially new built transformer elements + to be created (i.e. 
investment alternatives which may be invested in) + + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + operation_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing the operation costs data + + investment_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing the operation costs data + + WACC_df : :obj:`pd.DataFrame` + pd.DataFrame containing the WACC data + + cost_timeseries_df : :obj:`pd.DataFrame` + pd.DataFrame containing the cost timeseries data + + MaxInvest : :obj:`boolean` + Boolean parameter indicating whether or not to set investment bounds + + min_max_timeseries_df : :obj:`pd.DataFrame` + pd.DataFrame containing min resp. max output of a transformer as a + timeseries in order to deal with (exogeneously determined) commissions + or decommissions during runtime + + starttime : :obj:`str` + The starting timestamp of the optimization timeframe + + endtime : :obj:`str` + The starting timestamp of the optimization timeframe + + optimization_timeframe : :obj:`str` + The length of the overall optimization timeframe in years + (used for determining technology specific investment limits) + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including the demand sink elements + + """ + startyear = pd.to_datetime(starttime).year + + for i, t in new_built_transformers_df.iterrows(): + + existing_capacity = calc_exist_cap(t, startyear, endyear, 'existing_') + # overall invest limit is the amount of capacity that can be installed + # at maximum, i.e. 
the potential limit of a given technology + overall_invest_limit = t['overall_invest_limit'] + annual_invest_limit = t['max_invest'] + + # invest_max is the amount of capacity that can maximally be installed + # within the optimization timeframe + if overall_invest_limit - existing_capacity <= 0: + max_bound_transformer = 0 + else: + max_bound_transformer = overall_invest_limit - existing_capacity + + if MaxInvest: + invest_max = np.min([max_bound_transformer, annual_invest_limit * optimization_timeframe]) + else: + if 'water' in i: + invest_max = np.min([max_bound_transformer, annual_invest_limit * optimization_timeframe]) + else: + invest_max = float('inf') + + minimum = (min_max_timeseries_df.loc[starttime:endtime, (i, 'min')]).to_numpy() + maximum = (min_max_timeseries_df.loc[starttime:endtime, (i, 'max')]).to_numpy() + + # Checks if it is an electricity unit. (Model could on principle be extended to other energy carriers, e.g. heat) + if t['Electricity']: + + # 08.10.2018, JK: Definition of parameter dicts in the following + outflow_args_el = { + # Nominal value is not applicable in investment mode + # Operation costs per energy carrier / transformer type + 'variable_costs': (cost_timeseries_df.loc[starttime:endtime, ('operation_costs', t['bus_technology'])] + * operation_costs_df.loc[t['bus_technology'], '2015']).to_numpy(), + 'min': minimum, + 'max': maximum, + + # NOTE: Ramping is not working in the investment mode + # TODO, JK / YW: Introduce a fix in oemof.solph itself... 
when you find the time for that + # 'positive_gradient': {'ub': t['grad_pos'], 'costs': t['ramp_costs']}, + # 'negative_gradient': {'ub': t['grad_neg'], 'costs': t['ramp_costs']}, + + # NOTE: Outages are a historical relict from prior version (v0.0.9) + # 'outages': {t['outages'], 'period', 'output'} + + 'investment': solph.Investment( + maximum=invest_max, + + # New built plants are installed at capacity costs for the start year + # (of each myopic iteration = investment possibility) + ep_costs=economics.annuity(capex=investment_costs_df.loc[t['bus_technology'], startyear], + n=t['unit_lifetime'], + wacc=WACC_df.loc[t['bus_technology'], startyear]), + + existing=existing_capacity) + } + + # dict outflow_args_th contains all keyword arguments that are + # identical for the thermal output of all CHP (and var_CHP) transformers. + outflow_args_th = { + # Substitute this through THERMAL capacity! + 'nominal_value': existing_capacity, + # TODO: Check if variable costs are doubled for CHP plants! + 'variable_costs': (cost_timeseries_df.loc[starttime:endtime, + ('operation_costs', t['bus_technology'])] + * operation_costs_df.loc[t['bus_technology'], '2015']).to_numpy(), + 'min': minimum, + 'max': maximum + } + + # Check if plant is a CHP plant (CHP = 1) in order to set efficiency parameters accordingly. + if t['CHP']: + node_dict[i] = build_CHP_transformer(i, t, node_dict, outflow_args_el, outflow_args_th) + + # Check if plant is a variable CHP plant (i.e. an extraction turbine) + # and set efficiency parameters accordingly. + elif t['var_CHP']: + node_dict[i] = build_var_CHP_transformer(i, t, node_dict, outflow_args_el, outflow_args_th) + + # Else, i.e. 
no CHP plant + else: + node_dict[i] = build_condensing_transformer(i, t, node_dict, outflow_args_el) + + return node_dict + + +def create_new_built_transformers_RH(counter, new_built_transformers_df, + transformers_init_df, + node_dict, operation_costs_df, + investment_costs_df, + WACC_df, cost_timeseries_df, + min_max_timeseries_df, MaxInvest, + starttime, endtime, years_per_timeslice, + endyear): + """ Function to create the potentially new built transformers elements + (i.e. investment alternatives for new built power plants) + by adding them to the dictionary of nodes used for the myopic modelling + approach. + + New built units are modelled as power plant fleets per energy carrier / + technology. + + NOTE: There are two kinds of existing capacity: + - One being exogeneously determined and + - one being chosen endogeneously by the model itself (investments + from previous iterations). + + Parameters + ---------- + counter : :obj:`int` + number of rolling horizon iteration + + new_built_transformers_df : :obj:`pd.DataFrame` + pd.DataFrame containing the potentially new built transformer elements + to be created (i.e. investment alternatives which may be invested in) + + transformers_init_df : :obj:`pd.DataFrame` + pd.DataFrame containing the existing capacities for the transformer + elements from prior myopic (Rolling horizon) iteration + + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + operation_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing the operation costs data + + investment_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing the operation costs data + + WACC_df : :obj:`pd.DataFrame` + pd.DataFrame containing the WACC data + + cost_timeseries_df : :obj:`pd.DataFrame` + pd.DataFrame containing the cost timeseries data + + min_max_timeseries_df : :obj:`pd.DataFrame` + pd.DataFrame containing min resp. 
max output of a transformer as a + timeseries in order to deal with (exogeneously determined) commissions + or decommissions during runtime + + starttime : :obj:`str` + The starting timestamp of the optimization timeframe + + endtime : :obj:`str` + The end timestamp of the optimization timeframe + + years_per_timeslice : :obj:`int` + Number of years of a timeslice (a given myopic iteration); may differ + for the last timesteps since it does not necessarily have to be + completely covered + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including the demand sink elements + + new_built_transformer_labels : :obj:`list` of :class:`str + A list containing the labels for potentially new built transformers + + """ + startyear = pd.to_datetime(starttime).year + + # Use a list to store capacities installed for new built alternatives + new_built_transformer_labels = [] + endo_exo_exist_df = pd.DataFrame(columns=['Existing_Capacity_endo', 'old_exo'], + index=new_built_transformer_labels) + + for i, t in new_built_transformers_df.iterrows(): + new_built_transformer_labels.append(i) + + exo_exist = calc_exist_cap(t, startyear, endyear, 'existing_') + + if counter != 0: + # Obtain existing capacities from initial states DataFrame to increase existing capacity + existing_capacity = exo_exist + transformers_init_df.loc[i, 'endo_cumulated'] + + else: + # Set existing capacity for 0th iteration + existing_capacity = exo_exist + + endo_exo_exist_df.loc[i, 'old_exo'] = exo_exist + endo_exo_exist_df.loc[i, 'Existing_Capacity_endo'] = existing_capacity - exo_exist + + overall_invest_limit = t['overall_invest_limit'] + annual_invest_limit = t['max_invest'] + + if overall_invest_limit - existing_capacity <= 0: + max_bound_transformer = 0 + else: + max_bound_transformer = overall_invest_limit - existing_capacity + # invest_max is the amount of capacity that can maximally be installed + # within the 
optimization timeframe (including existing capacity) + if MaxInvest: + invest_max = np.min([max_bound_transformer, annual_invest_limit * years_per_timeslice]) + else: + if 'water' in i: + invest_max = np.min([max_bound_transformer, annual_invest_limit * years_per_timeslice]) + else: + invest_max = float('inf') + + minimum = (min_max_timeseries_df.loc[starttime:endtime, (i, 'min')]).to_numpy() + maximum = (min_max_timeseries_df.loc[starttime:endtime, (i, 'max')]).to_numpy() + + if t['Electricity']: + + outflow_args_el = { + # Operation costs per energy carrier / transformer type + 'variable_costs': (cost_timeseries_df.loc[starttime:endtime, ('operation_costs', t['bus_technology'])] * + operation_costs_df.loc[t['bus_technology'], '2015']).to_numpy(), + 'min': minimum, + 'max': maximum, + + # NOTE: Ramping is not working in the investment mode + # TODO, JK / YW: Introduce a fix in oemof.solph itself... when you find the time for that + # 'positive_gradient': {'ub': t['grad_pos'], 'costs': t['ramp_costs']}, + # 'negative_gradient': {'ub': t['grad_neg'], 'costs': t['ramp_costs']}, + + # NOTE: Outages are a historical relict from prior version (v0.0.9) + # 'outages': {t['outages'], 'period', 'output'} + + # investment must be between investment limit applicable and + # already existing capacity (for myopic optimization) + 'investment': solph.Investment( + maximum=invest_max, + # New built plants are installed at capacity costs for the start year + # (of each myopic iteration = investment possibility) + ep_costs=economics.annuity(capex=investment_costs_df.loc[t['bus_technology'], startyear], + n=t['unit_lifetime'], + wacc=WACC_df.loc[t['bus_technology'], startyear]), + + existing=existing_capacity) + } + + # dict outflow_args_th contains all keyword arguments that are + # identical for the thermal output of all CHP (and var_CHP) transformers. + outflow_args_th = { + # Substitute this through THERMAL capacity! 
+ 'nominal_value': existing_capacity, + # TODO: Check if variable costs are doubled for CHP plants! + 'variable_costs': (cost_timeseries_df.loc[starttime:endtime, ('operation_costs', t['bus_technology'])] + * operation_costs_df.loc[t['bus_technology'], '2015']).to_numpy(), + 'min': minimum, + 'max': maximum + } + + # Check if plant is a CHP plant (CHP = 1) in order to set efficiency parameters accordingly. + if t['CHP']: + node_dict[i] = build_CHP_transformer(i, t, node_dict, outflow_args_el, outflow_args_th) + + # Check if plant is a variable CHP plant (i.e. an extraction turbine) + # and set efficiency parameters accordingly. + elif t['var_CHP']: + node_dict[i] = build_var_CHP_transformer(i, t, node_dict, outflow_args_el, outflow_args_th) + + # Else, i.e. no CHP plant + else: + node_dict[i] = build_condensing_transformer(i, t, node_dict, outflow_args_el) + + return node_dict, new_built_transformer_labels, endo_exo_exist_df + + +def new_transformers_exo(new_transformers_de_com_df, + investment_costs_df, + WACC_df, + startyear, + endyear, + IR=0.02, + discount=False): + """ Function calculates dicounted investment costs from exogenously + new built transformers from new_transformers_de_com_df + which are built after the model-endogeneous investment year. 
Furthermore, the commissioned + and decommissioned capacity from startyear until endyear for each new built + transformer are stored in a seperate DataFrame + + Parameters + ---------- + new_transformers_de_com_df : :obj:`pandas.DataFrame` + pd.DataFrame containing the new built transformers' (de)commissioning data + + investment_costs_df: :obj:`pandas.DataFrame` + pd.DataFrame containing the Investment costs for new built transformers + + WACC_df : :obj:`pd.DataFrame` + pd.DataFrame containing the WACC data + + startyear : :obj:`int` + Starting year of the optimization run + + endyear : :obj:`int` + End year of the optimization run + + IR : :obj:`float` + The interest rate to be applied for discounting (only if discount is True) + + discount : :obj:`boolean` + If True, nominal values will be dicounted + If False, real values have to be used as model inputs (default) + + Returns + ------- + new_transformers_exo_com_costs_df : :obj:`pandas.core.frame.DataFrame` + Dataframe containing total discounted cost for new built transformers + from startyear until endyear + + new_transformers_exo_com_cacity_df : :obj:`pandas.core.frame.DataFrame` + Dataframe containing the commissioned capacity of all exogenously new built + transformers in between startyear and endyear + + new_transformers_exo_decom_capacity_df : :obj:`pandas.core.frame.DataFrame` + Dataframe containing the decommissioned capacity of all exogenously new built + transformers in between startyear and endyear + + """ + + new_transformers_exo_com_costs_df = pd.DataFrame(index=new_transformers_de_com_df["bus_technology"]) + new_transformers_exo_com_costs_df["labels"] = new_transformers_de_com_df.index.values + + # Use temporary DataFrames with bus technology as index, + # merge these together and apply function calc_annuity + # implementation of oemof.economics.annuity is not applicable for pd.Series + costs_df = pd.merge(investment_costs_df, WACC_df, on="transformer_type", suffixes=["_invest", "_WACC"]) + 
all_cols_df = pd.merge(new_transformers_de_com_df, costs_df, + left_on="bus_technology", right_on="transformer_type").set_index("bus_technology") + + for year in range(startyear, endyear + 1): + year_str = str(year) + new_transformers_exo_com_costs_df['exo_cost_' + year_str] = \ + all_cols_df['commissioned_' + year_str] * calc_annuity( + capex=all_cols_df[year_str + '_invest'], + n=all_cols_df['unit_lifetime'], + wacc=all_cols_df[year_str + '_WACC']) + + if discount: + new_transformers_exo_com_costs_df['exo_cost_' + year_str] = \ + new_transformers_exo_com_costs_df['exo_cost_' + year_str].div( + ((1 + IR) ** (year - startyear))) + + new_transformers_exo_com_costs_df = new_transformers_exo_com_costs_df.reset_index(drop=True).set_index('labels') + + # extract commissioning resp. decommissioning data + new_transformers_exo_com_cacity_df = new_transformers_de_com_df.loc[ + :, 'commissioned_' + str(startyear): 'commissioned_' + str(endyear)] + new_transformers_exo_decom_capacity_df = new_transformers_de_com_df.loc[ + :, 'decommissioned_' + str(startyear):'decommissioned_' + str(endyear)] + + return new_transformers_exo_com_costs_df, new_transformers_exo_com_cacity_df, new_transformers_exo_decom_capacity_df + + +def create_storages(node_dict, + existing_storages_df, + new_built_storages_df, + RollingHorizon, + MaxInvest, + storage_var_costs_df, + storage_investment_costs_df, + storage_pump_investment_costs_df, + storage_turbine_investment_costs_df, + WACC_df, + starttime, + endyear, + optimization_timeframe=1, + counter=0, + storages_init_df=pd.DataFrame(), + years_per_timeslice=0): + """ Function to read in data from storages table and to create the storages elements + by adding them to the dictionary of nodes. 
+ + Parameters + ---------- + node_dict : :obj:`dict` of :class:`nodes ` + Dictionary containing all nodes of the EnergySystem + + existing_storages_df : :obj:`pd.DataFrame` + pd.DataFrame containing the existing storages elements to be created + + new_built_storages_df : :obj:`pd.DataFrame` + pd.DataFrame containing the potentially new built storages elements + to be created + + RollingHorizon: :obj:`boolean` + If True a myopic (Rolling horizon) optimization run is carried out, + elsewhise a simple overall optimization approach is chosen + + storage_var_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing variable costs for storages + + storage_investment_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing investment costs for storages capacity + + storage_pump_investment_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing investment costs for storages infeed + + storage_turbine_investment_costs_df : :obj:`pd.DataFrame` + pd.DataFrame containing investment costs forfor storages outfeed + + WACC_df : :obj:`pd.DataFrame` + pd.DataFrame containing the WACC data + + starttime : :obj:`str` + Starting time of the optimization run + + optimization_timeframe : :obj:`str` + The length of the overall optimization timeframe in years + (used for determining technology specific investment limits) + + Returns + ------- + node_dict : :obj:`dict` of :class:`nodes ` + Modified dictionary containing all nodes of the EnergySystem including the demand sink elements + + """ + startyear = pd.to_datetime(starttime).year + + # Create existing storages objects + for i, s in existing_storages_df.iterrows(): + node_dict[i] = build_existing_storage(i, s, node_dict, + storage_var_costs_df, + startyear) + + if not RollingHorizon: + + # Create potentially new built storages objects + for i, s in new_built_storages_df.iterrows(): + node_dict[i] = build_new_built_storage(i, s, node_dict, + storage_var_costs_df, + storage_investment_costs_df, + storage_pump_investment_costs_df, + 
def build_existing_storage(i, s, node_dict,
                           storage_var_costs_df,
                           startyear):
    """Create one already existing storage unit and add it to node_dict.

    Called by create_invest_storages as well as create_invest_storages_RH;
    kept as a separate function to increase code readability.

    Parameters
    ----------
    i : :obj:`str`
        Label of the current storage unit (within iteration)

    s : :obj:`pd.Series`
        pd.Series containing attributes for the storage component
        (row-wise data entries)

    node_dict : :obj:`dict` of :class:`nodes`
        Dictionary containing all nodes of the EnergySystem

    storage_var_costs_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the storage variable costs

    startyear : :obj:`int`
        The start year of the optimization timeframe

    Returns
    -------
    node_dict[i] : The newly created GenericStorage element
    """
    bus = node_dict[s['bus']]
    max_level = s['max_storage_level']
    var_costs = storage_var_costs_df.loc[i, startyear]

    pump_flow = solph.Flow(
        nominal_value=s[f'existing_pump_{startyear}'],
        variable_costs=var_costs,
        max=max_level,
    )
    turbine_flow = solph.Flow(
        nominal_value=s[f'existing_turbine_{startyear}'],
        variable_costs=var_costs,
        max=max_level,
    )

    node_dict[i] = solph.components.GenericStorage(
        label=i,
        inputs={bus: pump_flow},
        outputs={bus: turbine_flow},
        nominal_storage_capacity=s[f'existing_{startyear}'],
        # Loss rate is scaled with the usable storage level band.
        # TODO(review): check this scaling against the input data convention.
        loss_rate=s['loss_rate'] * max_level,
        initial_storage_level=s['initial_storage_level'],
        # balanced=True forces the final storage level to equal the initial
        # one at the end of the optimization horizon.
        balanced=True,
        max_storage_level=max_level,
        min_storage_level=s['min_storage_level'],
        inflow_conversion_factor=s['efficiency_pump'],
        outflow_conversion_factor=s['efficiency_turbine'],
    )

    return node_dict[i]
def build_new_built_storage(i, s, node_dict,
                            storage_var_costs_df,
                            storage_investment_costs_df,
                            storage_pump_investment_costs_df,
                            storage_turbine_investment_costs_df,
                            WACC_df,
                            MaxInvest,
                            startyear,
                            endyear,
                            optimization_timeframe):
    """Create one potentially new-built storage unit with investment options.

    Called by create_invest_storages; kept as a separate function to
    increase code readability.

    Parameters
    ----------
    i : :obj:`str`
        Label of the current storage unit (within iteration)

    s : :obj:`pd.Series`
        pd.Series containing attributes for the storage component
        (row-wise data entries)

    node_dict : :obj:`dict` of :class:`nodes`
        Dictionary containing all nodes of the EnergySystem

    storage_var_costs_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the storage variable costs

    storage_investment_costs_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the storage capacity investment costs

    storage_pump_investment_costs_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the storage pump investment costs

    storage_turbine_investment_costs_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the storage turbine investment costs

    WACC_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the WACC data

    MaxInvest : :obj:`bool`
        If True, apply technology-specific annual investment limits

    startyear : :obj:`int`
        The start year of the optimization timeframe

    endyear : :obj:`int`
        The end year of the optimization timeframe

    optimization_timeframe : :obj:`int`
        The length of the overall optimization timeframe in years
        (used for determining technology specific investment limits)

    Returns
    -------
    node_dict[i] : The newly created GenericStorage element
    """
    def remaining_potential(limit, existing_cap):
        # Remaining technology potential, clipped at zero so no negative
        # investment bound is handed to the solver.
        diff = limit - existing_cap
        return 0 if diff <= 0 else diff

    # Capacity already (exogenously) commissioned within the horizon
    existing_pump = calc_exist_cap(s, startyear, endyear, 'existing_pump_')
    existing_turbine = calc_exist_cap(s, startyear, endyear, 'existing_turbine_')
    existing = calc_exist_cap(s, startyear, endyear, 'existing_')

    # Overall invest limit = maximum installable capacity of the technology
    pump_potential = remaining_potential(s['overall_invest_limit_pump'], existing_pump)
    turbine_potential = remaining_potential(s['overall_invest_limit_turbine'], existing_turbine)
    cap_potential = remaining_potential(s['overall_invest_limit'], existing)

    # invest_max = capacity that can at most be installed within the horizon
    if MaxInvest:
        invest_max_pump = np.min(
            [pump_potential, s['max_invest_pump'] * optimization_timeframe])
        invest_max_turbine = np.min(
            [turbine_potential, s['max_invest_turbine'] * optimization_timeframe])
        invest_max = np.min(
            [cap_potential, s['max_invest'] * optimization_timeframe])
    else:
        invest_max_pump = invest_max_turbine = invest_max = float('inf')

    wacc = WACC_df.loc[i, startyear]
    bus = node_dict[s['bus']]
    var_costs = storage_var_costs_df.loc[i, startyear]
    max_level = s['max_storage_level']

    node_dict[i] = solph.components.GenericStorage(
        label=i,
        inputs={bus: solph.Flow(
            variable_costs=var_costs,
            max=max_level,
            investment=solph.Investment(
                maximum=invest_max_pump,
                ep_costs=economics.annuity(
                    capex=storage_pump_investment_costs_df.loc[i, startyear],
                    n=s['unit_lifetime_pump'],
                    wacc=wacc),
                existing=existing_pump))},
        outputs={bus: solph.Flow(
            variable_costs=var_costs,
            max=max_level,
            investment=solph.Investment(
                maximum=invest_max_turbine,
                ep_costs=economics.annuity(
                    capex=storage_turbine_investment_costs_df.loc[i, startyear],
                    n=s['unit_lifetime_turbine'],
                    wacc=wacc),
                existing=existing_turbine))},
        loss_rate=s['loss_rate'] * max_level,
        initial_storage_level=s['initial_storage_level'],
        # balanced=True: final storage level must equal the initial one
        balanced=True,
        max_storage_level=max_level,
        min_storage_level=s['min_storage_level'],
        invest_relation_input_output=s['invest_relation_input_output'],
        invest_relation_input_capacity=s['invest_relation_input_capacity'],
        invest_relation_output_capacity=s['invest_relation_output_capacity'],
        inflow_conversion_factor=s['efficiency_pump'],
        outflow_conversion_factor=s['efficiency_turbine'],
        investment=solph.Investment(
            maximum=invest_max,
            ep_costs=economics.annuity(
                capex=storage_investment_costs_df.loc[i, startyear],
                n=s['unit_lifetime'],
                wacc=wacc),
            existing=existing))

    return node_dict[i]
def build_new_built_storage_RH(counter, i, s,
                               storages_init_df,
                               node_dict,
                               storage_var_costs_df,
                               storage_investment_costs_df,
                               storage_pump_investment_costs_df,
                               storage_turbine_investment_costs_df,
                               WACC_df,
                               MaxInvest,
                               startyear,
                               endyear,
                               years_per_timeslice,
                               endo_exo_exist_stor_df):
    """ Function used to actually build investment storage elements
    (new built units only) for a rolling horizon model run.
    Function is called by create_invest_storages_RH.
    Separate function definition in order to increase code readability.

    Parameters
    ----------
    counter : :obj:`int`
        Number of the current rolling horizon iteration (0-based)

    i : :obj:`str`
        Label of the current storage element (within iteration)

    s : :obj:`pd.Series`
        pd.Series containing attributes for the storage component
        (row-wise data entries)

    storages_init_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the storage states from previous iterations
        as well as the already existing capacity

    node_dict : :obj:`dict` of :class:`nodes`
        Dictionary containing all nodes of the EnergySystem

    storage_var_costs_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the storage variable costs

    storage_investment_costs_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the storage capacity investment costs

    storage_pump_investment_costs_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the storage pump investment costs

    storage_turbine_investment_costs_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the storage turbine investment costs

    WACC_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the WACC data

    MaxInvest : :obj:`bool`
        If True, apply technology-specific annual investment limits

    startyear : :obj:`int`
        The first year of the current rolling horizon timeslice

    endyear : :obj:`int`
        The last year of the current rolling horizon timeslice

    years_per_timeslice : :obj:`int`
        Number of years of a timeslice (a given myopic iteration); may differ
        for the last timesteps since it does not necessarily have to be
        completely covered

    endo_exo_exist_stor_df : :obj:`pd.DataFrame`
        pd.DataFrame tracking the split between endogenous (model-built) and
        exogenous (given) capacity; the row for storage i is updated here

    Returns
    -------
    node_dict[i] : The newly created GenericStorage element

    endo_exo_exist_stor_df : The updated endogenous/exogenous capacity split
    """
    # Exogenously commissioned capacity within the current timeslice
    exo_exist_pump = calc_exist_cap(s, startyear, endyear, 'existing_pump_')
    exo_exist_turbine = calc_exist_cap(s, startyear, endyear, 'existing_turbine_')
    exo_exist_cap = calc_exist_cap(s, startyear, endyear, 'existing_')

    if counter != 0:
        # Obtain capacity for the last timestep as well as existing
        # capacities from storages_init_df: total existing capacity is the
        # exogenous capacity plus what the model built in prior iterations.
        existing_pump = exo_exist_pump + storages_init_df.loc[i, 'Existing_Inflow_Power']
        existing_turbine = exo_exist_turbine + storages_init_df.loc[i, 'Existing_Outflow_Power']
        existing = exo_exist_cap + storages_init_df.loc[i, 'Existing_Capacity_Storage']
    else:
        # Set values for 0th iteration (empty DataFrame): exogenous only
        existing_pump = exo_exist_pump
        existing_turbine = exo_exist_turbine
        existing = exo_exist_cap

    # Prevent potential zero division for storage states.
    # NOTE(review): for counter == 0 with existing != 0 this still reads
    # 'Capacity_Last_Timestep' from storages_init_df, which is described as
    # empty in the 0th iteration -- confirm it is pre-initialized upstream.
    if existing != 0:
        initial_storage_level_last = storages_init_df.loc[i, 'Capacity_Last_Timestep'] / existing
    else:
        initial_storage_level_last = s['initial_storage_level']

    # Book-keeping: record the exogenous shares ...
    endo_exo_exist_stor_df.loc[i, 'old_exo_cap'] = exo_exist_cap
    endo_exo_exist_stor_df.loc[i, 'old_exo_turbine'] = exo_exist_turbine
    endo_exo_exist_stor_df.loc[i, 'old_exo_pump'] = exo_exist_pump

    # ... and the endogenous shares (total minus exogenous)
    endo_exo_exist_stor_df.loc[i, 'capacity_endo'] = existing - exo_exist_cap
    endo_exo_exist_stor_df.loc[i, 'turbine_endo'] = existing_turbine - exo_exist_turbine
    endo_exo_exist_stor_df.loc[i, 'pump_endo'] = existing_pump - exo_exist_pump

    # overall invest limit is the amount of capacity that can at maximum be
    # installed, i.e. the potential limit of a given technology
    overall_invest_limit_pump = s['overall_invest_limit_pump']
    overall_invest_limit_turbine = s['overall_invest_limit_turbine']
    overall_invest_limit = s['overall_invest_limit']

    annual_invest_limit_pump = s['max_invest_pump']
    annual_invest_limit_turbine = s['max_invest_turbine']
    annual_invest_limit = s['max_invest']

    # Remaining potential, clipped at zero so no negative bound is passed on
    if overall_invest_limit_pump - existing_pump <= 0:
        max_bound1 = 0
    else:
        max_bound1 = overall_invest_limit_pump - existing_pump

    if overall_invest_limit_turbine - existing_turbine <= 0:
        max_bound2 = 0
    else:
        max_bound2 = overall_invest_limit_turbine - existing_turbine

    if overall_invest_limit - existing <= 0:
        max_bound3 = 0
    else:
        max_bound3 = overall_invest_limit - existing

    # invest_max is the amount of capacity that can maximally be installed
    # within the current timeslice
    if MaxInvest:
        invest_max_pump = np.min([max_bound1, annual_invest_limit_pump * years_per_timeslice])
        invest_max_turbine = np.min([max_bound2, annual_invest_limit_turbine * years_per_timeslice])
        invest_max = np.min([max_bound3, annual_invest_limit * years_per_timeslice])
    else:
        invest_max_pump = float('inf')
        invest_max_turbine = float('inf')
        invest_max = float('inf')

    wacc = WACC_df.loc[i, startyear]

    node_dict[i] = solph.components.GenericStorage(
        label=i,

        inputs={node_dict[s['bus']]: solph.Flow(
            variable_costs=storage_var_costs_df.loc[i, startyear],
            max=s['max_storage_level'],
            investment=solph.Investment(
                maximum=invest_max_pump,
                # TODO, JK/YW: Julien added a division of capex by 120 here... Find out why!
                # 31.03.2020, JK: I assume this to be a relict from a hard code test which has not been cleaned up...
                # ep_costs=economics.annuity(capex=storage_pump_investment_costs_df.loc[i, startyear] / 120
                ep_costs=economics.annuity(capex=storage_pump_investment_costs_df.loc[i, startyear],
                                           n=s['unit_lifetime_pump'],
                                           wacc=wacc),
                existing=existing_pump)
        )},

        outputs={node_dict[s['bus']]: solph.Flow(
            variable_costs=storage_var_costs_df.loc[i, startyear],
            max=s['max_storage_level'],
            investment=solph.Investment(
                maximum=invest_max_turbine,
                ep_costs=economics.annuity(capex=storage_turbine_investment_costs_df.loc[i, startyear],
                                           n=s['unit_lifetime_turbine'],
                                           wacc=wacc),
                existing=existing_turbine)
        )},

        loss_rate=s['loss_rate'] * s['max_storage_level'],
        # Carry the storage level over from the previous iteration
        initial_storage_level=initial_storage_level_last,
        balanced=True,
        max_storage_level=s['max_storage_level'],
        min_storage_level=s['min_storage_level'],
        invest_relation_input_output=s['invest_relation_input_output'],
        invest_relation_input_capacity=s['invest_relation_input_capacity'],
        invest_relation_output_capacity=s['invest_relation_output_capacity'],

        inflow_conversion_factor=s['efficiency_pump'],
        outflow_conversion_factor=s['efficiency_turbine'],

        investment=solph.Investment(
            maximum=invest_max,
            ep_costs=economics.annuity(capex=storage_investment_costs_df.loc[i, startyear],
                                       n=s['unit_lifetime'],
                                       wacc=wacc),
            existing=existing)
    )

    return node_dict[i], endo_exo_exist_stor_df
def storages_exo(new_built_storages_df,
                 storage_turbine_inv_costs_df,
                 storage_pump_inv_costs_df,
                 storage_cap_inv_costs_df,
                 WACC_df,
                 startyear,
                 endyear,
                 IR=0.02,
                 discount=False):
    """Calculate (discounted) annuities and (de)commissioned capacities for
    exogenously new built storages between startyear and endyear.

    For every storage unit and year, the net change of the existing turbine,
    pump and storage capacity columns of ``new_built_storages_df`` is
    interpreted as commissioning (if positive, priced with an annuity) or
    decommissioning (if negative, no costs). If ``discount`` is True, the
    annuities are discounted back to ``startyear`` using interest rate ``IR``.

    Parameters
    ----------
    new_built_storages_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the new built storages' data

    storage_turbine_inv_costs_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the turbine investment costs for new built storages

    storage_pump_inv_costs_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the pump investment costs for new built storages

    storage_cap_inv_costs_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the capacity investment costs for new built storages

    WACC_df : :obj:`pd.DataFrame`
        pd.DataFrame containing the WACC data

    startyear : :obj:`int`
        Starting year of the (current) optimization run

    endyear : :obj:`int`
        End year of the (current) optimization run

    IR : :obj:`float`
        The interest rate to be applied for discounting (only if discount is True)

    discount : :obj:`bool`
        If True, nominal values will be discounted;
        if False, real values have to be used as model inputs (default)

    Returns
    -------
    storages_exo_com_costs_df : :obj:`pd.DataFrame`
        Total (discounted) annuities for exogenously new built storages
        from startyear until endyear

    storages_exo_com_capacity_df : :obj:`pd.DataFrame`
        Commissioned capacity of all exogenously new built storages
        in between startyear and endyear

    storages_exo_decom_capacity_df : :obj:`pd.DataFrame`
        Decommissioned capacity of all exogenously new built storages
        in between startyear and endyear
    """
    # One entry per storage part: (row suffix, capex DataFrame,
    # column prefix in new_built_storages_df, lifetime column)
    parts = [
        ('_turbine', storage_turbine_inv_costs_df,
         'existing_turbine_', 'unit_lifetime_turbine'),
        ('_pump', storage_pump_inv_costs_df,
         'existing_pump_', 'unit_lifetime_pump'),
        ('_cap', storage_cap_inv_costs_df,
         'existing_', 'unit_lifetime'),
    ]

    # Cartesian product of storage labels and part suffixes (NOT zip,
    # which would pair element-wise)
    new_index = [storage + suffix
                 for storage in new_built_storages_df.index.values
                 for suffix, _, _, _ in parts]
    storages_exo_com_costs_df = pd.DataFrame(index=new_index)
    storages_exo_com_capacity_df = pd.DataFrame(index=new_index)
    storages_exo_decom_capacity_df = pd.DataFrame(index=new_index)

    for storage in new_built_storages_df.index.values:
        for year in range(startyear, endyear + 1):
            year_str = str(year)

            for suffix, capex_df, col_prefix, lifetime_col in parts:
                row = storage + suffix

                # Net (de)commissioning between consecutive years.
                # NOTE(review): 2016 is hard-coded as the first data year,
                # where the whole existing column counts as commissioning --
                # confirm this matches the input data's first year.
                if year == 2016:
                    commissioning = new_built_storages_df.loc[
                        storage, col_prefix + year_str]
                    decommissioning = 0
                else:
                    commissioning = (
                        new_built_storages_df.loc[storage, col_prefix + year_str]
                        - new_built_storages_df.loc[storage, col_prefix + str(year - 1)])
                    decommissioning = -commissioning

                if commissioning >= 0:
                    # Positive net change: price it with an annuity
                    storages_exo_com_costs_df.loc[row, 'exo_cost_' + year_str] = (
                        commissioning * economics.annuity(
                            capex=capex_df.loc[storage, year],
                            n=new_built_storages_df.loc[storage, lifetime_col],
                            wacc=WACC_df.loc[storage, year]))
                    storages_exo_com_capacity_df.loc[
                        row, 'commissioned_' + year_str] = commissioning
                    storages_exo_decom_capacity_df.loc[
                        row, 'decommissioned_' + year_str] = 0
                else:
                    # Negative net change: pure decommissioning, no costs
                    storages_exo_com_costs_df.loc[row, 'exo_cost_' + year_str] = 0
                    storages_exo_com_capacity_df.loc[
                        row, 'commissioned_' + year_str] = 0
                    storages_exo_decom_capacity_df.loc[
                        row, 'decommissioned_' + year_str] = decommissioning

                if discount:
                    # Discount the annuity back to startyear.
                    # Bug fix: previously the pump and capacity rows were
                    # overwritten with the discounted *turbine* value
                    # (copy-paste of storage_part[0]), and `.div` was called
                    # on a scalar, which raises AttributeError.
                    cost = storages_exo_com_costs_df.loc[row, 'exo_cost_' + year_str]
                    storages_exo_com_costs_df.loc[row, 'exo_cost_' + year_str] = (
                        cost / ((1 + IR) ** (year - startyear)))

    return (storages_exo_com_costs_df,
            storages_exo_com_capacity_df,
            storages_exo_decom_capacity_df)
def read(rel_path):
    """Return the text content of *rel_path*, resolved relative to this file."""
    base_dir = os.path.abspath(os.path.dirname(__file__))
    with codecs.open(os.path.join(base_dir, rel_path), 'r') as handle:
        return handle.read()


def get_version(rel_path):
    """Extract ``__version__`` from the module at *rel_path* without importing it.

    Scans the file line by line for a ``__version__`` assignment and returns
    the quoted value; raises RuntimeError if no such line exists.
    """
    for line in read(rel_path).splitlines():
        if not line.startswith('__version__'):
            continue
        quote = '"' if '"' in line else "'"
        return line.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
# Package installation script entry point (setuptools). Mirrors the metadata
# declared in pyproject.toml and is kept for plain `pip install .` workflows.
# NOTE(review): the package is named 'pommesdispatch' although the surrounding
# repository appears to be pommesinvest -- looks like a copy-paste from the
# sibling project; confirm before releasing.
setup(
    name='pommesdispatch',
    # Version is single-sourced from the package's __init__.py
    version=get_version("pommesdispatch/__init__.py"),
    description='A bottom-up fundamental power market model '
                'for the German electricity sector',
    long_description=long_description,
    keywords=['power market', 'fundamental model', 'dispatch', 'power price',
              'oemof.solph'],
    url='https://github.com/pommes-public/pommesdispatch/',
    author=', '.join(__author__),
    author_email=__email__,
    license=__license__,
    # NOTE(review): mapping the root package to the empty string is unusual
    # (common forms are package_dir={'': 'src'} or omitting it) -- verify
    # that find_packages(where='') resolves as intended.
    package_dir={'': ''},
    packages=find_packages(where=''),
    entry_points={
        'console_scripts': [
            'run_pommes_dispatch=pommesdispatch.cli:run_pommes_dispatch'
        ],
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    project_urls={
        "Documentation": "https://pommesdispatch.readthedocs.io/",
        "Changelog": (
            "https://pommesdispatch.readthedocs.io/en/latest/changelog.html"
        ),
        "Issue Tracker":
            "https://github.com/pommes-public/pommesdispatch/issues",
    },
    install_requires=[
        'numpy',
        'pandas',
        'oemof.solph == 0.4.4',
        'pyyaml'
    ],
    python_requires='>=3.8',
    extras_require={'dev': ['pytest', 'sphinx', 'sphinx_rtd_theme',
                            "sphinx_copybutton"]},
    include_package_data=True,
    zip_safe=False,
)