Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merge Main into Dev #25

Merged
merged 5 commits into from
Oct 28, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions gtep/driver.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
from pyomo.core import TransformationFactory
from pyomo.contrib.appsi.solvers.highs import Highs
from pyomo.contrib.appsi.solvers.gurobi import Gurobi
import gurobipy as gp


data_path = "./gtep/data/5bus"
Expand Down
2 changes: 2 additions & 0 deletions gtep/gtep_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -756,6 +756,8 @@ def capacity_factor(b, renewableGen):
== m.renewableCapacity[renewableGen]
)


## TODO: (@jkskolf) add renewableExtended to this and anywhere else
@b.Constraint(m.renewableGenerators)
def operational_renewables_only(b, renewableGen):
return (
Expand Down
2 changes: 1 addition & 1 deletion gtep/gtep_solution.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,7 @@ def dump_json(self, filename="./gtep_solution_jscTest.json"):
with open(dump_filepath, "w") as fobj:
json.dump(self._to_dict(), fobj)

def _to_dict(self):
def _to_dict(self) -> dict:

results_dict = {
"solution_loader": self.results.solution_loader, # object
Expand Down
54 changes: 54 additions & 0 deletions gtep/tests/unit/test_validation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
import pyomo.common.unittest as unittest
from gtep.gtep_model import ExpansionPlanningModel
from gtep.gtep_data import ExpansionPlanningData
from gtep.gtep_solution import ExpansionPlanningSolution
from pyomo.core import TransformationFactory
from pyomo.contrib.appsi.solvers.highs import Highs
import logging

from gtep.validation import clone_timeseries, filter_pointers, populate_generators, populate_transmission

input_data_source = "./gtep/data/5bus"
output_data_source = "./gtep/tests/data/5bus_out"

def build_solution():
    """Build, solve, and dump the shared 5-bus expansion-planning solution.

    Renamed from ``test_solution``: a module-level function with a ``test_``
    prefix is collected and re-run by pytest, which would solve the MILP a
    second time (it already runs once at import, below) and emit a
    return-value warning.

    Returns:
        ExpansionPlanningSolution: solution loaded from the solved model.
    """
    data_object = ExpansionPlanningData()
    data_object.load_prescient(input_data_source)

    mod_object = ExpansionPlanningModel(
        stages=2,
        data=data_object.md,
        num_reps=2,
        len_reps=1,
        num_commit=6,
        num_dispatch=4,
    )
    mod_object.create_model()
    # linearize the GDP model before handing it to a MIP solver
    TransformationFactory("gdp.bound_pretransformation").apply_to(mod_object.model)
    TransformationFactory("gdp.bigm").apply_to(mod_object.model)
    opt = Highs()
    mod_object.results = opt.solve(mod_object.model)

    sol_object = ExpansionPlanningSolution()
    sol_object.load_from_model(mod_object)
    sol_object.dump_json("./gtep/tests/test_solution.json")
    return sol_object


# Solved once at import time; every test case below shares this solution.
solution = build_solution()

class TestValidation(unittest.TestCase):
    """Smoke tests for gtep.validation: each test runs one exporter end-to-end
    against the module-level ``solution`` and writes into ``output_data_source``.

    NOTE(review): unittest executes methods alphabetically, so
    test_clone_timeseries and test_filter_pointers run *before*
    test_populate_generators, yet filter_pointers reads the gen.csv that
    populate_generators writes — this only works if a previous run already
    left output files behind. TODO confirm intended ordering / add setUp.
    """

    def test_populate_generators(self):
        # writes the filtered gen.csv into output_data_source
        populate_generators(input_data_source, solution, output_data_source)


    def test_populate_transmission(self):
        # writes the filtered branch.csv into output_data_source
        populate_transmission(input_data_source, solution, output_data_source)

    def test_filter_pointers(self):
        # depends on output_data_source/gen.csv existing (see class NOTE)
        filter_pointers(input_data_source, output_data_source)

    def test_clone_timeseries(self):
        # copies the remaining input files across; asserts nothing explicitly
        clone_timeseries(input_data_source, output_data_source)
95 changes: 95 additions & 0 deletions gtep/validation.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
from pyomo.environ import *
from gtep.gtep_model import ExpansionPlanningModel
from gtep.gtep_solution import ExpansionPlanningSolution
import re
import os
import shutil
import logging

import pandas as pd

logger = logging.getLogger(__name__)

def populate_generators(
    data_input_path: str,
    sol_object: "ExpansionPlanningSolution",
    data_output_path: str,
) -> None:
    """Write ``<data_output_path>/gen.csv`` containing only the generators that
    survive to the final investment stage of ``sol_object``.

    Thermal generators are kept when a final-stage Installed / Operational /
    Extended indicator is on (binary value > 0.5); renewable generators get
    their "PMax MW" overwritten with the summed Installed + Operational +
    Extended capacity values.

    Args:
        data_input_path: directory holding the original prescient gen.csv
            (note that -c in a generator name indicates a candidate unit).
        sol_object: solved ExpansionPlanningSolution providing _to_dict().
        data_output_path: directory to write the filtered gen.csv (created
            if missing).
    """
    # load existing and candidate generators from initial prescient data
    input_df = pd.read_csv(data_input_path + "/gen.csv")

    # thermal: generator exists in the future grid if its final-stage status
    # is Installed, Operational, or Extended
    def gen_name_filter(gen_name):
        return "gen" in gen_name and (
            "Ext" in gen_name or "Ope" in gen_name or "Ins" in gen_name
        )

    solution_dict = sol_object._to_dict()["results"]["primals_tree"]
    # first key of the primals tree is taken as the final investment stage
    end_investment_stage = list(solution_dict.keys())[0]
    end_investment_solution_dict = {
        k: v["value"]
        for k, v in solution_dict[end_investment_stage].items()
        if gen_name_filter(k) and v["value"] > 0.5
    }
    # generator UID is the bracketed index in the variable name, e.g. var[UID]
    end_investment_gens = [
        re.search(r"\[.*\]", k).group(0)[1:-1]
        for k in end_investment_solution_dict.keys()
    ]

    # renewable: total capacity is installed + operational + extended values
    def renewable_name_filter(gen_name):
        return "renew" in gen_name and (
            "Ext" in gen_name or "Ope" in gen_name or "Ins" in gen_name
        )

    end_investment_renewable_dict = {
        k: v["value"]
        for k, v in solution_dict[end_investment_stage].items()
        if renewable_name_filter(k)
    }
    end_investment_renewable_gens = {
        re.search(r"\[.*\]", k).group(0)[1:-1]: 0
        for k in end_investment_renewable_dict.keys()
    }
    for k, v in end_investment_renewable_dict.items():
        end_investment_renewable_gens[re.search(r"\[.*\]", k).group(0)[1:-1]] += v
    for k, v in end_investment_renewable_gens.items():
        # .loc assignment instead of Series.mask(..., inplace=True): the
        # chained inplace form stops mutating the frame under pandas 3.0
        # copy-on-write (the original code carried a NOTE to that effect)
        input_df.loc[input_df["GEN UID"] == k, "PMax MW"] = v

    end_investment_gens += list(end_investment_renewable_gens.keys())
    # populate output dataframe with the surviving generators only
    output_df = input_df[input_df["GEN UID"].isin(end_investment_gens)]

    # TODO: (@jkskolf) should we update prices here? I think no, but ...
    os.makedirs(data_output_path, exist_ok=True)
    output_df.to_csv(data_output_path + "/gen.csv", index=False)

def populate_transmission(data_input_path, sol_object, data_output_path):
    """Write ``<data_output_path>/branch.csv`` keeping only branches whose
    final-stage Installed / Operational / Extended indicator is on.

    Mirrors populate_generators for the transmission network: the branch UID
    is extracted from the bracketed index of each selected variable name.
    """
    branches_df = pd.read_csv(data_input_path + "/branch.csv")

    # a variable names a branch status if it mentions a branch and one of the
    # Installed / Operational / Extended tags
    def _is_branch_status(var_name):
        if "bran" not in var_name:
            return False
        return any(tag in var_name for tag in ("Ext", "Ope", "Ins"))

    primals = sol_object._to_dict()["results"]["primals_tree"]
    final_stage = next(iter(primals))

    kept_branches = []
    for var_name, record in primals[final_stage].items():
        # > 0.5 treats the binary indicator as "on"
        if _is_branch_status(var_name) and record["value"] > 0.5:
            kept_branches.append(re.search(r"\[.*\]", var_name).group(0)[1:-1])

    surviving_df = branches_df[branches_df["UID"].isin(kept_branches)]

    if not os.path.exists(data_output_path):
        os.makedirs(data_output_path)
    surviving_df.to_csv(data_output_path + "/branch.csv", index=False)

def filter_pointers(data_input_path, data_output_path):
    """Filter timeseries_pointers.csv down to the surviving generators.

    Keeps every pointer whose Object is a generator present in the output
    gen.csv, plus every pointer whose Category is not 'Generator'.

    NOTE: must be run _after_ populate_generators and with the same
    data_output_path, since it reads the resulting gen.csv.
    """
    # load initial timeseries pointers
    input_pointers_df = pd.read_csv(data_input_path + "/timeseries_pointers.csv")

    # load final generators produced by populate_generators
    output_generators_df = pd.read_csv(data_output_path + "/gen.csv")

    matching_gen_list = list(output_generators_df["GEN UID"])
    # BUG FIX: `|` binds tighter than `!=` in Python, so the original
    # `isin(...) | df['Category'] != 'Generator'` compared the OR result
    # against the string instead of OR-ing two boolean masks — the second
    # comparison needs its own parentheses.
    keep_mask = input_pointers_df["Object"].isin(matching_gen_list) | (
        input_pointers_df["Category"] != "Generator"
    )
    output_df = input_pointers_df[keep_mask]

    if not os.path.exists(data_output_path):
        os.makedirs(data_output_path)
    # index=False for consistency with the other writers; the original emitted
    # a spurious unnamed index column
    output_df.to_csv(data_output_path + "/timeseries_pointers.csv", index=False)

def clone_timeseries(data_input_path, data_output_path):
    """Copy every auxiliary data file from the input directory to the output
    directory, except the three files regenerated by the populate/filter steps.

    Robustness fixes over the original: the output directory is created if
    missing (the original assumed it existed), a missing special file no
    longer raises ValueError (set lookup instead of list.remove), and
    subdirectory entries are skipped rather than crashing shutil.copy.
    """
    # files rebuilt by populate_generators / populate_transmission /
    # filter_pointers — never clone these
    regenerated = {"timeseries_pointers.csv", "gen.csv", "branch.csv"}

    os.makedirs(data_output_path, exist_ok=True)

    # @jkskolf, I don't think I like this ...
    for fname in os.listdir(data_input_path):
        if fname in regenerated:
            continue
        source = data_input_path + "/" + fname
        if not os.path.isfile(source):
            # skip subdirectories and other non-file entries
            continue
        shutil.copy(source, data_output_path + "/" + fname)
Loading