Commit
Showing 19 changed files with 1,358 additions and 0 deletions.
@@ -0,0 +1,27 @@
{
  "configurations": {
    "Debug: Current File": {
      "adapter": "debugpy",
      "configuration": {
        "name": "Debug: Current File",
        "type": "python",
        "request": "launch",
        "cwd": "${workspaceRoot}",
        "stopOnEntry": false,
        "console": "externalTerminal",
        "debugOptions": [],
        "python": "python3",
        "program": "${file}"
      },
      "breakpoints": {
        "exception": {
          "caught": "N",
          "raised": "N",
          "uncaught": "Y",
          "userUnhandled": ""
        }
      }
    }
  }
}
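
This Vimspector configuration launches the file currently open in the editor under the debugpy adapter in an external terminal, breaking only on uncaught exceptions ("uncaught": "Y"). A minimal script to exercise it (illustrative, not part of the commit):

    # crash_demo.py -- hypothetical script; run under the config above and the
    # debugger should stop at the raise, since the exception is never caught.
    def main():
        raise ValueError('uncaught on purpose')

    main()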
File renamed without changes.
@@ -0,0 +1,110 @@
import datetime

import pandas as pd
import numpy as np


def _local_extremum(arr, min_: bool):
    # Collapse runs of repeated values so a plateau counts as a single candidate point.
    mask = np.empty_like(arr, dtype=bool)
    mask[0] = True
    mask[1:] = arr[:-1] != arr[1:]
    arr_filtered = arr[mask]
    idx_to_orig = mask.nonzero()[0]

    # An interior point is an extremum if it is strictly beyond its left
    # neighbour and at least as extreme as its right neighbour.
    if not min_:
        is_extremum = np.logical_and(arr_filtered[:-2] < arr_filtered[1:-1], arr_filtered[1:-1] >= arr_filtered[2:])
    else:
        is_extremum = np.logical_and(arr_filtered[:-2] > arr_filtered[1:-1], arr_filtered[1:-1] <= arr_filtered[2:])

    # Map positions in the de-duplicated array back to indices in the original array.
    return idx_to_orig[1:-1][is_extremum]


def local_maximum(arr):
    return _local_extremum(arr, min_=False)


def local_minimum(arr):
    return _local_extremum(arr, min_=True)
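
# Worked example (illustrative): for arr = np.array([1, 3, 2, 2, 5, 4]),
# the plateau at 2 collapses to one point, so local_maximum(arr) returns
# array([1, 4]) and local_minimum(arr) returns array([2]).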


def _per_day_extremum(arr, min_: bool):
    # Assumes hourly samples: each consecutive block of 24 values is one day.
    if min_:
        extrema = local_minimum(arr)
    else:
        extrema = local_maximum(arr)
    ext_st = 0
    per_day_extrema = []
    for day_end in range(24, extrema[-1], 24):
        curr_ext_st = ext_st
        day_extrema = []
        while extrema[curr_ext_st] < day_end:
            day_extrema.append(extrema[curr_ext_st])
            curr_ext_st += 1

        ext_st = curr_ext_st

        if len(day_extrema) == 1:
            per_day_extrema.append(day_extrema[0])
            continue
        elif len(day_extrema) == 0:
            continue

        # Several candidates within the day: keep the most extreme one.
        if min_:
            per_day_extrema.append(day_extrema[np.argmin(arr[day_extrema])])
        else:
            per_day_extrema.append(day_extrema[np.argmax(arr[day_extrema])])

    return np.array(per_day_extrema)


def _per_day_minimum(arr):
    return _per_day_extremum(arr, min_=True)


def _per_day_maximum(arr):
    return _per_day_extremum(arr, min_=False)
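
# Note (illustrative): for 72 hourly samples these return at most one index per
# 24-hour window, e.g. the hour of each day's peak load for _per_day_maximum.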


def _get_start_of_days(df):
    maxs = _per_day_maximum(df.values)
    mins = _per_day_minimum(df.values)
    start_of_day = []
    offset_from_max = []

    mins_st = 0
    maxs_st = 0
    while maxs_st < len(maxs) and mins_st < len(mins):
        # Skip minima that fall before the current daily maximum.
        if mins[mins_st] < maxs[maxs_st]:
            mins_st += 1
            continue

        # A "day" is taken to start halfway between a daily maximum and the
        # following daily minimum.
        start_of_day.append(maxs[maxs_st] + (mins[mins_st] - maxs[maxs_st]) // 2)
        offset_from_max.append((mins[mins_st] - maxs[maxs_st]) // 5 * 2)  # computed but currently unused
        maxs_st += 1

        # Skip any further maxima that precede the minimum just paired.
        while maxs_st + 1 < len(maxs) and maxs[maxs_st + 1] < mins[mins_st]:
            maxs_st += 1

    start_of_day = np.array(start_of_day)
    return start_of_day


def _get_day_diff(df: pd.DataFrame, day: datetime.date):
    # Number of whole days between the first timestamp in df and the given date.
    first_day = df.iloc[0].name.to_pydatetime()
    return (datetime.datetime(day.year, day.month, day.day, first_day.hour) - first_day).days


def get_day_load(df: pd.DataFrame, day: datetime.date):
    start_of_day = _get_start_of_days(df)
    st = start_of_day[_get_day_diff(df, day)]
    return df.iloc[st:st + 24]


def get_interval_load(df: pd.DataFrame, st_day: datetime.date, en_day: datetime.date):
    start_of_day = _get_start_of_days(df)
    st = start_of_day[_get_day_diff(df, st_day)]
    en = start_of_day[_get_day_diff(df, en_day)]
    return df.iloc[st:en + 24]
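
These helpers slice out "load days" aligned to the daily peak rather than to midnight. A usage sketch, assuming an hourly single-column DataFrame indexed by timestamp (the file path and column names are taken from the other files in this commit, not from this module):

    import datetime
    import pandas as pd

    df = pd.read_csv('data/sweden_load_2005_2017.csv',
                     parse_dates=['cet_cest_timestamp'],
                     index_col='cet_cest_timestamp')[['SE_load_actual_tso']]
    day = get_day_load(df, datetime.date(2017, 11, 6))       # 24 rows, one peak-aligned day
    week = get_interval_load(df, datetime.date(2017, 11, 6),
                             datetime.date(2017, 11, 12))    # inclusive interval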
@@ -0,0 +1,56 @@
from pathlib import Path
import typing as t

import torch
import numpy as np
import pandas as pd
import pytorch_lightning as pl
from torch.utils.data import Dataset, DataLoader


class TimeseriesDataset(Dataset):
    '''
    Custom Dataset subclass.
    Serves as input to DataLoader to transform X
    into sequence data using a rolling window.
    A DataLoader using this dataset will output batches
    of `(batch_size, seq_len, n_features)` shape.
    Suitable as an input to RNNs.
    From: https://www.kaggle.com/tartakovsky/pytorch-lightning-lstm-timeseries-clean-code
    '''
    def __init__(self, X: np.ndarray, y: np.ndarray, seq_len: int = 1):
        self.X = torch.tensor(X).float()
        self.y = torch.tensor(y).float()
        self.seq_len = seq_len

    def __len__(self):
        # One sample per full window; the last seq_len - 1 positions cannot start a window.
        return len(self.X) - (self.seq_len - 1)

    def __getitem__(self, index):
        # A window of seq_len inputs, paired with the target at the window's last step.
        return (self.X[index:index + self.seq_len], self.y[index + self.seq_len - 1])
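
# Shape check (illustrative): with X of shape (100, n_features) and seq_len=24,
# len(dataset) == 77 and dataset[0][0].shape == (24, n_features), while
# dataset[0][1] is the target aligned with index 23.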


class SwedenLoadDataModule(pl.LightningDataModule):
    def __init__(self, filepath: Path = Path('data/sweden_load_2005_2017.csv'), batch_size: int = 32):
        super().__init__()
        self.filepath = filepath
        self.batch_size = batch_size
        # Parse timestamps on read so tzinfo can be stripped below.
        df = pd.read_csv(filepath, parse_dates=['cet_cest_timestamp'])
        df['cet_cest_timestamp'] = df['cet_cest_timestamp'].apply(lambda x: x.replace(tzinfo=None))
        df = df.rename({'cet_cest_timestamp': 'time', 'SE_load_actual_tso': 'load'}, axis=1)
        self.df = df
        # Populated by setup(); initialised here so the guards below do not raise.
        self.X_train = None
        self.X_test = None

    def setup(self, stage=None):
        if stage == 'fit' and self.X_train is not None:
            return
        if stage == 'test' and self.X_test is not None:
            return
        if stage is None and self.X_train is not None and self.X_test is not None:
            return

    def train_dataloader(self) -> DataLoader:
        ...

    def val_dataloader(self) -> t.Union[DataLoader, t.List[DataLoader]]:
        ...

    def test_dataloader(self) -> t.Union[DataLoader, t.List[DataLoader]]:
        ...
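
The setup() and dataloader bodies are left as stubs in this commit. A sketch of how they might be completed with TimeseriesDataset, assuming next-hour forecasting, a chronological 80/20 split, and seq_len=24 (all assumptions, not the author's implementation):

    # Hypothetical method bodies for SwedenLoadDataModule -- a sketch, not the source.
    def setup(self, stage=None):
        values = self.df['load'].to_numpy(dtype='float32')
        X, y = values[:-1, None], values[1:]   # predict the next hour from the current one
        split = int(len(X) * 0.8)              # assumed 80/20 chronological split
        self.X_train, self.y_train = X[:split], y[:split]
        self.X_test, self.y_test = X[split:], y[split:]

    def train_dataloader(self) -> DataLoader:
        ds = TimeseriesDataset(self.X_train, self.y_train, seq_len=24)
        return DataLoader(ds, batch_size=self.batch_size, shuffle=False)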
@@ -0,0 +1,103 @@
import logging
import typing as t

from pathlib import Path

import pandas as pd
import numpy as np
import altair as alt
import seaborn as sns

logger = logging.getLogger()
logger.setLevel(logging.INFO)

alt.data_transformers.disable_max_rows()
sns.set_theme()

start_date: t.Final[str] = '2017-11-06'


def get_load_n_days(load_path: Path, n: int = 1):
    if n > 25:
        raise ValueError('max 25 days')
    df = pd.read_csv(load_path, parse_dates=['cet_cest_timestamp'])
    df['cet_cest_timestamp'] = df['cet_cest_timestamp'].apply(lambda x: x.replace(tzinfo=None))

    # Keep n days starting from start_date; start_date[:7] is the '2017-11' prefix.
    df_tmp = df.loc[(df['cet_cest_timestamp'] >= pd.to_datetime(start_date))
                    & (df['cet_cest_timestamp'] < pd.to_datetime(f'{start_date[:7]}-{6+n:02}'))].rename(columns={
                        'cet_cest_timestamp': 'time',
                        'SE_load_actual_tso': 'load'
                    }).set_index('time')

    # Upsample the hourly data to 10-second resolution with linear interpolation.
    week_load = df_tmp.resample('10S').asfreq().interpolate()

    return week_load
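
# Example (illustrative): get_load_n_days(Path('data/sweden_load_2005_2017.csv'), 7)
# yields 2017-11-06 through 2017-11-12 at 10-second resolution, roughly
# 8,640 rows per day.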


def get_discharging_charging_bounds(load, charging_percentile, discharging_percentile):
    discharging_bound = np.percentile(load.load, q=discharging_percentile, interpolation='linear')
    charging_bound = np.percentile(load.load, q=charging_percentile, interpolation='linear')
    return discharging_bound, charging_bound


def get_expected_load(load, charging_percentile, discharging_percentile):
    # Clip the load into the [charging_bound, discharging_bound] band: peaks above
    # the discharging bound are shaved, valleys below the charging bound are filled.
    discharging_bound, charging_bound = get_discharging_charging_bounds(load, charging_percentile, discharging_percentile)
    expected_week_load = load.copy()
    expected_week_load.load = np.minimum(expected_week_load.load, discharging_bound)
    expected_week_load.load = np.maximum(expected_week_load.load, charging_bound)
    return expected_week_load
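
# Example (illustrative): with charging_percentile=25 and discharging_percentile=75,
# the expected load is confined to the interquartile band -- demand above the 75th
# percentile would be served by battery discharge, and charging is scheduled while
# demand sits below the 25th percentile.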


def visualize_load(load):
    return alt.Chart(load.reset_index()).encode(
        x='time:T',
        y='load:Q'
    ).mark_line()


def visualize_charging_discharging(load, charging_percentile, discharging_percentile):
    def red_line_chart(y):
        return alt.Chart(pd.DataFrame({'y': [y]})).encode(y='y:Q').mark_rule(color='red')

    # base_load = load.min().values[0]
    discharging_bound, charging_bound = get_discharging_charging_bounds(load, charging_percentile, discharging_percentile)

    # Load curve plus horizontal red rules marking the two bounds.
    return alt.Chart(load.reset_index()).encode(x='time:T', y='load:Q').mark_line() + \
        red_line_chart(charging_bound) + red_line_chart(discharging_bound)


sweden_load_path = Path('../ipynotebooks/data/sweden_load_2005_2017.csv')
day_load = get_load_n_days(sweden_load_path, 1)
week_load = get_load_n_days(sweden_load_path, 7)
num_of_batteries = 40

nominal_v = 3.3                           # nominal cell voltage, V
capacity_wh = 400 * num_of_batteries      # 400 Wh per battery
capacity_ah = capacity_wh / nominal_v

max_charging_wh = 100 * num_of_batteries  # 100 W max charging per battery
max_charging_ah = max_charging_wh / nominal_v

c_rate_num = max_charging_ah / capacity_ah
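
# With the values above: 16 kWh capacity and 4 kW max charging power,
# so c_rate_num = (4000 / 3.3) / (16000 / 3.3) = 0.25 C.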

# step = 10
# load_df = df_tmp.resample(f'{step}S').asfreq().interpolate()
load_df = week_load


def get_power_load(load, time: int):
    # Nearest 10-second sample to `time` seconds after midnight on start_date.
    start_time = pd.to_datetime(f'{start_date} 00:00:00')
    time = start_time + pd.to_timedelta(time, 's')
    return load.iloc[load.index.get_indexer([time], method='nearest')[0]].values[0]


def get_per_mw_price(load, time: int):
    # Quadratic price curve in the instantaneous power load.
    pload = get_power_load(load, time)
    return 5 + 0.5 * pload + 0.05 * pload**2
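
# Worked example: pload = 10 gives 5 + 0.5*10 + 0.05*10**2 = 15 per MWh.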


def get_energy_mwh_required(load, time: int, step: int):
    # Energy drawn over a `step`-second interval, converted to MWh.
    pload = get_power_load(load, time)
    return pload * step / 1000 / 3600
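
# Example (units assumed): if pload is in kW, then 1000 kW over step = 3600 s
# gives 1000 * 3600 / 1000 / 3600 = 1.0 MWh.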


# visualize_charging_discharging(week_load, 25, 75)