[MNT] fix pre-commit failures on main (#1696)
Cleaned up the offending items so `pre-commit` passes on main.

Fixes #1695

### Description

This quick fix cleans up the files that were causing `pre-commit` to fail on
`main`, so the checks pass again.
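
For reference, these failures should be reproducible locally by running `pre-commit run --all-files` from the repository root (assuming the repository's standard `pre-commit` configuration); after this change the hooks are expected to pass cleanly.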
ewth authored Oct 10, 2024
1 parent 53a1c41 commit 63fadd1
Showing 5 changed files with 6 additions and 6 deletions.
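
All of the changed lines follow the same pattern: whitespace is added around the division operator inside an f-string, which is presumably what the formatting hook was flagging. A minimal standalone sketch of the before/after, using a hypothetical parameter count that is not taken from the repository:

```python
# Hypothetical parameter count, used only to illustrate the formatting change.
n_params = 26_700

# Before: no whitespace around "/" inside the f-string replacement field.
print(f"Number of parameters in network: {n_params/1e3:.1f}k")

# After: whitespace added around the operator, matching this commit's edits.
print(f"Number of parameters in network: {n_params / 1e3:.1f}k")
```

Both statements print the same result ("Number of parameters in network: 26.7k"); only the source formatting differs.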
4 changes: 2 additions & 2 deletions docs/source/tutorials/stallion.ipynb
@@ -979,7 +979,7 @@
" # reduce learning rate if no improvement in validation loss after x epochs\n",
" # reduce_on_plateau_patience=1000,\n",
")\n",
"print(f\"Number of parameters in network: {tft.size()/1e3:.1f}k\")"
"print(f\"Number of parameters in network: {tft.size() / 1e3:.1f}k\")"
]
},
{
@@ -1138,7 +1138,7 @@
" optimizer=\"Ranger\",\n",
" reduce_on_plateau_patience=4,\n",
")\n",
"print(f\"Number of parameters in network: {tft.size()/1e3:.1f}k\")"
"print(f\"Number of parameters in network: {tft.size() / 1e3:.1f}k\")"
]
},
{
2 changes: 1 addition & 1 deletion examples/ar.py
@@ -85,7 +85,7 @@
log_val_interval=3,
# reduce_on_plateau_patience=3,
)
print(f"Number of parameters in network: {deepar.size()/1e3:.1f}k")
print(f"Number of parameters in network: {deepar.size() / 1e3:.1f}k")

# # find optimal learning rate
# deepar.hparams.log_interval = -1
2 changes: 1 addition & 1 deletion examples/nbeats.py
@@ -65,7 +65,7 @@
net = NBeats.from_dataset(
training, learning_rate=3e-2, log_interval=10, log_val_interval=1, log_gradient_flow=False, weight_decay=1e-2
)
print(f"Number of parameters in network: {net.size()/1e3:.1f}k")
print(f"Number of parameters in network: {net.size() / 1e3:.1f}k")

# # find optimal learning rate
# # remove logging and artificial epoch size
2 changes: 1 addition & 1 deletion examples/stallion.py
@@ -119,7 +119,7 @@
log_val_interval=1,
reduce_on_plateau_patience=3,
)
print(f"Number of parameters in network: {tft.size()/1e3:.1f}k")
print(f"Number of parameters in network: {tft.size() / 1e3:.1f}k")

# # find optimal learning rate
# # remove logging and artificial epoch size
2 changes: 1 addition & 1 deletion pytorch_forecasting/data/timeseries.py
@@ -103,7 +103,7 @@ def check_for_nonfinite(tensor: torch.Tensor, names: Union[str, List[str]]) -> t
for name, na in zip(names, nans):
if na > 0:
raise ValueError(
f"{na} ({na/tensor.size(0):.2%}) of {name} "
f"{na} ({na / tensor.size(0):.2%}) of {name} "
"values were found to be NA or infinite (even after encoding). NA values are not allowed "
"`allow_missing_timesteps` refers to missing rows, not to missing values. Possible strategies to "
f"fix the issue are (a) dropping the variable {name}, "
