Commit 7f5acde: Enable compatibility with torchmetrics >= 0.6.0 (#27)

marcovarrone authored Dec 12, 2022
1 parent 47a71b7 commit 7f5acde
Showing 12 changed files with 59 additions and 35 deletions.
8 changes: 4 additions & 4 deletions README.md
@@ -86,18 +86,18 @@ class. Its
[**init**](https://pytorch-lightning.readthedocs.io/en/latest/api/pytorch_lightning.trainer.trainer.html#pytorch_lightning.trainer.trainer.Trainer.__init__)
method provides various configuration options.

-If you want to run K-Means with a GPU, you can pass the option `gpus=1` to the estimator's
-initializer:
+If you want to run K-Means with a GPU, you can pass the options `accelerator='gpu'` and `devices=1`
+to the estimator's initializer:

```python
-estimator = KMeans(3, trainer_params=dict(gpus=1))
+estimator = KMeans(3, trainer_params=dict(accelerator='gpu', devices=1))
```

Similarly, if you want to train on 4 nodes simultaneously where each node has one GPU available,
you can specify this as follows:

```python
-estimator = KMeans(3, trainer_params=dict(num_nodes=4, gpus=1))
+estimator = KMeans(3, trainer_params=dict(num_nodes=4, accelerator='gpu', devices=1))
```

In fact, **you do not need to change anything else in your code**.
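
For context, a complete workflow then looks like this (a minimal sketch assuming PyCave's scikit-learn-style `fit`/`predict` interface; the toy data is illustrative):

```python
import torch
from pycave.clustering import KMeans

data = torch.randn(10_000, 8)  # toy data; any float tensor works

# Only trainer_params changes when moving to GPU training
estimator = KMeans(3, trainer_params=dict(accelerator='gpu', devices=1))
estimator.fit(data)
labels = estimator.predict(data)  # cluster index for each row
```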
6 changes: 3 additions & 3 deletions docs/index.rst
@@ -78,19 +78,19 @@ For GPU- and multi-node training, PyCave leverages PyTorch Lightning. The hardwa
runs on is determined by the :class:`pytorch_lightning.trainer.Trainer` class. Its
:meth:`~pytorch_lightning.trainer.Trainer.__init__` method provides various configuration options.

-If you want to run K-Means with a GPU, you can pass the option ``gpus=1`` to the estimator's
+If you want to run K-Means with a GPU, you can pass the options ``accelerator='gpu'`` and ``devices=1`` to the estimator's
initializer:

.. code-block:: python

-    estimator = KMeans(3, trainer_params=dict(gpus=1))
+    estimator = KMeans(3, trainer_params=dict(accelerator='gpu', devices=1))

Similarly, if you want to train on 4 nodes simultaneously where each node has one GPU available,
you can specify this as follows:

.. code-block:: python

-    estimator = KMeans(3, trainer_params=dict(num_nodes=4, gpus=1))
+    estimator = KMeans(3, trainer_params=dict(num_nodes=4, accelerator='gpu', devices=1))

In fact, **you do not need to change anything else in your code**.

40 changes: 23 additions & 17 deletions poetry.lock

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions pycave/bayes/gmm/lightning_module.py
@@ -2,7 +2,7 @@
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping
-from torchmetrics import AverageMeter
+from torchmetrics import MeanMetric
from pycave.bayes.core import cholesky_precision
from pycave.utils import NonparametricLightningModule
from .metrics import CovarianceAggregator, MeanAggregator, PriorAggregator
@@ -65,7 +65,7 @@ def __init__(
        )

        # Initialize metrics
-        self.metric_nll = AverageMeter(dist_sync_fn=self.all_gather)
+        self.metric_nll = MeanMetric(dist_sync_fn=self.all_gather)

    def configure_callbacks(self) -> list[pl.Callback]:
        if self.convergence_tolerance == 0:
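Aside from the rename, nothing changes at the call sites: `MeanMetric` replaces the removed `AverageMeter` as torchmetrics' running-mean aggregator (available from 0.6 onward, which matches this commit's version bound). A minimal sketch of the API, assuming torchmetrics >= 0.6:

```python
import torch
from torchmetrics import MeanMetric

metric = MeanMetric()
metric.update(torch.tensor([1.0, 2.0, 3.0]))  # accumulate a batch of values
metric.update(torch.tensor(4.0))              # scalars work too
print(metric.compute())                       # tensor(2.5000)
```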
6 changes: 6 additions & 0 deletions pycave/bayes/gmm/metrics.py
@@ -9,6 +9,8 @@ class PriorAggregator(Metric):
    The prior aggregator aggregates component probabilities over batches and processes.
    """

+    full_state_update = False
+
    def __init__(
        self,
        num_components: int,
@@ -33,6 +35,8 @@ class MeanAggregator(Metric):
    The mean aggregator aggregates component means over batches and processes.
    """

+    full_state_update = False
+
    def __init__(
        self,
        num_components: int,
@@ -63,6 +67,8 @@ class CovarianceAggregator(Metric):
    The covariance aggregator aggregates component covariances over batches and processes.
    """

+    full_state_update = False
+
    def __init__(
        self,
        num_components: int,
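The new `full_state_update = False` class attribute is how newer torchmetrics versions let a `Metric` subclass promise that `update` for one batch does not depend on state accumulated from earlier batches, so `forward` can take a faster single-update path (and it silences the warning recent releases emit when the attribute is left unset). A sketch with a hypothetical aggregator, not part of PyCave:

```python
import torch
from torchmetrics import Metric


class RunningSum(Metric):
    """Each update is independent of prior state, so
    ``full_state_update = False`` is safe."""

    full_state_update = False

    def __init__(self) -> None:
        super().__init__()
        # state is summed across processes when synced
        self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")

    def update(self, values: torch.Tensor) -> None:
        self.total += values.sum()

    def compute(self) -> torch.Tensor:
        return self.total
```

Note that `BatchSummer` in `pycave/clustering/kmeans/metrics.py` is the one class below that sets `full_state_update = True`, presumably because its update does depend on previously accumulated state.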
4 changes: 2 additions & 2 deletions pycave/bayes/markov_chain/lightning_module.py
@@ -1,6 +1,6 @@
import torch
from torch.nn.utils.rnn import PackedSequence
-from torchmetrics import AverageMeter
+from torchmetrics import MeanMetric
from pycave.bayes.markov_chain.metrics import StateCountAggregator
from pycave.utils import NonparametricLightningModule
from .model import MarkovChainModel
@@ -27,7 +27,7 @@ def __init__(self, model: MarkovChainModel, symmetric: bool = False):
            symmetric=self.symmetric,
            dist_sync_fn=self.all_gather,
        )
-        self.metric_nll = AverageMeter(dist_sync_fn=self.all_gather)
+        self.metric_nll = MeanMetric(dist_sync_fn=self.all_gather)

    def on_train_epoch_start(self) -> None:
        self.aggregator.reset()
2 changes: 2 additions & 0 deletions pycave/bayes/markov_chain/metrics.py
@@ -9,6 +9,8 @@ class StateCountAggregator(Metric):
    The state count aggregator aggregates initial states and transitions between states.
    """

+    full_state_update = False
+
    def __init__(
        self,
        num_states: int,
8 changes: 4 additions & 4 deletions pycave/clustering/kmeans/lightning_module.py
@@ -4,7 +4,7 @@
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping
-from torchmetrics import AverageMeter
+from torchmetrics import MeanMetric
from pycave.utils import NonparametricLightningModule
from .metrics import (
    BatchAverager,
@@ -51,7 +51,7 @@ def __init__(
        )

        # Initialize metrics
-        self.metric_inertia = AverageMeter()
+        self.metric_inertia = MeanMetric()

    def configure_callbacks(self) -> List[pl.Callback]:
        if self.convergence_tolerance == 0:
@@ -239,8 +239,8 @@ def nonparametric_training_step(self, batch: torch.Tensor, batch_idx: int) -> No

    def nonparametric_training_epoch_end(self) -> None:
        if self.current_epoch == 0:
-            choice = self.uniform_sampler.compute()[0]
-            self.model.centroids[0].copy_(choice)
+            choice = self.uniform_sampler.compute()
+            self.model.centroids[0].copy_(choice[0] if choice.dim() > 0 else choice)
        elif self._is_current_epoch_sampling:
            candidates = self.distance_sampler.compute()
            self.centroid_candidates.copy_(candidates)
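The second hunk guards against `compute()` returning a 0-dimensional tensor, which cannot be indexed. A toy illustration of the failure mode (not PyCave code):

```python
import torch

scalar = torch.tensor(3.0)        # 0-d result, e.g. a single sampled value
batch = torch.tensor([3.0, 1.0])  # 1-d result for several samples

# scalar[0] would raise IndexError: invalid index of a 0-dim tensor
first = scalar[0] if scalar.dim() > 0 else scalar
print(first)  # tensor(3.)
```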
10 changes: 10 additions & 0 deletions pycave/clustering/kmeans/metrics.py
@@ -9,6 +9,8 @@ class CentroidAggregator(Metric):
    The centroid aggregator aggregates kmeans centroids over batches and processes.
    """

+    full_state_update = False
+
    def __init__(
        self,
        num_clusters: int,
@@ -49,6 +51,8 @@ class UniformSampler(Metric):
    they were already sampled from).
    """

+    full_state_update = False
+
    def __init__(
        self,
        num_choices: int,
@@ -109,6 +113,8 @@ class DistanceSampler(Metric):
    duplicates.
    """

+    full_state_update = False
+
    def __init__(
        self,
        num_choices: int,
@@ -169,6 +175,8 @@ class BatchSummer(Metric):
    Sums the values for a batch of items independently.
    """

+    full_state_update = True
+
    def __init__(self, num_values: int, *, dist_sync_fn: Optional[Callable[[Any], Any]] = None):
        super().__init__(dist_sync_fn=dist_sync_fn) # type: ignore

@@ -187,6 +195,8 @@ class BatchAverager(Metric):
    Averages the values for a batch of items independently.
    """

+    full_state_update = False
+
    def __init__(
        self,
        num_values: int,
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -18,7 +18,7 @@ numpy = "^1.20.3"
python = ">=3.8,<3.11"
pytorch-lightning = "^1.6.0"
torch = "^1.8.0"
-torchmetrics = "^0.5.1,<0.6.0"
+torchmetrics = "^0.6.0"

[tool.poetry.group.pre-commit.dependencies]
black = "^22.12.0"
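Note that Poetry's caret operator keeps an upper bound: `"^0.6.0"` means `>=0.6.0,<0.7.0`, not an open-ended `>=0.6.0`. A quick sanity check of that range using the `packaging` library (not a project dependency, shown only for illustration):

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

# Poetry expands "^0.6.0" to this range for a 0.x release
spec = SpecifierSet(">=0.6.0,<0.7.0")
assert Version("0.6.2") in spec
assert Version("0.7.0") not in spec
```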
2 changes: 1 addition & 1 deletion tests/bayes/gmm/benchmark_gmm_estimator.py
@@ -128,6 +128,6 @@ def test_pycave_gpu(
        convergence_tolerance=0,
        covariance_regularization=1e-3,
        batch_size=batch_size,
-        trainer_params=dict(max_epochs=100, gpus=1),
+        trainer_params=dict(max_epochs=100, accelerator="gpu", devices=1),
    )
    benchmark(estimator.fit, data)
2 changes: 1 addition & 1 deletion tests/clustering/kmeans/benchmark_kmeans_estimator.py
@@ -120,6 +120,6 @@ def test_pycave_gpu(
        init_strategy=init_strategy,
        batch_size=batch_size,
        convergence_tolerance=0,
-        trainer_params=dict(gpus=1, max_epochs=100),
+        trainer_params=dict(max_epochs=100, accelerator="gpu", devices=1),
    )
    benchmark(estimator.fit, data)
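
Both benchmark fixtures rely on `trainer_params` being forwarded to the Lightning `Trainer`, so the change mirrors the Trainer constructor's own migration away from `gpus` (a sketch, assuming the dict is passed through unchanged):

```python
import pytorch_lightning as pl

# Old style, removed in recent Lightning releases:
# trainer = pl.Trainer(max_epochs=100, gpus=1)

# Equivalent configuration with the accelerator/devices API:
trainer = pl.Trainer(max_epochs=100, accelerator="gpu", devices=1)
```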
