
Commit

Add more loss types and options (#385)
frostedoyster authored Nov 12, 2024
1 parent f6d4b64 commit 36de384
Showing 10 changed files with 335 additions and 45 deletions.
18 changes: 13 additions & 5 deletions docs/src/architectures/alchemical-model.rst
@@ -59,11 +59,19 @@ hyperparameters to tune are (in decreasing order of importance):
This hyperparameter controls the size and depth of the descriptors and the neural
network. In general, increasing this might lead to better accuracy,
especially on larger datasets, at the cost of increased training and evaluation time.
- ``loss_weights``: This controls the weighting of different contributions to the loss
(e.g., energy, forces, virial, etc.). The default values work well for most datasets,
but they might need to be adjusted. For example, to set a weight of 1.0 for the energy
and 0.1 for the forces, you can set the following in the ``options.yaml`` file:
``loss_weights: {"energy": 1.0, "forces": 0.1}``.
- ``loss``: This section describes the loss function to be used, and it has three
subsections. 1. ``weights``: this controls the weighting of the different
contributions to the loss (e.g., energy, forces, virial, etc.). The default value of
1.0 for all targets works well for most datasets, but it might need to be adjusted.
For example, to set a weight of 1.0 for the energy and 0.1 for the forces, you can
set the following in the ``options.yaml`` file under ``loss``:
``weights: {"energy": 1.0, "forces": 0.1}``. 2. ``type``: this controls the type of
loss to be used. The default value is ``mse``; the other options are ``mae`` and
``huber``. ``huber`` is a subsection of its own, which requires the user to specify
the ``deltas`` parameters in the same way as the ``weights`` (e.g.,
``deltas: {"energy": 0.1, "forces": 0.01}``). 3. ``reduction``: this controls how
the loss is reduced over batches. The default value is ``sum``; the other allowed
option is ``mean``. A combined example is sketched below.
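For reference, a combined ``loss`` section exercising all three subsections might look as follows, written here as the Python dict that the trainer receives once ``options.yaml`` is parsed. This is a minimal sketch based on the schema added in this commit; the target names ("energy", "forces") and values are illustrative, not defaults:

# Hypothetical parsed ``loss`` section (a sketch, not the shipped defaults)
loss_config = {
    "weights": {"energy": 1.0, "forces": 0.1},
    "type": {"huber": {"deltas": {"energy": 0.1, "forces": 0.01}}},
    "reduction": "sum",
}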


Architecture Hyperparameters
18 changes: 13 additions & 5 deletions docs/src/architectures/soap-bpnn.rst
@@ -55,14 +55,22 @@ hyperparameters to tune are (in decreasing order of importance):
- ``radial_scaling`` hyperparameters: These hyperparameters control the radial scaling
of the SOAP descriptor. In general, the default values should work well, but they
might need to be adjusted for specific datasets.
- ``loss_weights``: This controls the weighting of different contributions to the loss
(e.g., energy, forces, virial, etc.). The default values work well for most datasets,
but they might need to be adjusted. For example, to set a weight of 1.0 for the energy
and 0.1 for the forces, you can set the following in the ``options.yaml`` file:
``loss_weights: {"energy": 1.0, "forces": 0.1}``.
- ``layernorm``: Whether to use layer normalization before the neural network. Setting
this hyperparameter to ``false`` will lead to slower convergence of training, but
might lead to better generalization outside of the training set distribution.
- ``loss``: This section describes the loss function to be used, and it has three
subsections. 1. ``weights``: this controls the weighting of the different
contributions to the loss (e.g., energy, forces, virial, etc.). The default value of
1.0 for all targets works well for most datasets, but it might need to be adjusted.
For example, to set a weight of 1.0 for the energy and 0.1 for the forces, you can
set the following in the ``options.yaml`` file under ``loss``:
``weights: {"energy": 1.0, "forces": 0.1}``. 2. ``type``: this controls the type of
loss to be used. The default value is ``mse``; the other options are ``mae`` and
``huber``. ``huber`` is a subsection of its own, which requires the user to specify
the ``deltas`` parameters in the same way as the ``weights`` (e.g.,
``deltas: {"energy": 0.1, "forces": 0.01}``). 3. ``reduction``: this controls how
the loss is reduced over batches. The default value is ``sum``; the other allowed
option is ``mean``. A minimal sketch with a plain string ``type`` follows below.
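A minimal sketch of the simpler case, under the same assumptions as the example in the alchemical-model docs above: when ``type`` is a plain string, no ``deltas`` subsection is needed.

# Hypothetical parsed ``loss`` section using a string ``type`` (illustrative values)
loss_config = {
    "weights": {"energy": 1.0},
    "type": "mae",
    "reduction": "mean",
}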


All Hyperparameters
5 changes: 4 additions & 1 deletion src/metatrain/experimental/alchemical_model/default-hypers.yaml
@@ -26,5 +26,8 @@ architecture:
log_interval: 5
checkpoint_interval: 25
per_structure_targets: []
loss_weights: {}
log_mae: False
loss:
type: mse
weights: {}
reduction: sum
54 changes: 47 additions & 7 deletions src/metatrain/experimental/alchemical_model/schema-hypers.json
@@ -93,17 +93,57 @@
"type": "string"
}
},
"loss_weights": {
"log_mae": {
"type": "boolean"
},
"loss": {
"type": "object",
"patternProperties": {
".*": {
"type": "number"
"properties": {
"weights": {
"type": "object",
"patternProperties": {
".*": {
"type": "number"
}
},
"additionalProperties": false
},
"reduction": {
"type": "string",
"enum": ["sum", "mean", "none"]
},
"type": {
"oneOf": [
{
"type": "string",
"enum": ["mse", "mae"]
},
{
"type": "object",
"properties": {
"huber": {
"type": "object",
"properties": {
"deltas": {
"type": "object",
"patternProperties": {
".*": {
"type": "number"
}
},
"additionalProperties": false
}
},
"required": ["deltas"],
"additionalProperties": false
}
},
"additionalProperties": false
}
]
}
},
"additionalProperties": false
},
"log_mae": {
"type": "boolean"
}
},
"additionalProperties": false
12 changes: 9 additions & 3 deletions src/metatrain/experimental/alchemical_model/trainer.py
@@ -1,3 +1,4 @@
import copy
import logging
from pathlib import Path
from typing import List, Union
@@ -175,21 +176,26 @@ def train(
loss_weights_dict = {}
for output_name in outputs_list:
loss_weights_dict[output_name] = (
self.hypers["loss_weights"][
self.hypers["loss"]["weights"][
to_external_name(output_name, model.outputs)
]
if to_external_name(output_name, model.outputs)
in self.hypers["loss_weights"]
in self.hypers["loss"]["weights"]
else 1.0
)
loss_weights_dict_external = {
to_external_name(key, model.outputs): value
for key, value in loss_weights_dict.items()
}
# Update the loss weights in the hypers:
loss_hypers = copy.deepcopy(self.hypers["loss"])
loss_hypers["weights"] = loss_weights_dict
logging.info(f"Training with loss weights: {loss_weights_dict_external}")

# Create a loss function:
loss_fn = TensorMapDictLoss(loss_weights_dict)
loss_fn = TensorMapDictLoss(
**loss_hypers,
)

# Create an optimizer:
optimizer = torch.optim.Adam(
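In summary, the trainer re-keys the user-facing ``weights`` to internal output names (defaulting to 1.0 for unspecified targets) and substitutes them into a copy of the ``loss`` hypers before unpacking into ``TensorMapDictLoss``. A hedged illustration with hypothetical names:

import copy

# Hypothetical hypers as read from options.yaml:
hypers_loss = {"type": "mse", "weights": {"forces": 0.1}, "reduction": "sum"}
# Hypothetical result of the fallback logic above, keyed by internal names:
loss_weights_dict = {"energy": 1.0, "energy_positions_gradients": 0.1}

loss_hypers = copy.deepcopy(hypers_loss)
loss_hypers["weights"] = loss_weights_dict
# TensorMapDictLoss(**loss_hypers) is then equivalent to
# TensorMapDictLoss(weights=loss_weights_dict, type="mse", reduction="sum")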
5 changes: 4 additions & 1 deletion src/metatrain/experimental/soap_bpnn/default-hypers.yaml
@@ -35,5 +35,8 @@ architecture:
checkpoint_interval: 25
fixed_composition_weights: {}
per_structure_targets: []
loss_weights: {}
log_mae: False
loss:
type: mse
weights: {}
reduction: sum
54 changes: 47 additions & 7 deletions src/metatrain/experimental/soap_bpnn/schema-hypers.json
@@ -141,17 +141,57 @@
"type": "string"
}
},
"loss_weights": {
"log_mae": {
"type": "boolean"
},
"loss": {
"type": "object",
"patternProperties": {
".*": {
"type": "number"
"properties": {
"weights": {
"type": "object",
"patternProperties": {
".*": {
"type": "number"
}
},
"additionalProperties": false
},
"reduction": {
"type": "string",
"enum": ["sum", "mean", "none"]
},
"type": {
"oneOf": [
{
"type": "string",
"enum": ["mse", "mae"]
},
{
"type": "object",
"properties": {
"huber": {
"type": "object",
"properties": {
"deltas": {
"type": "object",
"patternProperties": {
".*": {
"type": "number"
}
},
"additionalProperties": false
}
},
"required": ["deltas"],
"additionalProperties": false
}
},
"additionalProperties": false
}
]
}
},
"additionalProperties": false
},
"log_mae": {
"type": "boolean"
}
},
"additionalProperties": false
11 changes: 8 additions & 3 deletions src/metatrain/experimental/soap_bpnn/trainer.py
@@ -1,3 +1,4 @@
import copy
import logging
import warnings
from pathlib import Path
@@ -191,21 +192,25 @@ def train(
loss_weights_dict = {}
for output_name in outputs_list:
loss_weights_dict[output_name] = (
self.hypers["loss_weights"][
self.hypers["loss"]["weights"][
to_external_name(output_name, train_targets)
]
if to_external_name(output_name, train_targets)
in self.hypers["loss_weights"]
in self.hypers["loss"]["weights"]
else 1.0
)
loss_weights_dict_external = {
to_external_name(key, train_targets): value
for key, value in loss_weights_dict.items()
}
loss_hypers = copy.deepcopy(self.hypers["loss"])
loss_hypers["weights"] = loss_weights_dict
logging.info(f"Training with loss weights: {loss_weights_dict_external}")

# Create a loss function:
loss_fn = TensorMapDictLoss(loss_weights_dict)
loss_fn = TensorMapDictLoss(
**loss_hypers,
)

# Create an optimizer:
optimizer = torch.optim.Adam(
64 changes: 59 additions & 5 deletions src/metatrain/utils/loss.py
@@ -1,7 +1,10 @@
from typing import Dict, Optional, Tuple
from typing import Dict, Optional, Tuple, Union

import torch
from metatensor.torch import TensorMap
from omegaconf import DictConfig

from metatrain.utils.external_naming import to_internal_name


# This file defines losses for metatensor models.
@@ -30,10 +33,34 @@ def __init__(
reduction: str = "sum",
weight: float = 1.0,
gradient_weights: Optional[Dict[str, float]] = None,
type: Union[str, dict] = "mse",
):
self.loss = torch.nn.MSELoss(reduction=reduction)
if gradient_weights is None:
gradient_weights = {}

losses = {}
if type == "mse":
losses["values"] = torch.nn.MSELoss(reduction=reduction)
for key in gradient_weights.keys():
losses[key] = torch.nn.MSELoss(reduction=reduction)
elif type == "mae":
losses["values"] = torch.nn.L1Loss(reduction=reduction)
for key in gradient_weights.keys():
losses[key] = torch.nn.L1Loss(reduction=reduction)
elif isinstance(type, dict) and "huber" in type:
# Huber loss
deltas = type["huber"]["deltas"]
losses["values"] = torch.nn.HuberLoss(
reduction=reduction, delta=deltas["values"]
)
for key in gradient_weights.keys():
losses[key] = torch.nn.HuberLoss(reduction=reduction, delta=deltas[key])
else:
raise ValueError(f"Unknown loss type: {type}")

self.losses = losses
self.weight = weight
self.gradient_weights = {} if gradient_weights is None else gradient_weights
self.gradient_weights = gradient_weights

def __call__(
self, tensor_map_1: TensorMap, tensor_map_2: TensorMap
@@ -97,12 +124,12 @@ def __call__(

values_1 = tensor_map_1.block().values
values_2 = tensor_map_2.block().values
loss += self.weight * self.loss(values_1, values_2)
loss += self.weight * self.losses["values"](values_1, values_2)

for gradient_name, gradient_weight in self.gradient_weights.items():
values_1 = tensor_map_1.block().gradient(gradient_name).values
values_2 = tensor_map_2.block().gradient(gradient_name).values
loss += gradient_weight * self.loss(values_1, values_2)
loss += gradient_weight * self.losses[gradient_name](values_1, values_2)

return loss
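The dispatch in ``__init__`` maps each entry ("values" plus one loss per gradient) to a standard torch module, which ``__call__`` then applies per block, as shown above. A minimal standalone sketch of the Huber branch, with hypothetical deltas:

import torch

reduction = "sum"
deltas = {"values": 0.1, "positions": 0.01}  # hypothetical deltas

# One torch.nn.HuberLoss per entry, as in the branch above.
losses = {
    key: torch.nn.HuberLoss(reduction=reduction, delta=delta)
    for key, delta in deltas.items()
}

pred = torch.tensor([0.00, 1.0])
ref = torch.tensor([0.05, 2.0])
# Residuals within delta are penalized quadratically, larger ones linearly,
# which damps the influence of outlier structures relative to MSE.
print(losses["values"](pred, ref))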

@@ -129,6 +156,7 @@ def __init__(
self,
weights: Dict[str, float],
reduction: str = "sum",
type: Union[str, dict] = "mse",
):
outputs = [key for key in weights.keys() if "gradients" not in key]
self.losses = {}
@@ -141,10 +169,12 @@ def __init__(
"_gradients", ""
)
gradient_weights[gradient_name] = weight
type_output = _process_type(type, output)
self.losses[output] = TensorMapLoss(
reduction=reduction,
weight=value_weight,
gradient_weights=gradient_weights,
type=type_output,
)

def __call__(
@@ -167,3 +197,27 @@ def __call__(
loss += target_loss

return loss
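Note the naming asymmetry that ``_process_type`` (below) resolves: ``weights`` arrives keyed by internal output names (after the trainer's re-keying), while Huber ``deltas`` come straight from the user config with external names. A hedged construction sketch with hypothetical targets:

loss_fn = TensorMapDictLoss(
    weights={"energy": 1.0, "energy_positions_gradients": 0.1},  # internal names
    reduction="sum",
    type={"huber": {"deltas": {"energy": 0.1, "forces": 0.01}}},  # external names
)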


def _process_type(type: Union[str, DictConfig], output: str) -> Union[str, dict]:
if not isinstance(type, str):
assert "huber" in type
# we process the Huber loss delta dict to make it similar to the
# `weights` dict
type_output = {"huber": {"deltas": {}}} # type: ignore
for key, delta in type["huber"]["deltas"].items():
key_internal = to_internal_name(key)
if key_internal == output:
type_output["huber"]["deltas"]["values"] = delta
elif key_internal.startswith(output) and key_internal.endswith(
"_gradients"
):
gradient_name = key_internal.replace(f"{output}_", "").replace(
"_gradients", ""
)
type_output["huber"]["deltas"][gradient_name] = delta
else:
pass
else:
type_output = type # type: ignore
return type_output
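A worked example of the remapping, assuming (hypothetically) that ``to_internal_name`` maps "forces" to "energy_positions_gradients":

# Input, as written by the user in options.yaml:
type_in = {"huber": {"deltas": {"energy": 0.1, "forces": 0.01}}}
# _process_type(type_in, "energy") would then return
# {"huber": {"deltas": {"values": 0.1, "positions": 0.01}}}:
# the target's own delta lands under "values" and each gradient delta
# under its gradient name, matching the keys of TensorMapLoss.losses.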