From 1716be92af99f04108efd567107d3b9fa8721a8d Mon Sep 17 00:00:00 2001
From: frostedoyster
Date: Wed, 12 Jun 2024 23:13:45 +0200
Subject: [PATCH] Change `mtm::` prefix to `mtt::`

---
 .../advanced-concepts/auxiliary-outputs.rst | 8 ++--
 docs/src/advanced-concepts/output-naming.rst | 6 +--
 .../getting-started/custom_dataset_conf.rst | 6 +--
 .../alchemical_model/tests/test_regression.py | 14 +++----
 .../experimental/gap/tests/test_regression.py | 20 ++++-----
 .../gap/tests/test_torchscript.py | 16 +++----
 src/metatrain/experimental/soap_bpnn/model.py | 10 ++---
 .../soap_bpnn/tests/test_continue.py | 14 +++----
 .../soap_bpnn/tests/test_functionality.py | 16 +++----
 .../soap_bpnn/tests/test_regression.py | 18 ++++----
 .../experimental/soap_bpnn/trainer.py | 2 +-
 src/metatrain/utils/data/dataset.py | 6 +--
 src/metatrain/utils/data/readers/readers.py | 6 +--
 tests/cli/test_train_model.py | 2 +-
 tests/utils/data/test_combine_dataloaders.py | 30 ++++++-------
 tests/utils/data/test_dataset.py | 30 ++++++-------
 tests/utils/data/test_readers.py | 4 +-
 tests/utils/test_export.py | 4 +-
 tests/utils/test_external_naming.py | 42 +++++++++----------
 19 files changed, 127 insertions(+), 127 deletions(-)

diff --git a/docs/src/advanced-concepts/auxiliary-outputs.rst b/docs/src/advanced-concepts/auxiliary-outputs.rst
index 7eb934793..ad8d24a02 100644
--- a/docs/src/advanced-concepts/auxiliary-outputs.rst
+++ b/docs/src/advanced-concepts/auxiliary-outputs.rst
@@ -1,7 +1,7 @@
Auxiliary outputs
=================
-These outputs, which are idenfified by the ``mtm::aux::`` prefix,
+These outputs, which are identified by the ``mtt::aux::`` prefix,
represent additional information that the model may provide. They are
not conventional trainable outputs, and they often correspond to internal
information that the model is capable of providing, such as its internal
representation.
The following auxiliary outputs that are currently supported by one or
more architectures in the library:
-- ``mtm::aux::last_layer_features``: The internal representation
+- ``mtt::aux::last_layer_features``: The internal representation
of the model at the last layer, before the final linear transformation.
The following table shows the architectures that support each of the
@@ -19,13 +19,13 @@
auxiliary outputs:
+------------------------------------------+-----------+------------------+-----+
| Auxiliary output | SOAP-BPNN | Alchemical Model | PET |
+------------------------------------------+-----------+------------------+-----+
-| ``mtm::aux::last_layer_features`` | Yes | No | No |
+| ``mtt::aux::last_layer_features`` | Yes | No | No |
+------------------------------------------+-----------+------------------+-----+
The following tables show the metadata that is expected for each of the
auxiliary outputs:
-mtm::aux::last_layer_features
+mtt::aux::last_layer_features
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. list-table:: Metadata for last-layer features
diff --git a/docs/src/advanced-concepts/output-naming.rst b/docs/src/advanced-concepts/output-naming.rst
index c4453a3e1..29bb131cd 100644
--- a/docs/src/advanced-concepts/output-naming.rst
+++ b/docs/src/advanced-concepts/output-naming.rst
@@ -7,9 +7,9 @@
https://lab-cosmo.github.io/metatensor/latest/atomistic/outputs.html>`_
package. An immediate example is given by the ``energy`` output.
Any additional outputs present within the library are denoted by the
-``mtm::`` prefix. 
For example, some models can output their last-layer -features, which are named as ``mtm::aux::last_layer_features``, where +``mtt::`` prefix. For example, some models can output their last-layer +features, which are named as ``mtt::aux::last_layer_features``, where ``aux`` denotes an auxiliary output. Outputs that are specific to a particular model should be named as -``mtm::::``. +``mtt::::``. diff --git a/docs/src/getting-started/custom_dataset_conf.rst b/docs/src/getting-started/custom_dataset_conf.rst index 3aeec77a4..1102c7362 100644 --- a/docs/src/getting-started/custom_dataset_conf.rst +++ b/docs/src/getting-started/custom_dataset_conf.rst @@ -88,11 +88,11 @@ Allows defining multiple target sections, each with a unique name. - Commonly, a section named ``energy`` should be defined, which is essential for running molecular dynamics simulations. For the ``energy`` section gradients like `forces` and `stress` are enabled by default. -- Other target sections can also be defined, as long as they are prefixed by ``mtm::``. - For example, ``mtm::free_energy``. In general, all targets that are not standard +- Other target sections can also be defined, as long as they are prefixed by ``mtt::``. + For example, ``mtt::free_energy``. In general, all targets that are not standard outputs of ``metatensor.torch.atomistic`` (see https://docs.metatensor.org/latest/atomistic/outputs.html) should be prefixed by - ``mtm::``. + ``mtt::``. Target section parameters include: diff --git a/src/metatrain/experimental/alchemical_model/tests/test_regression.py b/src/metatrain/experimental/alchemical_model/tests/test_regression.py index a7cc17ef8..38f2f871e 100644 --- a/src/metatrain/experimental/alchemical_model/tests/test_regression.py +++ b/src/metatrain/experimental/alchemical_model/tests/test_regression.py @@ -29,7 +29,7 @@ def test_regression_init(): """Perform a regression test on the model at initialization""" targets = TargetInfoDict() - targets["mtm::U0"] = TargetInfo(quantity="energy", unit="eV") + targets["mtt::U0"] = TargetInfo(quantity="energy", unit="eV") dataset_info = DatasetInfo( length_unit="Angstrom", atomic_types={1, 6, 7, 8}, targets=targets @@ -64,10 +64,10 @@ def test_regression_init(): # if you need to change the hardcoded values: # torch.set_printoptions(precision=12) - # print(output["mtm::U0"].block().values) + # print(output["mtt::U0"].block().values) torch.testing.assert_close( - output["mtm::U0"].block().values, + output["mtt::U0"].block().values, expected_output, ) @@ -79,7 +79,7 @@ def test_regression_train(): systems = read_systems(DATASET_PATH) conf = { - "mtm::U0": { + "mtt::U0": { "quantity": "energy", "read_from": DATASET_PATH, "file_format": ".xyz", @@ -91,7 +91,7 @@ def test_regression_train(): } } targets, target_info_dict = read_targets(OmegaConf.create(conf)) - dataset = Dataset({"system": systems, "mtm::U0": targets["mtm::U0"]}) + dataset = Dataset({"system": systems, "mtt::U0": targets["mtt::U0"]}) hypers = DEFAULT_HYPERS.copy() @@ -131,9 +131,9 @@ def test_regression_train(): # if you need to change the hardcoded values: # torch.set_printoptions(precision=12) - # print(output["mtm::U0"].block().values) + # print(output["mtt::U0"].block().values) torch.testing.assert_close( - output["mtm::U0"].block().values, + output["mtt::U0"].block().values, expected_output, ) diff --git a/src/metatrain/experimental/gap/tests/test_regression.py b/src/metatrain/experimental/gap/tests/test_regression.py index d648f21a4..68d3302c2 100644 --- 
a/src/metatrain/experimental/gap/tests/test_regression.py +++ b/src/metatrain/experimental/gap/tests/test_regression.py @@ -26,7 +26,7 @@ def test_regression_init(): """Perform a regression test on the model at initialization""" targets = TargetInfoDict() - targets["mtm::U0"] = TargetInfo(quantity="energy", unit="eV") + targets["mtt::U0"] = TargetInfo(quantity="energy", unit="eV") dataset_info = DatasetInfo( length_unit="Angstrom", atomic_types={1, 6, 7, 8}, targets=targets @@ -43,7 +43,7 @@ def test_regression_train_and_invariance(): systems = read_systems(DATASET_PATH, dtype=torch.float64) conf = { - "mtm::U0": { + "mtt::U0": { "quantity": "energy", "read_from": DATASET_PATH, "file_format": ".xyz", @@ -55,10 +55,10 @@ def test_regression_train_and_invariance(): } } targets, _ = read_targets(OmegaConf.create(conf), dtype=torch.float64) - dataset = Dataset({"system": systems, "mtm::U0": targets["mtm::U0"]}) + dataset = Dataset({"system": systems, "mtt::U0": targets["mtt::U0"]}) target_info_dict = TargetInfoDict() - target_info_dict["mtm::U0"] = TargetInfo(quantity="energy", unit="eV") + target_info_dict["mtt::U0"] = TargetInfo(quantity="energy", unit="eV") dataset_info = DatasetInfo( length_unit="Angstrom", atomic_types={1, 6, 7, 8}, targets=target_info_dict @@ -69,13 +69,13 @@ def test_regression_train_and_invariance(): trainer.train(gap, [torch.device("cpu")], [dataset], [dataset], ".") # Predict on the first five systems - output = gap(systems[:5], {"mtm::U0": gap.outputs["mtm::U0"]}) + output = gap(systems[:5], {"mtt::U0": gap.outputs["mtt::U0"]}) expected_output = torch.tensor( [[-40.5891], [-56.7122], [-76.4146], [-77.3364], [-93.4905]] ) - assert torch.allclose(output["mtm::U0"].block().values, expected_output, rtol=0.3) + assert torch.allclose(output["mtt::U0"].block().values, expected_output, rtol=0.3) # Tests that the model is rotationally invariant system = ase.io.read(DATASET_PATH) @@ -86,16 +86,16 @@ def test_regression_train_and_invariance(): original_output = gap( [metatensor.torch.atomistic.systems_to_torch(original_system)], - {"mtm::U0": gap.outputs["mtm::U0"]}, + {"mtt::U0": gap.outputs["mtt::U0"]}, ) rotated_output = gap( [metatensor.torch.atomistic.systems_to_torch(system)], - {"mtm::U0": gap.outputs["mtm::U0"]}, + {"mtt::U0": gap.outputs["mtt::U0"]}, ) assert torch.allclose( - original_output["mtm::U0"].block().values, - rotated_output["mtm::U0"].block().values, + original_output["mtt::U0"].block().values, + rotated_output["mtt::U0"].block().values, ) diff --git a/src/metatrain/experimental/gap/tests/test_torchscript.py b/src/metatrain/experimental/gap/tests/test_torchscript.py index 7f72f05e2..b6f712f84 100644 --- a/src/metatrain/experimental/gap/tests/test_torchscript.py +++ b/src/metatrain/experimental/gap/tests/test_torchscript.py @@ -14,13 +14,13 @@ def test_torchscript(): """Tests that the model can be jitted.""" target_info_dict = TargetInfoDict() - target_info_dict["mtm::U0"] = TargetInfo(quantity="energy", unit="eV") + target_info_dict["mtt::U0"] = TargetInfo(quantity="energy", unit="eV") dataset_info = DatasetInfo( length_unit="Angstrom", atomic_types={1, 6, 7, 8}, targets=target_info_dict ) conf = { - "mtm::U0": { + "mtt::U0": { "quantity": "energy", "read_from": DATASET_PATH, "file_format": ".xyz", @@ -36,7 +36,7 @@ def test_torchscript(): # for system in systems: # system.types = torch.ones(len(system.types), dtype=torch.int32) - dataset = Dataset({"system": systems, "mtm::U0": targets["mtm::U0"]}) + dataset = Dataset({"system": systems, "mtt::U0": 
targets["mtt::U0"]}) hypers = DEFAULT_HYPERS.copy() gap = GAP(DEFAULT_HYPERS["model"], dataset_info) @@ -44,21 +44,21 @@ def test_torchscript(): trainer.train(gap, [torch.device("cpu")], [dataset], [dataset], ".") scripted_gap = torch.jit.script(gap) - ref_output = gap.forward(systems[:5], {"mtm::U0": gap.outputs["mtm::U0"]}) + ref_output = gap.forward(systems[:5], {"mtt::U0": gap.outputs["mtt::U0"]}) scripted_output = scripted_gap.forward( - systems[:5], {"mtm::U0": gap.outputs["mtm::U0"]} + systems[:5], {"mtt::U0": gap.outputs["mtt::U0"]} ) assert torch.allclose( - ref_output["mtm::U0"].block().values, - scripted_output["mtm::U0"].block().values, + ref_output["mtt::U0"].block().values, + scripted_output["mtt::U0"].block().values, ) def test_torchscript_save(): """Tests that the model can be jitted and saved.""" targets = TargetInfoDict() - targets["mtm::U0"] = TargetInfo(quantity="energy", unit="eV") + targets["mtt::U0"] = TargetInfo(quantity="energy", unit="eV") dataset_info = DatasetInfo( length_unit="Angstrom", atomic_types={1, 6, 7, 8}, targets=targets diff --git a/src/metatrain/experimental/soap_bpnn/model.py b/src/metatrain/experimental/soap_bpnn/model.py index 04409a091..37e742c68 100644 --- a/src/metatrain/experimental/soap_bpnn/model.py +++ b/src/metatrain/experimental/soap_bpnn/model.py @@ -120,7 +120,7 @@ def __init__(self, model_hypers: Dict, dataset_info: DatasetInfo) -> None: } # the model is always capable of outputting the last layer features - self.outputs["mtm::aux::last_layer_features"] = ModelOutput(per_atom=True) + self.outputs["mtt::aux::last_layer_features"] = ModelOutput(per_atom=True) # creates a composition weight tensor that can be directly indexed by species, # this can be left as a tensor of zero or set from the outside using @@ -189,7 +189,7 @@ def __init__(self, model_hypers: Dict, dataset_info: DatasetInfo) -> None: ], ) for output_name in self.outputs.keys() - if "mtm::aux::" not in output_name + if "mtt::aux::" not in output_name } ) @@ -243,14 +243,14 @@ def forward( last_layer_features = self.bpnn(soap_features) # output the hidden features, if requested: - if "mtm::aux::last_layer_features" in outputs: - last_layer_features_options = outputs["mtm::aux::last_layer_features"] + if "mtt::aux::last_layer_features" in outputs: + last_layer_features_options = outputs["mtt::aux::last_layer_features"] out_features = last_layer_features.keys_to_properties( self.center_type_labels.to(device) ) if not last_layer_features_options.per_atom: out_features = metatensor.torch.sum_over_samples(out_features, ["atom"]) - return_dict["mtm::aux::last_layer_features"] = ( + return_dict["mtt::aux::last_layer_features"] = ( _remove_center_type_from_properties(out_features) ) diff --git a/src/metatrain/experimental/soap_bpnn/tests/test_continue.py b/src/metatrain/experimental/soap_bpnn/tests/test_continue.py index 4e2c275a8..19c740da8 100644 --- a/src/metatrain/experimental/soap_bpnn/tests/test_continue.py +++ b/src/metatrain/experimental/soap_bpnn/tests/test_continue.py @@ -22,16 +22,16 @@ def test_continue(monkeypatch, tmp_path): systems = read_systems(DATASET_PATH) target_info_dict = TargetInfoDict() - target_info_dict["mtm::U0"] = TargetInfo(quantity="energy", unit="eV") + target_info_dict["mtt::U0"] = TargetInfo(quantity="energy", unit="eV") dataset_info = DatasetInfo( length_unit="Angstrom", atomic_types={1, 6, 7, 8}, targets=target_info_dict ) model = SoapBpnn(MODEL_HYPERS, dataset_info) - output_before = model(systems[:5], {"mtm::U0": model.outputs["mtm::U0"]}) + 
output_before = model(systems[:5], {"mtt::U0": model.outputs["mtt::U0"]}) conf = { - "mtm::U0": { + "mtt::U0": { "quantity": "energy", "read_from": DATASET_PATH, "file_format": ".xyz", @@ -43,7 +43,7 @@ def test_continue(monkeypatch, tmp_path): } } targets, _ = read_targets(OmegaConf.create(conf)) - dataset = Dataset({"system": systems, "mtm::U0": targets["mtm::U0"]}) + dataset = Dataset({"system": systems, "mtt::U0": targets["mtt::U0"]}) hypers = DEFAULT_HYPERS.copy() hypers["training"]["num_epochs"] = 0 @@ -57,8 +57,8 @@ def test_continue(monkeypatch, tmp_path): # Predict on the first five systems output_before = model_before( - systems[:5], {"mtm::U0": model_before.outputs["mtm::U0"]} + systems[:5], {"mtt::U0": model_before.outputs["mtt::U0"]} ) - output_after = model_after(systems[:5], {"mtm::U0": model_after.outputs["mtm::U0"]}) + output_after = model_after(systems[:5], {"mtt::U0": model_after.outputs["mtt::U0"]}) - assert metatensor.torch.allclose(output_before["mtm::U0"], output_after["mtm::U0"]) + assert metatensor.torch.allclose(output_before["mtt::U0"], output_after["mtt::U0"]) diff --git a/src/metatrain/experimental/soap_bpnn/tests/test_functionality.py b/src/metatrain/experimental/soap_bpnn/tests/test_functionality.py index aea3c56d9..beca1bb95 100644 --- a/src/metatrain/experimental/soap_bpnn/tests/test_functionality.py +++ b/src/metatrain/experimental/soap_bpnn/tests/test_functionality.py @@ -127,12 +127,12 @@ def test_output_last_layer_features(): [system], { "energy": model.outputs["energy"], - "mtm::aux::last_layer_features": ll_output_options, + "mtt::aux::last_layer_features": ll_output_options, }, ) assert "energy" in outputs - assert "mtm::aux::last_layer_features" in outputs - last_layer_features = outputs["mtm::aux::last_layer_features"].block() + assert "mtt::aux::last_layer_features" in outputs + last_layer_features = outputs["mtt::aux::last_layer_features"].block() assert last_layer_features.samples.names == [ "system", "atom", @@ -155,17 +155,17 @@ def test_output_last_layer_features(): [system], { "energy": model.outputs["energy"], - "mtm::aux::last_layer_features": ll_output_options, + "mtt::aux::last_layer_features": ll_output_options, }, ) assert "energy" in outputs - assert "mtm::aux::last_layer_features" in outputs - assert outputs["mtm::aux::last_layer_features"].block().samples.names == ["system"] - assert outputs["mtm::aux::last_layer_features"].block().values.shape == ( + assert "mtt::aux::last_layer_features" in outputs + assert outputs["mtt::aux::last_layer_features"].block().samples.names == ["system"] + assert outputs["mtt::aux::last_layer_features"].block().values.shape == ( 1, 128, ) - assert outputs["mtm::aux::last_layer_features"].block().properties.names == [ + assert outputs["mtt::aux::last_layer_features"].block().properties.names == [ "properties", ] diff --git a/src/metatrain/experimental/soap_bpnn/tests/test_regression.py b/src/metatrain/experimental/soap_bpnn/tests/test_regression.py index c76dc6d37..bf2ad6bd2 100644 --- a/src/metatrain/experimental/soap_bpnn/tests/test_regression.py +++ b/src/metatrain/experimental/soap_bpnn/tests/test_regression.py @@ -22,7 +22,7 @@ def test_regression_init(): """Perform a regression test on the model at initialization""" targets = TargetInfoDict() - targets["mtm::U0"] = TargetInfo(quantity="energy", unit="eV") + targets["mtt::U0"] = TargetInfo(quantity="energy", unit="eV") dataset_info = DatasetInfo( length_unit="Angstrom", atomic_types={1, 6, 7, 8}, targets=targets @@ -34,7 +34,7 @@ def 
test_regression_init(): output = model( systems, - {"mtm::U0": ModelOutput(quantity="energy", unit="", per_atom=False)}, + {"mtt::U0": ModelOutput(quantity="energy", unit="", per_atom=False)}, ) expected_output = torch.tensor( @@ -43,10 +43,10 @@ def test_regression_init(): # if you need to change the hardcoded values: torch.set_printoptions(precision=5) - print(output["mtm::U0"].block().values) + print(output["mtt::U0"].block().values) torch.testing.assert_close( - output["mtm::U0"].block().values, expected_output, rtol=1e-5, atol=1e-5 + output["mtt::U0"].block().values, expected_output, rtol=1e-5, atol=1e-5 ) @@ -57,7 +57,7 @@ def test_regression_train(): systems = read_systems(DATASET_PATH) conf = { - "mtm::U0": { + "mtt::U0": { "quantity": "energy", "read_from": DATASET_PATH, "file_format": ".xyz", @@ -69,7 +69,7 @@ def test_regression_train(): } } targets, target_info_dict = read_targets(OmegaConf.create(conf)) - dataset = Dataset({"system": systems, "mtm::U0": targets["mtm::U0"]}) + dataset = Dataset({"system": systems, "mtt::U0": targets["mtt::U0"]}) hypers = DEFAULT_HYPERS.copy() hypers["training"]["num_epochs"] = 2 @@ -86,7 +86,7 @@ def test_regression_train(): # Predict on the first five systems output = model( systems[:5], - {"mtm::U0": ModelOutput(quantity="energy", unit="", per_atom=False)}, + {"mtt::U0": ModelOutput(quantity="energy", unit="", per_atom=False)}, ) expected_output = torch.tensor( @@ -95,8 +95,8 @@ def test_regression_train(): # if you need to change the hardcoded values: # torch.set_printoptions(precision=5) - # print(output["mtm::U0"].block().values) + # print(output["mtt::U0"].block().values) torch.testing.assert_close( - output["mtm::U0"].block().values, expected_output, rtol=1e-5, atol=1e-5 + output["mtt::U0"].block().values, expected_output, rtol=1e-5, atol=1e-5 ) diff --git a/src/metatrain/experimental/soap_bpnn/trainer.py b/src/metatrain/experimental/soap_bpnn/trainer.py index 041f6f518..81865c334 100644 --- a/src/metatrain/experimental/soap_bpnn/trainer.py +++ b/src/metatrain/experimental/soap_bpnn/trainer.py @@ -58,7 +58,7 @@ def train( # Calculate and set the composition weights for all targets: logger.info("Calculating composition weights") for target_name in model.new_outputs: - if "mtm::aux::" in target_name: + if "mtt::aux::" in target_name: continue # TODO: document transfer learning and say that outputs that are already # present in the model will keep their composition weights diff --git a/src/metatrain/utils/data/dataset.py b/src/metatrain/utils/data/dataset.py index f713665cb..86a3b496c 100644 --- a/src/metatrain/utils/data/dataset.py +++ b/src/metatrain/utils/data/dataset.py @@ -11,7 +11,7 @@ class Dataset: """A version of the `metatensor.learn.Dataset` class that allows for - the use of `mtm::` prefixes in the keys of the dictionary. See + the use of `mtt::` prefixes in the keys of the dictionary. See https://github.com/lab-cosmo/metatensor/issues/621. 
It is important to note that, instead of named tuples, this class @@ -24,7 +24,7 @@ def __init__(self, dict: Dict): new_dict = {} for key, value in dict.items(): - key = key.replace("mtm::", "mtm_") + key = key.replace("mtt::", "mtm_") new_dict[key] = value self.mts_learn_dataset = metatensor.learn.Dataset(**new_dict) @@ -34,7 +34,7 @@ def __getitem__(self, idx: int) -> Dict: mts_dataset_item = self.mts_learn_dataset[idx]._asdict() new_dict = {} for key, value in mts_dataset_item.items(): - key = key.replace("mtm_", "mtm::") + key = key.replace("mtm_", "mtt::") new_dict[key] = value return new_dict diff --git a/src/metatrain/utils/data/readers/readers.py b/src/metatrain/utils/data/readers/readers.py index 3ea309ebd..d658396d1 100644 --- a/src/metatrain/utils/data/readers/readers.py +++ b/src/metatrain/utils/data/readers/readers.py @@ -170,7 +170,7 @@ def read_targets( as well as a ``TargetInfoDict`` instance containing the metadata of the targets. :raises ValueError: if the target name is not valid. Valid target names are - those that either start with ``mtm::`` or those that are in the list of + those that either start with ``mtt::`` or those that are in the list of standard outputs of ``metatensor.torch.atomistic`` (see https://docs.metatensor.org/latest/atomistic/outputs.html) """ @@ -182,11 +182,11 @@ def read_targets( target_info_gradients = set() if target_key not in standard_outputs_list and not target_key.startswith( - "mtm::" + "mtt::" ): raise ValueError( f"Target names must either be one of {standard_outputs_list} " - "or start with `mtm::`." + "or start with `mtt::`." ) if target["quantity"] == "energy": blocks = read_energy( diff --git a/tests/cli/test_train_model.py b/tests/cli/test_train_model.py index f255f358e..097eb7b39 100644 --- a/tests/cli/test_train_model.py +++ b/tests/cli/test_train_model.py @@ -178,7 +178,7 @@ def test_train_multiple_datasets(monkeypatch, tmp_path, options): options["training_set"][1]["systems"]["read_from"] = "ethanol_reduced_100.xyz" options["training_set"][1]["targets"]["energy"]["key"] = "energy" options["training_set"][0]["targets"].pop("energy") - options["training_set"][0]["targets"]["mtm::U0"] = OmegaConf.create({"key": "U0"}) + options["training_set"][0]["targets"]["mtt::U0"] = OmegaConf.create({"key": "U0"}) train_model(options) diff --git a/tests/utils/data/test_combine_dataloaders.py b/tests/utils/data/test_combine_dataloaders.py index 613701a9a..c664b5a26 100644 --- a/tests/utils/data/test_combine_dataloaders.py +++ b/tests/utils/data/test_combine_dataloaders.py @@ -24,7 +24,7 @@ def test_without_shuffling(): systems = read_systems(RESOURCES_PATH / "qm9_reduced_100.xyz") conf = { - "mtm::U0": { + "mtt::U0": { "quantity": "energy", "read_from": RESOURCES_PATH / "qm9_reduced_100.xyz", "file_format": ".xyz", @@ -36,14 +36,14 @@ def test_without_shuffling(): } } targets, _ = read_targets(OmegaConf.create(conf)) - dataset = Dataset({"system": systems, "mtm::U0": targets["mtm::U0"]}) + dataset = Dataset({"system": systems, "mtt::U0": targets["mtt::U0"]}) dataloader_qm9 = DataLoader(dataset, batch_size=10, collate_fn=collate_fn) # will yield 10 batches of 10 systems = read_systems(RESOURCES_PATH / "alchemical_reduced_10.xyz") conf = { - "mtm::free_energy": { + "mtt::free_energy": { "quantity": "energy", "read_from": RESOURCES_PATH / "alchemical_reduced_10.xyz", "file_format": ".xyz", @@ -56,7 +56,7 @@ def test_without_shuffling(): } targets, _ = read_targets(OmegaConf.create(conf)) dataset = Dataset( - {"system": systems, "mtm::free_energy": 
targets["mtm::free_energy"]} + {"system": systems, "mtt::free_energy": targets["mtt::free_energy"]} ) dataloader_alchemical = DataLoader(dataset, batch_size=2, collate_fn=collate_fn) # will yield 5 batches of 2 @@ -68,9 +68,9 @@ def test_without_shuffling(): assert len(combined_dataloader) == 15 for i_batch, batch in enumerate(combined_dataloader): if i_batch < 10: - assert batch[1]["mtm::U0"].block().values.shape == (10, 1) + assert batch[1]["mtt::U0"].block().values.shape == (10, 1) else: - assert batch[1]["mtm::free_energy"].block().values.shape == (2, 1) + assert batch[1]["mtt::free_energy"].block().values.shape == (2, 1) def test_with_shuffling(): @@ -81,7 +81,7 @@ def test_with_shuffling(): systems = read_systems(RESOURCES_PATH / "qm9_reduced_100.xyz") conf = { - "mtm::U0": { + "mtt::U0": { "quantity": "energy", "read_from": RESOURCES_PATH / "qm9_reduced_100.xyz", "file_format": ".xyz", @@ -93,7 +93,7 @@ def test_with_shuffling(): } } targets, _ = read_targets(OmegaConf.create(conf)) - dataset = Dataset({"system": systems, "mtm::U0": targets["mtm::U0"]}) + dataset = Dataset({"system": systems, "mtt::U0": targets["mtt::U0"]}) dataloader_qm9 = DataLoader( dataset, batch_size=10, collate_fn=collate_fn, shuffle=True ) @@ -102,7 +102,7 @@ def test_with_shuffling(): systems = read_systems(RESOURCES_PATH / "alchemical_reduced_10.xyz") conf = { - "mtm::free_energy": { + "mtt::free_energy": { "quantity": "energy", "read_from": RESOURCES_PATH / "alchemical_reduced_10.xyz", "file_format": ".xyz", @@ -115,7 +115,7 @@ def test_with_shuffling(): } targets, _ = read_targets(OmegaConf.create(conf)) dataset = Dataset( - {"system": systems, "mtm::free_energy": targets["mtm::free_energy"]} + {"system": systems, "mtt::free_energy": targets["mtt::free_energy"]} ) dataloader_alchemical = DataLoader( dataset, batch_size=2, collate_fn=collate_fn, shuffle=True @@ -136,17 +136,17 @@ def test_with_shuffling(): alchemical_samples = [] for batch in combined_dataloader: - if "mtm::U0" in batch[1]: + if "mtt::U0" in batch[1]: qm9_batch_count += 1 - assert batch[1]["mtm::U0"].block().values.shape == (10, 1) + assert batch[1]["mtt::U0"].block().values.shape == (10, 1) actual_ordering.append("qm9") - qm9_samples.append(batch[1]["mtm::U0"].block().samples.column("system")) + qm9_samples.append(batch[1]["mtt::U0"].block().samples.column("system")) else: alchemical_batch_count += 1 - assert batch[1]["mtm::free_energy"].block().values.shape == (2, 1) + assert batch[1]["mtt::free_energy"].block().values.shape == (2, 1) actual_ordering.append("alchemical") alchemical_samples.append( - batch[1]["mtm::free_energy"].block().samples.column("system") + batch[1]["mtt::free_energy"].block().samples.column("system") ) assert qm9_batch_count == 10 diff --git a/tests/utils/data/test_dataset.py b/tests/utils/data/test_dataset.py index 410971af1..51c86d314 100644 --- a/tests/utils/data/test_dataset.py +++ b/tests/utils/data/test_dataset.py @@ -199,7 +199,7 @@ def test_dataset_info(): """Tests the DatasetInfo class.""" targets = TargetInfoDict(energy=TargetInfo(quantity="energy", unit="kcal/mol")) - targets["mtm::U0"] = TargetInfo(quantity="energy", unit="kcal/mol") + targets["mtt::U0"] = TargetInfo(quantity="energy", unit="kcal/mol") dataset_info = DatasetInfo( length_unit="angstrom", atomic_types={1, 2, 3}, targets=targets @@ -209,8 +209,8 @@ def test_dataset_info(): assert dataset_info.atomic_types == {1, 2, 3} assert dataset_info.targets["energy"].quantity == "energy" assert dataset_info.targets["energy"].unit == "kcal/mol" - 
assert dataset_info.targets["mtm::U0"].quantity == "energy" - assert dataset_info.targets["mtm::U0"].unit == "kcal/mol" + assert dataset_info.targets["mtt::U0"].quantity == "energy" + assert dataset_info.targets["mtt::U0"].unit == "kcal/mol" def test_length_unit_none_conversion(): @@ -225,7 +225,7 @@ def test_length_unit_none_conversion(): def test_dataset_info_copy(): targets = TargetInfoDict() targets["energy"] = TargetInfo(quantity="energy", unit="eV") - targets["forces"] = TargetInfo(quantity="mtm::forces", unit="eV/Angstrom") + targets["forces"] = TargetInfo(quantity="mtt::forces", unit="eV/Angstrom") info = DatasetInfo(length_unit="angstrom", atomic_types={1, 6}, targets=targets) copy = info.copy() @@ -241,7 +241,7 @@ def test_dataset_info_update(): info = DatasetInfo(length_unit="angstrom", atomic_types={1, 6}, targets=targets) targets2 = targets.copy() - targets2["forces"] = TargetInfo(quantity="mtm::forces", unit="eV/Angstrom") + targets2["forces"] = TargetInfo(quantity="mtt::forces", unit="eV/Angstrom") info2 = DatasetInfo(length_unit="angstrom", atomic_types={8}, targets=targets2) info.update(info2) @@ -258,7 +258,7 @@ def test_dataset_info_update_non_matching_length_unit(): info = DatasetInfo(length_unit="angstrom", atomic_types={1, 6}, targets=targets) targets2 = targets.copy() - targets2["forces"] = TargetInfo(quantity="mtm::forces", unit="eV/Angstrom") + targets2["forces"] = TargetInfo(quantity="mtt::forces", unit="eV/Angstrom") info2 = DatasetInfo(length_unit="nanometer", atomic_types={8}, targets=targets2) @@ -291,11 +291,11 @@ def test_dataset_info_union(): """Tests the union method.""" targets = TargetInfoDict() targets["energy"] = TargetInfo(quantity="energy", unit="eV") - targets["forces"] = TargetInfo(quantity="mtm::forces", unit="eV/Angstrom") + targets["forces"] = TargetInfo(quantity="mtt::forces", unit="eV/Angstrom") info = DatasetInfo(length_unit="angstrom", atomic_types={1, 6}, targets=targets) other_targets = targets.copy() - other_targets["mtm::stress"] = TargetInfo(quantity="mtm::stress", unit="GPa") + other_targets["mtt::stress"] = TargetInfo(quantity="mtt::stress", unit="GPa") other_info = DatasetInfo( length_unit="angstrom", atomic_types={1}, targets=other_targets @@ -341,7 +341,7 @@ def test_get_atomic_types(): systems = read_systems(RESOURCES_PATH / "qm9_reduced_100.xyz") conf = { - "mtm::U0": { + "mtt::U0": { "quantity": "energy", "read_from": str(RESOURCES_PATH / "qm9_reduced_100.xyz"), "file_format": ".xyz", @@ -380,7 +380,7 @@ def test_get_all_targets(): systems = read_systems(RESOURCES_PATH / "qm9_reduced_100.xyz") conf = { - "mtm::U0": { + "mtt::U0": { "quantity": "energy", "read_from": str(RESOURCES_PATH / "qm9_reduced_100.xyz"), "file_format": ".xyz", @@ -408,9 +408,9 @@ def test_get_all_targets(): targets_2, _ = read_targets(OmegaConf.create(conf_2)) dataset = Dataset({"system": systems, **targets}) dataset_2 = Dataset({"system": systems_2, **targets_2}) - assert get_all_targets(dataset) == ["mtm::U0"] + assert get_all_targets(dataset) == ["mtt::U0"] assert get_all_targets(dataset_2) == ["energy"] - assert get_all_targets([dataset, dataset_2]) == ["energy", "mtm::U0"] + assert get_all_targets([dataset, dataset_2]) == ["energy", "mtt::U0"] def test_check_datasets(): @@ -418,7 +418,7 @@ def test_check_datasets(): systems_qm9 = read_systems(RESOURCES_PATH / "qm9_reduced_100.xyz") conf_qm9 = { - "mtm::U0": { + "mtt::U0": { "quantity": "energy", "read_from": str(RESOURCES_PATH / "qm9_reduced_100.xyz"), "file_format": ".xyz", @@ -487,7 +487,7 @@ def 
test_collate_fn(): systems = read_systems(RESOURCES_PATH / "qm9_reduced_100.xyz") conf = { - "mtm::U0": { + "mtt::U0": { "quantity": "energy", "read_from": str(RESOURCES_PATH / "qm9_reduced_100.xyz"), "file_format": ".xyz", @@ -499,7 +499,7 @@ def test_collate_fn(): } } targets, _ = read_targets(OmegaConf.create(conf)) - dataset = Dataset({"system": systems, "mtm::U0": targets["mtm::U0"]}) + dataset = Dataset({"system": systems, "mtt::U0": targets["mtt::U0"]}) batch = collate_fn([dataset[0], dataset[1], dataset[2]]) diff --git a/tests/utils/data/test_readers.py b/tests/utils/data/test_readers.py index 3c8ef320f..f80b9de1e 100644 --- a/tests/utils/data/test_readers.py +++ b/tests/utils/data/test_readers.py @@ -158,7 +158,7 @@ def test_read_targets(stress_dict, virial_dict, monkeypatch, tmp_path, caplog): conf = { "energy": energy_section, - "mtm::energy2": energy_section, + "mtt::energy2": energy_section, } caplog.set_level(logging.INFO) @@ -302,6 +302,6 @@ def test_unsupported_target_name(): with pytest.raises( ValueError, - match="start with `mtm::`", + match="start with `mtt::`", ): read_targets(OmegaConf.create(conf)) diff --git a/tests/utils/test_export.py b/tests/utils/test_export.py index 972500acb..3a7e318e7 100644 --- a/tests/utils/test_export.py +++ b/tests/utils/test_export.py @@ -100,7 +100,7 @@ def test_units_warning(): dataset_info = DatasetInfo( length_unit="angstrom", atomic_types={1}, - targets={"mtm::output": TargetInfo(quantity="energy")}, + targets={"mtt::output": TargetInfo(quantity="energy")}, ) model = __model__(model_hypers=MODEL_HYPERS, dataset_info=dataset_info) @@ -113,5 +113,5 @@ def test_units_warning(): dtype="float32", ) - with pytest.warns(match="No target units were provided for output 'mtm::output'"): + with pytest.warns(match="No target units were provided for output 'mtt::output'"): export(model, capabilities) diff --git a/tests/utils/test_external_naming.py b/tests/utils/test_external_naming.py index dc488463d..666d1c890 100644 --- a/tests/utils/test_external_naming.py +++ b/tests/utils/test_external_naming.py @@ -7,31 +7,31 @@ def test_to_external_name(): quantities = { "energy": TargetInfo(quantity="energy"), - "mtm::free_energy": TargetInfo(quantity="energy"), - "mtm::foo": TargetInfo(quantity="bar"), + "mtt::free_energy": TargetInfo(quantity="energy"), + "mtt::foo": TargetInfo(quantity="bar"), } assert to_external_name("energy_positions_gradients", quantities) == "forces" assert ( - to_external_name("mtm::free_energy_positions_gradients", quantities) - == "forces[mtm::free_energy]" + to_external_name("mtt::free_energy_positions_gradients", quantities) + == "forces[mtt::free_energy]" ) assert ( - to_external_name("mtm::foo_positions_gradients", quantities) - == "mtm::foo_positions_gradients" + to_external_name("mtt::foo_positions_gradients", quantities) + == "mtt::foo_positions_gradients" ) assert to_external_name("energy_strain_gradients", quantities) == "virial" assert ( - to_external_name("mtm::free_energy_strain_gradients", quantities) - == "virial[mtm::free_energy]" + to_external_name("mtt::free_energy_strain_gradients", quantities) + == "virial[mtt::free_energy]" ) assert ( - to_external_name("mtm::foo_strain_gradients", quantities) - == "mtm::foo_strain_gradients" + to_external_name("mtt::foo_strain_gradients", quantities) + == "mtt::foo_strain_gradients" ) assert to_external_name("energy", quantities) == "energy" - assert to_external_name("mtm::free_energy", quantities) == "mtm::free_energy" - assert to_external_name("mtm::foo", quantities) == 
"mtm::foo" + assert to_external_name("mtt::free_energy", quantities) == "mtt::free_energy" + assert to_external_name("mtt::foo", quantities) == "mtt::foo" def test_to_internal_name(): @@ -39,19 +39,19 @@ def test_to_internal_name(): assert to_internal_name("forces") == "energy_positions_gradients" assert ( - to_internal_name("forces[mtm::free_energy]") - == "mtm::free_energy_positions_gradients" + to_internal_name("forces[mtt::free_energy]") + == "mtt::free_energy_positions_gradients" ) assert ( - to_internal_name("mtm::foo_positions_gradients") - == "mtm::foo_positions_gradients" + to_internal_name("mtt::foo_positions_gradients") + == "mtt::foo_positions_gradients" ) assert to_internal_name("virial") == "energy_strain_gradients" assert ( - to_internal_name("virial[mtm::free_energy]") - == "mtm::free_energy_strain_gradients" + to_internal_name("virial[mtt::free_energy]") + == "mtt::free_energy_strain_gradients" ) - assert to_internal_name("mtm::foo_strain_gradients") == "mtm::foo_strain_gradients" + assert to_internal_name("mtt::foo_strain_gradients") == "mtt::foo_strain_gradients" assert to_internal_name("energy") == "energy" - assert to_internal_name("mtm::free_energy") == "mtm::free_energy" - assert to_internal_name("mtm::foo") == "mtm::foo" + assert to_internal_name("mtt::free_energy") == "mtt::free_energy" + assert to_internal_name("mtt::foo") == "mtt::foo"