From 11cbde12cd8bf616313a669c91c44f3c32ad5318 Mon Sep 17 00:00:00 2001
From: Juno Nam
Date: Sun, 28 Apr 2024 19:30:32 -0400
Subject: [PATCH] Apply pre-commit

---
 .pre-commit-config.yaml                      | 14 +++++++-------
 LICENSE                                      |  2 +-
 alchemical_mace/optimize.py                  |  3 ++-
 notebooks/2_compositional_optimization.ipynb |  1 -
 requirements.txt                             |  2 +-
 scripts/perovskite_alchemy.py                |  4 ++--
 6 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index c7a2d66..c956e16 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -8,10 +8,10 @@ repos:
       - id: end-of-file-fixer
       - id: trailing-whitespace
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.4.2
-    hooks:
-      - id: ruff
-        types_or: [ python, pyi, jupyter ]
-        args: [ --fix ]
-      - id: ruff-format
-        types_or: [ python, pyi, jupyter ]
\ No newline at end of file
+    rev: v0.4.2
+    hooks:
+      - id: ruff
+        types_or: [ python, pyi, jupyter ]
+        args: [ --fix ]
+      - id: ruff-format
+        types_or: [ python, pyi, jupyter ]
diff --git a/LICENSE b/LICENSE
index 5c5d1d7..5033e4e 100644
--- a/LICENSE
+++ b/LICENSE
@@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
+SOFTWARE.
diff --git a/alchemical_mace/optimize.py b/alchemical_mace/optimize.py
index 87171be..3623ebb 100644
--- a/alchemical_mace/optimize.py
+++ b/alchemical_mace/optimize.py
@@ -7,11 +7,12 @@ class ExponentiatedGradientDescent(torch.optim.Optimizer):
     Implements Exponentiated Gradient Descent.

     Args:
-        params (iterable of torch.Tensor or dict): iterable of parameters to optimize or 
+        params (iterable of torch.Tensor or dict): iterable of parameters to optimize or
             dicts defining parameter groups.
         lr (float, optional): learning rate. Defaults to 1e-3.
         eps (float, optional): small constant for numerical stability. Defaults to 1e-8.
     """
+
     def __init__(
         self,
         params: Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]],
diff --git a/notebooks/2_compositional_optimization.ipynb b/notebooks/2_compositional_optimization.ipynb
index 34cee6c..da79c3c 100644
--- a/notebooks/2_compositional_optimization.ipynb
+++ b/notebooks/2_compositional_optimization.ipynb
@@ -21,7 +21,6 @@
     "    alchemical_mace_mp,\n",
     "    get_z_table_and_r_max,\n",
     ")\n",
-    "from alchemical_mace.optimize import ExponentiatedGradientDescent\n",
     "from alchemical_mace.utils import suppress_print\n",
     "\n",
     "plt.style.use(\"default\")"
diff --git a/requirements.txt b/requirements.txt
index 19e8df1..3ac8ded 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,4 +9,4 @@ pandas==2.2.2
 matplotlib==3.8.0
 mpltern==1.0.2
 tqdm==4.66.1
-ipykernel==6.25.2
\ No newline at end of file
+ipykernel==6.25.2
diff --git a/scripts/perovskite_alchemy.py b/scripts/perovskite_alchemy.py
index c58422c..ef86b37 100644
--- a/scripts/perovskite_alchemy.py
+++ b/scripts/perovskite_alchemy.py
@@ -131,7 +131,7 @@

 # Define alchemical path
 t = np.linspace(0.0, 1.0, args.alchemy_switch_steps)
-lambda_steps = t ** 5 * (70 * t ** 4 - 315 * t ** 3 + 540 * t ** 2 - 420 * t + 126)
+lambda_steps = t**5 * (70 * t**4 - 315 * t**3 + 540 * t**2 - 420 * t + 126)
 lambda_values = [
     np.zeros(args.alchemy_equil_steps),
     lambda_steps,
@@ -167,7 +167,7 @@ def get_observables(dynamics, time, lambda_value):

 total_steps = 2 * args.alchemy_equil_steps + 2 * args.alchemy_switch_steps
 observables = []
-for step in (tqdm(range(total_steps), desc="Alchemical switching")):
+for step in tqdm(range(total_steps), desc="Alchemical switching"):
     lambda_value = lambda_values[step]
     grad_enabled = calculate_gradients[step]