Apply pre-commit
recisic committed Apr 28, 2024
1 parent 93ef2f4 commit 11cbde1
Showing 6 changed files with 13 additions and 13 deletions.
14 changes: 7 additions & 7 deletions .pre-commit-config.yaml
@@ -8,10 +8,10 @@ repos:
- id: end-of-file-fixer
- id: trailing-whitespace
- repo: https://github.com/astral-sh/ruff-pre-commit
-rev: v0.4.2
-hooks:
-- id: ruff
-types_or: [ python, pyi, jupyter ]
-args: [ --fix ]
-- id: ruff-format
-types_or: [ python, pyi, jupyter ]
+rev: v0.4.2
+hooks:
+- id: ruff
+types_or: [ python, pyi, jupyter ]
+args: [ --fix ]
+- id: ruff-format
+types_or: [ python, pyi, jupyter ]
2 changes: 1 addition & 1 deletion LICENSE
@@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
+SOFTWARE.
3 changes: 2 additions & 1 deletion alchemical_mace/optimize.py
@@ -7,11 +7,12 @@ class ExponentiatedGradientDescent(torch.optim.Optimizer):
Implements Exponentiated Gradient Descent.
Args:
-params (iterable of torch.Tensor or dict): iterable of parameters to optimize or
+params (iterable of torch.Tensor or dict): iterable of parameters to optimize or
dicts defining parameter groups.
lr (float, optional): learning rate. Defaults to 1e-3.
eps (float, optional): small constant for numerical stability. Defaults to 1e-8.
"""
+
def __init__(
self,
params: Union[Iterable[torch.Tensor], Iterable[Dict[str, Any]]],
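For context, an exponentiated gradient descent step multiplies each parameter by the exponential of its negative scaled gradient, keeping the parameters positive. The following is a minimal standalone sketch of that update, not the repository's implementation; the simplex renormalization and the way `eps` is applied here are assumptions for illustration only.

import torch

def eg_step(w: torch.Tensor, grad: torch.Tensor, lr: float = 1e-3, eps: float = 1e-8) -> torch.Tensor:
    # Multiplicative update: parameters remain strictly positive.
    w_new = w * torch.exp(-lr * grad)
    # Renormalize so the weights stay on the probability simplex
    # (assumption: the optimizer is used for weights that sum to one).
    return w_new / (w_new.sum() + eps)

# Example: one step on a 3-component weight vector.
w = torch.tensor([0.2, 0.3, 0.5])
g = torch.tensor([1.0, -0.5, 0.0])
print(eg_step(w, g))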
1 change: 0 additions & 1 deletion notebooks/2_compositional_optimization.ipynb
@@ -21,7 +21,6 @@
" alchemical_mace_mp,\n",
" get_z_table_and_r_max,\n",
")\n",
"from alchemical_mace.optimize import ExponentiatedGradientDescent\n",
"from alchemical_mace.utils import suppress_print\n",
"\n",
"plt.style.use(\"default\")"
2 changes: 1 addition & 1 deletion requirements.txt
@@ -9,4 +9,4 @@ pandas==2.2.2
matplotlib==3.8.0
mpltern==1.0.2
tqdm==4.66.1
-ipykernel==6.25.2
+ipykernel==6.25.2
4 changes: 2 additions & 2 deletions scripts/perovskite_alchemy.py
@@ -131,7 +131,7 @@

# Define alchemical path
t = np.linspace(0.0, 1.0, args.alchemy_switch_steps)
-lambda_steps = t ** 5 * (70 * t ** 4 - 315 * t ** 3 + 540 * t ** 2 - 420 * t + 126)
+lambda_steps = t**5 * (70 * t**4 - 315 * t**3 + 540 * t**2 - 420 * t + 126)
lambda_values = [
np.zeros(args.alchemy_equil_steps),
lambda_steps,
@@ -167,7 +167,7 @@ def get_observables(dynamics, time, lambda_value):
total_steps = 2 * args.alchemy_equil_steps + 2 * args.alchemy_switch_steps

observables = []
-for step in (tqdm(range(total_steps), desc="Alchemical switching")):
+for step in tqdm(range(total_steps), desc="Alchemical switching"):
lambda_value = lambda_values[step]
grad_enabled = calculate_gradients[step]

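The reformatted `lambda_steps` line evaluates the ninth-order smoothstep polynomial, which rises monotonically from 0 at t = 0 to 1 at t = 1 with vanishing low-order derivatives at both endpoints, giving a smooth alchemical switching path. A short standalone check (not part of the repository, assuming `numpy` as the script already uses) confirms those properties:

import numpy as np

t = np.linspace(0.0, 1.0, 11)
lam = t**5 * (70 * t**4 - 315 * t**3 + 540 * t**2 - 420 * t + 126)
print(lam[0], lam[-1])            # 0.0 at t = 0, 1.0 at t = 1
print(np.all(np.diff(lam) >= 0))  # True: monotonically increasing on [0, 1]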
