V1.4.4 #354

Merged: 6 commits, Sep 27, 2024
2 changes: 1 addition & 1 deletion autode/atoms.py
@@ -804,7 +804,7 @@ def vector(self, i: int, j: int) -> np.ndarray:
Raises:
(IndexError): If i or j are not present
"""
return self[j].coord - self[i].coord
return np.asarray(self[j].coord - self[i].coord)

def nvector(self, i: int, j: int) -> np.ndarray:
"""
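A note on the `autode/atoms.py` change: wrapping the coordinate difference in `np.asarray` makes `vector` return a plain numpy array rather than whatever array subclass the `coord` attributes produce on subtraction. A minimal sketch of that behaviour, using a hypothetical `Coord` subclass rather than autodE's own classes:

```python
import numpy as np

class Coord(np.ndarray):
    """Hypothetical stand-in for a unit-carrying coordinate array"""
    def __new__(cls, data):
        return np.asarray(data, dtype=float).view(cls)

a, b = Coord([0.0, 0.0, 0.0]), Coord([1.0, 0.0, 0.0])

diff = b - a                 # arithmetic on a subclass keeps the subclass
print(type(diff).__name__)   # Coord

plain = np.asarray(b - a)    # np.asarray drops it back to a bare ndarray
print(type(plain).__name__)  # ndarray
```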
45 changes: 27 additions & 18 deletions autode/bracket/dhs.py
@@ -15,13 +15,14 @@
from autode.opt.optimisers.utils import TruncatedTaylor
from autode.opt.optimisers.hessian_update import BFGSSR1Update
from autode.bracket.base import BaseBracketMethod
from autode.opt.optimisers import RFOptimiser
from autode.opt.optimisers import RFOptimiser, ConvergenceParams
from autode.exceptions import OptimiserStepError
from autode.log import logger

if TYPE_CHECKING:
from autode.species.species import Species
from autode.wrappers.methods import Method
from autode.opt.optimisers.base import ConvergenceTolStr


class DistanceConstrainedOptimiser(RFOptimiser):
@@ -60,10 +61,10 @@ def __init__(
pivot_point: Coordinates of the pivot point
line_search: Whether to use linear search
angle_thresh: An angle threshold above which linear search
will be rejected (in Degrees)
will be rejected (in Degrees)
old_coords_read_hess: Old coordinate with hessian which will
be used to obtain initial hessian by
a Hessian update scheme
be used to obtain the initial hessian by a
Hessian update scheme
"""
kwargs.pop("init_alpha", None)
super().__init__(*args, init_alpha=init_trust, **kwargs)
@@ -103,15 +104,22 @@ def _initialise_run(self) -> None:
@property
def converged(self) -> bool:
"""Has the optimisation converged"""
# The tangential gradient should be close to zero
return (
self.rms_tangent_grad < self.gtol and self._abs_delta_e < self.etol
)
assert self._coords is not None

# Check only the tangential component of gradient
g_tau = self.tangent_grad
rms_g_tau = np.sqrt(np.mean(np.square(g_tau)))
max_g_tau = np.max(np.abs(g_tau))

curr_params = self._history.conv_params()
curr_params.rms_g = GradientRMS(rms_g_tau, "Ha/ang")
curr_params.max_g = GradientRMS(max_g_tau, "Ha/ang")
return self.conv_tol.meets_criteria(curr_params)

@property
def rms_tangent_grad(self) -> GradientRMS:
def tangent_grad(self) -> np.ndarray:
"""
Obtain the RMS of the gradient tangent to the distance
Obtain the component of atomic gradients tangent to the distance
vector between current coords and pivot point
"""
assert self._coords is not None and self._coords.g is not None
@@ -120,8 +128,7 @@ def rms_tangent_grad(self) -> GradientRMS:
# unit vector in the direction of distance vector
d_hat = self.dist_vec / np.linalg.norm(self.dist_vec)
tangent_grad = grad - (grad.dot(d_hat)) * d_hat
rms_grad = np.sqrt(np.mean(np.square(tangent_grad)))
return GradientRMS(rms_grad)
return tangent_grad

@property
def dist_vec(self) -> np.ndarray:
@@ -491,6 +498,7 @@ def __init__(
large_step: Union[Distance, float] = Distance(0.2, "ang"),
small_step: Union[Distance, float] = Distance(0.05, "ang"),
switch_thresh: Union[Distance, float] = Distance(1.5, "ang"),
conv_tol: Union["ConvergenceParams", "ConvergenceTolStr"] = "loose",
**kwargs,
):
"""
@@ -513,6 +521,9 @@ def __init__(
switch_thresh: When distance between the two images is less than
this cutoff, smaller DHS extrapolation steps are taken

conv_tol: Convergence tolerance for the distance-constrained
optimiser

Keyword Args:

maxiter: Maximum number of en/grad evaluations
@@ -521,9 +532,6 @@ def __init__(
stop, values less than 0.5 Angstrom are not
recommended.

gtol: Gradient tolerance for the optimiser micro-iterations
in DHS (Hartree/angstrom)

cineb_at_conv: Whether to run CI-NEB calculation from the end
points after the DHS is converged
"""
@@ -542,6 +550,7 @@ def __init__(
self._small_step = Distance(abs(small_step), "ang")
self._sw_thresh = Distance(abs(switch_thresh), "ang")
assert self._small_step < self._large_step
self._conv_tol = conv_tol

self._step_size: Optional[Distance] = None
if self._large_step > self.imgpair.dist:
@@ -597,8 +606,7 @@ def _step(self) -> None:

opt = DistanceConstrainedOptimiser(
maxiter=curr_maxiter,
gtol=self._gtol,
etol=1.0e-3, # seems like a reasonable etol
conv_tol=self._conv_tol,
init_trust=opt_trust,
pivot_point=pivot,
old_coords_read_hess=old_coords,
@@ -612,9 +620,10 @@ def _step(self) -> None:
if not opt.converged:
return None

rms_g_tau = np.sqrt(np.mean(np.square(opt.tangent_grad)))
logger.info(
"Successful optimization after DHS step, final RMS of "
f"tangential gradient = {opt.rms_tangent_grad:.6f} "
f"tangential gradient = {rms_g_tau:.6f} "
f"Ha/angstrom"
)

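The main behavioural change in `dhs.py` is that `DistanceConstrainedOptimiser.converged` no longer compares a single RMS value against `gtol`/`etol`: it projects out the gradient component along the vector from the current coordinates to the pivot point, overwrites the `rms_g`/`max_g` entries of the current convergence parameters, and delegates the decision to `conv_tol.meets_criteria`. A small sketch of the projection itself, with made-up numbers rather than autodE API calls:

```python
import numpy as np

# Illustrative values only: a flattened gradient and the coords -> pivot vector
grad = np.array([0.02, -0.01, 0.03, 0.00, 0.01, -0.02])  # Ha/ang
dist_vec = np.array([1.0, 0.0, 0.0, -1.0, 0.0, 0.0])     # current coords - pivot

d_hat = dist_vec / np.linalg.norm(dist_vec)

# Remove the component of the gradient along the constrained distance
g_tau = grad - grad.dot(d_hat) * d_hat

rms_g_tau = np.sqrt(np.mean(np.square(g_tau)))
max_g_tau = np.max(np.abs(g_tau))
print(f"RMS = {rms_g_tau:.6f} Ha/ang, max = {max_g_tau:.6f} Ha/ang")
```

Because only the tangential component enters the check, the micro-iterations can converge even while a large gradient remains along the constrained distance, which is exactly what a distance-constrained optimisation requires.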
8 changes: 2 additions & 6 deletions autode/calculations/executors.py
@@ -346,10 +346,7 @@ class CalculationExecutorO(_IndirectCalculationExecutor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)

self.etol = PotentialEnergy(3e-5, units="Ha")
self.gtol = GradientRMS(
1e-3, units="Ha Å^-1"
) # TODO: A better number here
self.conv_tol = "normal"
self._fix_unique()

def run(self) -> None:
@@ -367,8 +364,7 @@ def run(self) -> None:
self.optimiser: "NDOptimiser" = type_(
init_alpha=self._step_size,
maxiter=self._max_opt_cycles,
etol=self.etol,
gtol=self.gtol,
conv_tol=self.conv_tol,
)
method = self.method.copy()
method.keywords.grad = kws.GradientKeywords(self.input.keywords)
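Here the executor stops carrying its own `etol`/`gtol` values and instead forwards a single `conv_tol` preset string ("normal") to the optimiser, matching the `conv_tol="loose"` default added to DHS above. As a rough illustration of the pattern only: the class, preset names and threshold numbers below are hypothetical stand-ins, not autodE's actual `ConvergenceParams`.

```python
from dataclasses import dataclass
from typing import Union

@dataclass
class ConvParams:
    rms_g: float  # RMS gradient threshold, Ha/ang
    max_g: float  # max |gradient component| threshold, Ha/ang

# Made-up preset values, purely for illustration
PRESETS = {"loose": ConvParams(rms_g=3e-3, max_g=5e-3),
           "normal": ConvParams(rms_g=1e-3, max_g=3e-3)}

def resolve(conv_tol: Union[str, ConvParams]) -> ConvParams:
    """Accept either a named preset or an explicit parameter object"""
    return PRESETS[conv_tol] if isinstance(conv_tol, str) else conv_tol

print(resolve("normal"))
```

Collapsing several scalar tolerances into one object (or preset name) keeps the convergence criteria consistent across optimisers and lets callers pass a single argument through, as the diff does with `conv_tol=self.conv_tol`.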
2 changes: 1 addition & 1 deletion autode/hessians.py
@@ -353,7 +353,7 @@ def _eigenvalues_to_freqs(self, lambdas) -> List[Frequency]:
(list(autode.values.Frequency)):
"""

nus = np.sqrt(np.complex_(lambdas)) / (
nus = np.sqrt(np.complex128(lambdas)) / (
2.0 * np.pi * Constants.ang_to_m * Constants.c_in_cm
)
nus *= self._freq_scale_factor
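The `hessians.py` change is a NumPy 2.0 compatibility fix: the `np.complex_` alias was removed in NumPy 2.0 and `np.complex128` is the equivalent concrete dtype. Casting to a complex type matters here because negative Hessian eigenvalues must map to imaginary frequencies rather than fail under the square root. A minimal sketch, with the unit conversion and scale factor omitted:

```python
import numpy as np

# Eigenvalues of a mass-weighted Hessian: one negative (e.g. a TS mode), two positive
lambdas = np.array([-0.05, 0.02, 0.10])

# np.complex_ no longer exists in NumPy 2.0; np.complex128 serves the same purpose here
nus = np.sqrt(np.complex128(lambdas))

print(nus)  # the negative eigenvalue gives a purely imaginary root
```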
110 changes: 108 additions & 2 deletions autode/opt/coordinates/base.py
@@ -228,13 +228,14 @@ def update_h_from_old_h(
assert isinstance(old_coords, OptCoordinates), "Wrong type!"
assert old_coords._h is not None
assert old_coords._g is not None
idxs = self.active_mol_indexes

for update_type in hessian_update_types:
updater = update_type(
h=old_coords._h,
s=self.raw - old_coords.raw,
s=np.array(self) - np.array(old_coords),
y=self._g - old_coords._g,
subspace_idxs=old_coords.indexes,
subspace_idxs=idxs,
)

if not updater.conditions_met:
@@ -250,6 +251,79 @@ def update_h_from_old_h(
"Could not update the Hessian - no suitable update strategies"
)

@property
def rfo_shift(self) -> float:
"""
Get the RFO diagonal shift factor λ for the molecular Hessian that
can be applied (H - λI) to obtain the RFO downhill step. The shift
is only calculated in active subspace

Returns:
(float): The shift parameter
"""
assert self._h is not None
# ignore constraint modes
n, _ = self._h.shape
idxs = self.active_mol_indexes
hess = self._h[:, idxs][idxs, :]
grad = self._g[idxs]

h_n, _ = hess.shape
# form the augmented Hessian in active subspace
aug_h = np.zeros(shape=(h_n + 1, h_n + 1))

aug_h[:h_n, :h_n] = hess
aug_h[-1, :h_n] = grad
aug_h[:h_n, -1] = grad

# first non-zero eigenvalue
aug_h_lmda = np.linalg.eigvalsh(aug_h)
rfo_lmda = aug_h_lmda[0]
assert abs(rfo_lmda) > 1.0e-10
return rfo_lmda

@property
def min_eigval(self) -> float:
"""
Obtain the minimum eigenvalue of the molecular Hessian in
the active space

Returns:
(float): The minimum eigenvalue
"""
assert self._h is not None
n, _ = self._h.shape
idxs = self.active_mol_indexes
hess = self._h[:, idxs][idxs, :]

eigvals = np.linalg.eigvalsh(hess)
assert abs(eigvals[0]) > 1.0e-10
return eigvals[0]

def pred_quad_delta_e(self, new_coords: np.ndarray) -> float:
"""
Calculate the estimated change in energy at the new coordinates
based on the quadratic model (i.e. second order Taylor expansion)

Args:
new_coords(np.ndarray): The new coordinates

Returns:
(float): The predicted change in energy
"""
assert self._g is not None and self._h is not None

step = np.array(new_coords) - np.array(self)

idxs = self.active_mol_indexes
step = step[idxs]
grad = self._g[idxs]
hess = self._h[:, idxs][idxs, :]

pred_delta = np.dot(grad, step)
pred_delta += 0.5 * np.linalg.multi_dot((step, hess, step))
return pred_delta

def make_hessian_positive_definite(self) -> None:
"""
Make the Hessian matrix positive definite by shifting eigenvalues
@@ -269,6 +343,38 @@ def to(self, *args, **kwargs) -> "OptCoordinates":
def iadd(self, value: np.ndarray) -> "OptCoordinates":
"""Inplace addition of some coordinates"""

@property
@abstractmethod
def n_constraints(self) -> int:
"""Number of constraints in these coordinates"""

@property
@abstractmethod
def n_satisfied_constraints(self) -> int:
"""Number of constraints that are satisfied in these coordinates"""

@property
@abstractmethod
def active_indexes(self) -> List[int]:
"""A list of indexes which are active in this coordinate set"""

@property
def active_mol_indexes(self) -> List[int]:
"""Active indexes that are actually atomic coordinates in the molecule"""
return [i for i in self.active_indexes if i < len(self)]

@property
@abstractmethod
def inactive_indexes(self) -> List[int]:
"""A list of indexes which are non-active in this coordinate set"""

@property
@abstractmethod
def cart_proj_g(self) -> Optional[np.ndarray]:
"""
The Cartesian gradient with any constraints projected out
"""

def __eq__(self, other):
"""Coordinates can never be identical..."""
return False
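The bulk of the `base.py` additions give `OptCoordinates` an RFO shift, a minimum-eigenvalue query and a quadratic (second-order Taylor) energy prediction, all restricted to the active subspace. A self-contained sketch of what `rfo_shift` and `pred_quad_delta_e` compute, using a made-up 2x2 Hessian rather than a real molecular one:

```python
import numpy as np

# Toy active-space Hessian with one negative eigenvalue, plus a gradient
hess = np.array([[-0.5, 0.1],
                 [ 0.1, 2.0]])
grad = np.array([0.3, -0.2])

# Augmented Hessian bordered by the gradient, as in rfo_shift
n = hess.shape[0]
aug_h = np.zeros((n + 1, n + 1))
aug_h[:n, :n] = hess
aug_h[-1, :n] = grad
aug_h[:n, -1] = grad

lmda = np.linalg.eigvalsh(aug_h)[0]                      # lowest eigenvalue = RFO shift
step = -np.linalg.solve(hess - lmda * np.eye(n), grad)   # solve (H - lmda*I) s = -g

# Second-order Taylor estimate of the energy change, as in pred_quad_delta_e
pred_delta_e = grad.dot(step) + 0.5 * step.dot(hess).dot(step)
print(f"shift = {lmda:.4f}, predicted dE = {pred_delta_e:.4f}")
```

By eigenvalue interlacing, the lowest eigenvalue of the bordered matrix generally lies below the lowest eigenvalue of H, so H - λI is positive definite and the shifted step points downhill even at a saddle-like geometry; comparing the predicted ΔE with the actual energy change is a standard ingredient of trust-radius updates.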
27 changes: 24 additions & 3 deletions autode/opt/coordinates/cartesian.py
@@ -1,5 +1,5 @@
import numpy as np
from typing import Optional, TYPE_CHECKING
from typing import Optional, List, TYPE_CHECKING

from autode.log import logger
from autode.values import ValueArray
@@ -45,7 +45,7 @@ def _update_g_from_cart_g(self, arr: Optional["Gradient"]) -> None:
Arguments:
arr: Gradient array
"""
self.g = None if arr is None else np.array(arr).flatten()
self._g = None if arr is None else np.array(arr).flatten()

def _update_h_from_cart_h(self, arr: Optional["Hessian"]) -> None:
"""
@@ -57,7 +57,24 @@ def _update_h_from_cart_h(self, arr: Optional["Hessian"]) -> None:
Arguments:
arr: Hessian matrix
"""
self.h = None if arr is None else np.array(arr)
assert self.h_or_h_inv_has_correct_shape(arr)
self._h = None if arr is None else np.array(arr)

@property
def n_constraints(self) -> int:
return 0

@property
def n_satisfied_constraints(self) -> int:
return 0

@property
def active_indexes(self) -> List[int]:
return list(range(len(self)))

@property
def inactive_indexes(self) -> List[int]:
return []

def iadd(self, value: np.ndarray) -> OptCoordinates:
return np.ndarray.__iadd__(self, value)
@@ -96,6 +113,10 @@ def to(self, value: str) -> OptCoordinates:
f"Cannot convert Cartesian coordinates to {value}"
)

@property
def cart_proj_g(self) -> Optional[np.ndarray]:
return self.g

@property
def expected_number_of_dof(self) -> int:
"""Expected number of degrees of freedom for the system"""
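For plain Cartesian coordinates the new abstract properties are trivial: there are no constraints, every index is active, and `cart_proj_g` is simply the gradient. The distinction matters for constrained coordinate classes, where the active index set can include entries beyond the molecular coordinates themselves (presumably Lagrange-multiplier components); the new `active_mol_indexes` in the base class keeps only the indexes that really are coordinates. A hypothetical illustration of that filter:

```python
# Suppose a constrained coordinate set of length 6 (the molecular part) whose
# active set also includes one appended multiplier entry at index 6 (hypothetical)
active_indexes = [0, 1, 2, 3, 4, 5, 6]
n_coords = 6  # len(self) for the coordinate array

# Mirrors OptCoordinates.active_mol_indexes: drop anything past the coordinates
active_mol_indexes = [i for i in active_indexes if i < n_coords]
print(active_mol_indexes)  # [0, 1, 2, 3, 4, 5]
```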