diff --git a/.coderabbit.yaml b/.coderabbit.yaml
new file mode 100644
index 00000000..15cca8ab
--- /dev/null
+++ b/.coderabbit.yaml
@@ -0,0 +1,17 @@
+# yaml-language-server: $schema=https://coderabbit.ai/integrations/schema.v2.json
+language: "en-US"
+early_access: false
+reviews:
+  request_changes_workflow: false
+  high_level_summary: false
+  poem: false
+  review_status: false
+  collapse_walkthrough: false
+  auto_review:
+    enabled: true
+    ignore_title_keywords:
+      - "WIP"
+      - "DO NOT MERGE"
+    drafts: false
+chat:
+  auto_reply: true
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 79e5ded1..3bc6315e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -7,14 +7,14 @@ ci:
 
 repos:
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.3.2
+    rev: v0.4.3
     hooks:
       - id: ruff
-        args: [--fix, --ignore, D]
+        args: [--fix, --ignore, D, --unsafe-fixes]
       - id: ruff-format
 
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.5.0
+    rev: v4.6.0
    hooks:
      - id: check-yaml
        exclude: pymatgen/analysis/vesta_cutoffs.yaml
@@ -22,7 +22,7 @@ repos:
      - id: trailing-whitespace
 
  - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.8.0
+    rev: v1.10.0
    hooks:
      - id: mypy
        additional_dependencies: [types-requests]
diff --git a/custodian/__init__.py b/custodian/__init__.py
index 50b2a3da..e63c7146 100644
--- a/custodian/__init__.py
+++ b/custodian/__init__.py
@@ -11,7 +11,7 @@
     "Shyue Ping Ong, William Davidson Richards, Stephen Dacek, Xiaohui Qu, Matthew Horton, "
     "Samuel M. Blau, Janosh Riebesell"
 )
-__version__ = "2024.1.9"
+__version__ = "2024.4.18"
 
 PKG_DIR = os.path.dirname(__file__)
diff --git a/custodian/ansible/actions.py b/custodian/ansible/actions.py
index 0082ede9..b555f896 100644
--- a/custodian/ansible/actions.py
+++ b/custodian/ansible/actions.py
@@ -11,13 +11,13 @@
 def get_nested_dict(input_dict, key):
     """Helper function to interpret a nested dict input."""
     current = input_dict
-    toks = key.split("->")
-    n = len(toks)
-    for i, tok in enumerate(toks):
+    tokens = key.split("->")
+    n = len(tokens)
+    for i, tok in enumerate(tokens):
         if tok not in current and i < n - 1:
             current[tok] = {}
         elif i == n - 1:
-            return current, toks[-1]
+            return current, tokens[-1]
         current = current[tok]
     return None
@@ -46,7 +46,7 @@ class DictActions:
     """
 
     @staticmethod
-    def set(input_dict, settings, directory=None):
+    def set(input_dict, settings, directory=None) -> None:
         """
         Sets a value using MongoDB syntax.
 
@@ -55,12 +55,12 @@ def set(input_dict, settings, directory=None):
             settings (dict): The specification of the modification to be made.
             directory (None): dummy parameter for compatibility with FileActions
         """
-        for k, v in settings.items():
-            (d, key) = get_nested_dict(input_dict, k)
-            d[key] = v
+        for key, val in settings.items():
+            dct, sub_key = get_nested_dict(input_dict, key)
+            dct[sub_key] = val
 
     @staticmethod
-    def unset(input_dict, settings, directory=None):
+    def unset(input_dict, settings, directory=None) -> None:
         """
         Unset a value using MongoDB syntax.
 
@@ -74,7 +74,7 @@ def unset(input_dict, settings, directory=None):
             del dct[inner_key]
 
     @staticmethod
-    def push(input_dict, settings, directory=None):
+    def push(input_dict, settings, directory=None) -> None:
         """
         Push to a list using MongoDB syntax.
 
@@ -83,15 +83,15 @@ def push(input_dict, settings, directory=None):
             settings (dict): The specification of the modification to be made.
             directory (None): dummy parameter for compatibility with FileActions
         """
-        for k, v in settings.items():
-            (d, key) = get_nested_dict(input_dict, k)
-            if key in d:
-                d[key].append(v)
+        for key, val in settings.items():
+            dct, sub_key = get_nested_dict(input_dict, key)
+            if sub_key in dct:
+                dct[sub_key].append(val)
             else:
-                d[key] = [v]
+                dct[sub_key] = [val]
 
     @staticmethod
-    def push_all(input_dict, settings, directory=None):
+    def push_all(input_dict, settings, directory=None) -> None:
         """
         Push multiple items to a list using MongoDB syntax.
 
@@ -108,24 +108,24 @@ def push_all(input_dict, settings, directory=None):
             dct[k2] = val
 
     @staticmethod
-    def inc(input_dict, settings, directory=None):
+    def inc(input_dict, settings, directory=None) -> None:
         """
-        Increment a value using MongdoDB syntax.
+        Increment a value using MongoDB syntax.
 
         Args:
             input_dict (dict): The input dictionary to be modified.
             settings (dict): The specification of the modification to be made.
             directory (None): dummy parameter for compatibility with FileActions
         """
-        for k, v in settings.items():
-            (d, key) = get_nested_dict(input_dict, k)
-            if key in d:
-                d[key] += v
+        for key, val in settings.items():
+            dct, sub_key = get_nested_dict(input_dict, key)
+            if sub_key in dct:
+                dct[sub_key] += val
             else:
-                d[key] = v
+                dct[sub_key] = val
 
     @staticmethod
-    def rename(input_dict, settings, directory=None):
+    def rename(input_dict, settings, directory=None) -> None:
         """
         Rename a key using MongoDB syntax.
 
@@ -134,12 +134,12 @@ def rename(input_dict, settings, directory=None):
             settings (dict): The specification of the modification to be made.
             directory (None): dummy parameter for compatibility with FileActions
         """
-        for key, v in settings.items():
-            if val := input_dict.pop(key, None):
-                input_dict[v] = val
+        for key, val in settings.items():
+            if input_val := input_dict.pop(key, None):
+                input_dict[val] = input_val
 
     @staticmethod
-    def add_to_set(input_dict, settings, directory=None):
+    def add_to_set(input_dict, settings, directory=None) -> None:
         """
         Add to set using MongoDB syntax.
 
@@ -148,17 +148,17 @@ def add_to_set(input_dict, settings, directory=None):
             settings (dict): The specification of the modification to be made.
             directory (None): dummy parameter for compatibility with FileActions
         """
-        for k, v in settings.items():
-            (d, key) = get_nested_dict(input_dict, k)
-            if key in d and (not isinstance(d[key], list)):
-                raise ValueError(f"Keyword {k} does not refer to an array.")
-            if key in d and v not in d[key]:
-                d[key].append(v)
-            elif key not in d:
-                d[key] = v
+        for key, val in settings.items():
+            dct, sub_key = get_nested_dict(input_dict, key)
+            if sub_key in dct and (not isinstance(dct[sub_key], list)):
+                raise ValueError(f"Keyword {key} does not refer to an array.")
+            if sub_key in dct and val not in dct[sub_key]:
+                dct[sub_key].append(val)
+            elif sub_key not in dct:
+                dct[sub_key] = val
 
     @staticmethod
-    def pull(input_dict, settings, directory=None):
+    def pull(input_dict, settings, directory=None) -> None:
         """
         Pull an item using MongoDB syntax.
 
@@ -175,7 +175,7 @@ def pull(input_dict, settings, directory=None):
             dct[k2] = [itm for itm in dct[k2] if itm != val]
 
     @staticmethod
-    def pull_all(input_dict, settings, directory=None):
+    def pull_all(input_dict, settings, directory=None) -> None:
         """
         Pull multiple items to a list using MongoDB syntax.
 
@@ -191,7 +191,7 @@ def pull_all(input_dict, settings, directory=None):
             DictActions.pull(input_dict, {key: itm})
 
     @staticmethod
-    def pop(input_dict, settings, directory=None):
+    def pop(input_dict, settings, directory=None) -> None:
         """
         Pop item from a list using MongoDB syntax.
 
@@ -200,14 +200,14 @@ def pop(input_dict, settings, directory=None):
             settings (dict): The specification of the modification to be made.
             directory (None): dummy parameter for compatibility with FileActions
         """
-        for k, v in settings.items():
-            (d, key) = get_nested_dict(input_dict, k)
-            if key in d and (not isinstance(d[key], list)):
-                raise ValueError(f"Keyword {k} does not refer to an array.")
-            if v == 1:
-                d[key].pop()
-            elif v == -1:
-                d[key].pop(0)
+        for key, val in settings.items():
+            dct, sub_key = get_nested_dict(input_dict, key)
+            if sub_key in dct and (not isinstance(dct[sub_key], list)):
+                raise ValueError(f"Keyword {key} does not refer to an array.")
+            if val == 1:
+                dct[sub_key].pop()
+            elif val == -1:
+                dct[sub_key].pop(0)
 
 
 class FileActions:
@@ -218,7 +218,7 @@ class FileActions:
     """
 
     @staticmethod
-    def file_create(filename, settings, directory):
+    def file_create(filename, settings, directory) -> None:
         """
         Creates a file.
 
@@ -229,13 +229,13 @@ def file_create(filename, settings, directory):
         """
         if len(settings) != 1:
             raise ValueError("Settings must only contain one item with key 'content'.")
-        for k, v in settings.items():
-            if k == "content":
+        for key, val in settings.items():
+            if key == "content":
                 with open(filename, "w") as file:
-                    file.write(v)
+                    file.write(val)
 
     @staticmethod
-    def file_move(filename, settings, directory):
+    def file_move(filename, settings, directory) -> None:
         """
         Moves a file. {'_file_move': {'dest': 'new_file_name'}}.
 
@@ -246,12 +246,12 @@ def file_move(filename, settings, directory):
         """
         if len(settings) != 1:
             raise ValueError("Settings must only contain one item with key 'dest'.")
-        for k, v in settings.items():
-            if k == "dest":
-                shutil.move(os.path.join(directory, filename), os.path.join(directory, v))
+        for key, val in settings.items():
+            if key == "dest":
+                shutil.move(os.path.join(directory, filename), os.path.join(directory, val))
 
     @staticmethod
-    def file_delete(filename, settings, directory):
+    def file_delete(filename, settings, directory) -> None:
         """
         Deletes a file. {'_file_delete': {'mode': "actual"}}.
 
@@ -263,18 +263,18 @@ def file_delete(filename, settings, directory):
         """
         if len(settings) != 1:
             raise ValueError("Settings must only contain one item with key 'mode'.")
-        for k, v in settings.items():
-            if k == "mode" and v == "actual":
+        for key, val in settings.items():
+            if key == "mode" and val == "actual":
                 try:
                     os.remove(os.path.join(directory, filename))
                 except OSError:
                     # Skip file not found error.
                     pass
-            elif k == "mode" and v == "simulated":
+            elif key == "mode" and val == "simulated":
                 print(f"Simulated removal of {filename}")
 
     @staticmethod
-    def file_copy(filename, settings, directory):
+    def file_copy(filename, settings, directory) -> None:
         """
         Copies a file. {'_file_copy': {'dest': 'new_file_name'}}.
 
@@ -283,12 +283,12 @@ def file_copy(filename, settings, directory):
             settings (dict): Must be {"dest": path of new file}
             directory (str): Directory to copy file to/from
         """
-        for k, v in settings.items():
-            if k.startswith("dest"):
-                shutil.copyfile(os.path.join(directory, filename), os.path.join(directory, v))
+        for key, val in settings.items():
+            if key.startswith("dest"):
+                shutil.copyfile(os.path.join(directory, filename), os.path.join(directory, val))
 
     @staticmethod
-    def file_modify(filename, settings, directory):
+    def file_modify(filename, settings, directory) -> None:
         """
         Modifies file access.
 
@@ -297,8 +297,9 @@ def file_modify(filename, settings, directory):
             settings (dict): Can be "mode" or "owners"
             directory (str): Directory to modify file in
         """
-        for k, v in settings.items():
-            if k == "mode":
-                os.chmod(os.path.join(directory, filename), v)
-            if k == "owners":
-                os.chown(os.path.join(directory, filename), v)
+        for key, val in settings.items():
+            if key == "mode":
+                os.chmod(os.path.join(directory, filename), val)
+            if key == "owners":
+                # TODO fix this mypy error, missing 3rd positional argument to chown
+                os.chown(os.path.join(directory, filename), val)  # type: ignore[call-arg]
diff --git a/custodian/ansible/interpreter.py b/custodian/ansible/interpreter.py
index 82eefd05..261b5ee5 100644
--- a/custodian/ansible/interpreter.py
+++ b/custodian/ansible/interpreter.py
@@ -30,7 +30,7 @@ class Modder:
         'Universe'
     """
 
-    def __init__(self, actions=None, strict=True, directory="./"):
+    def __init__(self, actions=None, strict=True, directory="./") -> None:
        """Initialize a Modder from a list of supported actions.
 
         Args:
@@ -41,6 +41,8 @@ def __init__(self, actions=None, strict=True, directory="./"):
                 mode, unsupported actions are simply ignored without any
                 errors raised. In strict mode, if an unsupported action is
                 supplied, a ValueError is raised. Defaults to True.
+            directory (str): The directory containing the files to be modified.
+                Defaults to "./".
         """
         self.supported_actions = {}
         actions = actions if actions is not None else [DictActions]
@@ -51,7 +53,7 @@ def __init__(self, actions=None, strict=True, directory="./"):
         self.strict = strict
         self.directory = directory
 
-    def modify(self, modification, obj):
+    def modify(self, modification, obj) -> None:
         """
         Note that modify makes actual in-place modifications. It does not
         return a copy.
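
Editor's note: the renames above (`k, v` to `key, val`, `d` to `dct`) leave the MongoDB-style semantics of `DictActions` unchanged. A minimal usage sketch, driven through `Modder` and adapted from the doctest in the `Modder` class docstring ('Universe' appears there); the `Bye->Bye` key and the `_push`/`_inc` values are illustrative only, and the `"->"` separator for nested keys is what `get_nested_dict` parses:

from custodian.ansible.interpreter import Modder

modder = Modder()  # DictActions is the default action set
dct = {"Hello": "World"}
# _set assigns values, creating nested dicts along "->" paths
modder.modify({"_set": {"Hello": "Universe", "Bye->Bye": "Mars"}}, dct)
# dct is now {"Hello": "Universe", "Bye": {"Bye": "Mars"}}
# _push appends (creating the list if needed); _inc increments (or initializes)
modder.modify({"_push": {"queue": 1}}, dct)
modder.modify({"_inc": {"count": 2}}, dct)
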
diff --git a/custodian/cli/converge_geometry.py b/custodian/cli/converge_geometry.py
index dbc6e2c7..71996dd1 100644
--- a/custodian/cli/converge_geometry.py
+++ b/custodian/cli/converge_geometry.py
@@ -64,7 +64,7 @@ def get_runs(args):
     )
 
 
-def do_run(args):
+def do_run(args) -> None:
     """Perform the run."""
     handlers = [
         VaspErrorHandler(),
diff --git a/custodian/cli/converge_kpoints.py b/custodian/cli/converge_kpoints.py
index 8da9934d..8ef75917 100644
--- a/custodian/cli/converge_kpoints.py
+++ b/custodian/cli/converge_kpoints.py
@@ -49,7 +49,7 @@ def get_runs(vasp_command, target=1e-3, max_steps=10, mode="linear"):
     )
 
 
-def do_run(args):
+def do_run(args) -> None:
     """Perform the run."""
     handlers = [VaspErrorHandler(), UnconvergedErrorHandler()]
     c = Custodian(
@@ -65,7 +65,7 @@ def do_run(args):
     c.run()
 
 
-def main():
+def main() -> None:
     """Main method."""
     import argparse
 
diff --git a/custodian/cli/cstdn.py b/custodian/cli/cstdn.py
index 53f1b671..88aa4ea7 100644
--- a/custodian/cli/cstdn.py
+++ b/custodian/cli/cstdn.py
@@ -73,7 +73,7 @@
 """
 
 
-def run(args):
+def run(args) -> None:
     """Perform a single run."""
     FORMAT = "%(asctime)s %(message)s"
     logging.basicConfig(format=FORMAT, level=logging.INFO, filename="run.log")
@@ -83,12 +83,12 @@ def run(args):
     c.run()
 
 
-def print_example(args):
+def print_example(args) -> None:
     """Print the example_yaml."""
     print(example_yaml)
 
 
-def main():
+def main() -> None:
     """Main method."""
     parser = argparse.ArgumentParser(
         description="""
diff --git a/custodian/cli/run_nwchem.py b/custodian/cli/run_nwchem.py
index f62bbe7a..5dae45bf 100644
--- a/custodian/cli/run_nwchem.py
+++ b/custodian/cli/run_nwchem.py
@@ -7,7 +7,7 @@
 from custodian.nwchem.jobs import NwchemJob
 
 
-def do_run(args):
+def do_run(args) -> None:
     """Do the run."""
     logging.basicConfig(format="%(asctime)s %(message)s", level=logging.INFO, filename="run.log")
     job = NwchemJob(
@@ -26,7 +26,7 @@ def do_run(args):
     c.run()
 
 
-def main():
+def main() -> None:
     """Main method."""
     import argparse
 
diff --git a/custodian/cli/run_vasp.py b/custodian/cli/run_vasp.py
index 22f09477..aa940a7c 100644
--- a/custodian/cli/run_vasp.py
+++ b/custodian/cli/run_vasp.py
@@ -196,7 +196,7 @@ def get_jobs(args):
     )
 
 
-def do_run(args):
+def do_run(args) -> None:
     """Do the run."""
     FORMAT = "%(asctime)s %(message)s"
     logging.basicConfig(format=FORMAT, level=logging.INFO, filename="run.log")
@@ -216,7 +216,7 @@ def do_run(args):
     c.run()
 
 
-def main():
+def main() -> None:
     """Main method."""
     import argparse
 
diff --git a/custodian/cp2k/handlers.py b/custodian/cp2k/handlers.py
index 6cca46c9..c7402889 100644
--- a/custodian/cp2k/handlers.py
+++ b/custodian/cp2k/handlers.py
@@ -18,6 +18,7 @@
 import os
 import re
 import time
+from typing import ClassVar
 
 import numpy as np
 from monty.os.path import zpath
@@ -56,9 +57,13 @@ class StdErrHandler(ErrorHandler):
     is_monitor = True
     raises_runtime_error = False
 
-    error_msgs = {"seg_fault": ["SIGSEGV"], "out_of_memory": ["insufficient virtual memory"], "abort": ["SIGABRT"]}
+    error_msgs: ClassVar = {
+        "seg_fault": ["SIGSEGV"],
+        "out_of_memory": ["insufficient virtual memory"],
+        "abort": ["SIGABRT"],
+    }
 
-    def __init__(self, std_err="std_err.txt"):
+    def __init__(self, std_err="std_err.txt") -> None:
         """Initialize the handler with the output file to check.
 
         Args:
@@ -68,7 +73,7 @@ def __init__(self, std_err="std_err.txt"):
                 default redirect used by :class:`custodian.cp2k.jobs.Cp2kJob`.
""" self.std_err = std_err - self.errors = set() + self.errors: set[str] = set() def check(self, directory="./"): """Check for error in std_err file.""" @@ -133,7 +138,7 @@ class UnconvergedScfErrorHandler(ErrorHandler): is_monitor = True - def __init__(self, input_file="cp2k.inp", output_file="cp2k.out"): + def __init__(self, input_file="cp2k.inp", output_file="cp2k.out") -> None: """Initialize the error handler from a set of input and output files. Args: @@ -160,9 +165,7 @@ def check(self, directory="./"): # General catch for SCF not converged # TODO: should not-static runs allow for some unconverged scf? Leads to issues in my experience scf = out.data["scf_converged"] or [True] - if not scf[0]: - return True - return False + return bool(not scf[0]) def correct(self, directory="./"): """Apply corrections to aid convergence if possible.""" @@ -401,7 +404,7 @@ class DivergingScfErrorHandler(ErrorHandler): is_monitor = True - def __init__(self, output_file="cp2k.out", input_file="cp2k.inp"): + def __init__(self, output_file="cp2k.out", input_file="cp2k.inp") -> None: """Initializes the error handler from an output files. Args: @@ -463,7 +466,7 @@ class FrozenJobErrorHandler(ErrorHandler): is_monitor = True - def __init__(self, input_file="cp2k.inp", output_file="cp2k.out", timeout=3600): + def __init__(self, input_file="cp2k.inp", output_file="cp2k.out", timeout=3600) -> None: """Initialize the handler with the output file to check. Args: @@ -488,9 +491,7 @@ def check(self, directory="./"): try: out.ran_successfully() # If job finished, then hung, don't need to wait very long to confirm frozen - if time.time() - st.st_mtime > 300: - return True - return False + return time.time() - st.st_mtime > 300 except ValueError: pass @@ -604,7 +605,7 @@ class AbortHandler(ErrorHandler): is_monitor = False is_terminating = True - def __init__(self, input_file="cp2k.inp", output_file="cp2k.out"): + def __init__(self, input_file="cp2k.inp", output_file="cp2k.out") -> None: """ Initialize handler for CP2K abort messages. @@ -618,9 +619,9 @@ def __init__(self, input_file="cp2k.inp", output_file="cp2k.out"): "cholesky": r"(Cholesky decomposition failed. Matrix ill conditioned ?)", "cholesky_scf": r"(Cholesky decompose failed: the matrix is not positive definite or)", } - self.responses = [] + self.responses: list[str] = [] - def check(self, directory="./"): + def check(self, directory="./") -> bool: """Check for abort messages.""" matches = regrep( os.path.join(directory, self.output_file), @@ -790,7 +791,7 @@ def __init__( pgf_orb_strict=1e-20, eps_default_strict=1e-12, eps_gvg_strict=1e-10, - ): + ) -> None: """ Initialize the error handler. @@ -825,9 +826,7 @@ def check(self, directory="./"): """Check for stuck SCF convergence.""" conv = get_conv(os.path.join(directory, self.output_file)) counts = [len([*group]) for _k, group in itertools.groupby(conv)] - if any(cnt > self.max_same for cnt in counts): - return True - return False + return bool(any(cnt > self.max_same for cnt in counts)) def correct(self, directory="/."): """Correct issue if possible.""" @@ -948,7 +947,7 @@ def __init__( max_iter=20, max_total_iter=200, optimizers=("BFGS", "CG", "BFGS", "CG"), - ): + ) -> None: """ Initialize the error handler. 
@@ -974,9 +973,7 @@ def check(self, directory="./"):
         """Check for unconverged geometry optimization."""
         o = Cp2kOutput(os.path.join(directory, self.output_file))
         o.convergence()
-        if o.data.get("geo_opt_not_converged"):
-            return True
-        return False
+        return bool(o.data.get("geo_opt_not_converged"))
 
     def correct(self, directory):
         """Correct issue if possible."""
@@ -1034,7 +1031,7 @@ class WalltimeHandler(ErrorHandler):
     raises_runtime_error = False
     is_terminating = False
 
-    def __init__(self, output_file="cp2k.out", enable_checkpointing=True):
+    def __init__(self, output_file="cp2k.out", enable_checkpointing=True) -> None:
         """
         Args:
             output_file (str): name of the cp2k output file
@@ -1046,15 +1043,15 @@ def __init__(self, output_file="cp2k.out", enable_checkpointing=True):
 
     def check(self, directory="./"):
         """Check if internal CP2K walltime handler was tripped."""
-        if regrep(
-            filename=os.path.join(directory, self.output_file),
-            patterns={"walltime": r"(exceeded requested execution time)"},
-            reverse=True,
-            terminate_on_match=True,
-            postprocess=bool,
-        ).get("walltime"):
-            return True
-        return False
+        return bool(
+            regrep(
+                filename=os.path.join(directory, self.output_file),
+                patterns={"walltime": "(exceeded requested execution time)"},
+                reverse=True,
+                terminate_on_match=True,
+                postprocess=bool,
+            ).get("walltime")
+        )
 
     def correct(self, directory="./"):
         """Dump checkpoint info if requested."""
diff --git a/custodian/cp2k/interpreter.py b/custodian/cp2k/interpreter.py
index 32bf0a0e..7baa1ffd 100644
--- a/custodian/cp2k/interpreter.py
+++ b/custodian/cp2k/interpreter.py
@@ -20,7 +20,7 @@ class Cp2kModder(Modder):
     also supports modifications that are file operations (e.g. copying).
     """
 
-    def __init__(self, filename="cp2k.inp", actions=None, strict=True, ci=None, directory="./"):
+    def __init__(self, filename="cp2k.inp", actions=None, strict=True, ci=None, directory="./") -> None:
         """Initialize a Modder for Cp2kInput sets.
 
         Args:
@@ -35,7 +35,8 @@ def __init__(self, filename="cp2k.inp", actions=None, strict=True, ci=None, dire
                 supplied, a ValueError is raised. Defaults to True.
             ci (Cp2kInput): A Cp2kInput object from the current directory.
                 Initialized automatically if not passed (but passing it will
-                avoid having to reparse the directory).
+                avoid having to re-parse the directory).
+            directory (str): The directory containing the Cp2kInput set. Defaults to "./".
         """
         self.directory = directory
         self.ci = ci or Cp2kInput.from_file(os.path.join(self.directory, filename))
@@ -43,7 +44,7 @@ def __init__(self, filename="cp2k.inp", actions=None, strict=True, ci=None, dire
         actions = actions or [FileActions, DictActions]
         super().__init__(actions, strict)
 
-    def apply_actions(self, actions):
+    def apply_actions(self, actions) -> None:
         """
         Applies a list of actions to the CP2K Input Set and rewrites modified
         files.
@@ -68,7 +69,7 @@ def apply_actions(self, actions):
         self.ci.write_file(os.path.join(self.directory, self.filename))
 
     @staticmethod
-    def _modify(modification, obj):
+    def _modify(modification, obj) -> None:
         """
         Note that modify makes actual in-place modifications. It does not
         return a copy.
diff --git a/custodian/cp2k/jobs.py b/custodian/cp2k/jobs.py
index ccc1a7e5..a1f7330a 100644
--- a/custodian/cp2k/jobs.py
+++ b/custodian/cp2k/jobs.py
@@ -39,7 +39,7 @@ def __init__(
         backup=True,
         settings_override=None,
         restart=False,
-    ):
+    ) -> None:
         """
         This constructor is necessarily complex due to the need for flexibility.
         For standard kinds of runs, it's often better to use one
@@ -77,10 +77,10 @@ def __init__(
         self.final = final
         self.backup = backup
         self.suffix = suffix
-        self.settings_override = settings_override if settings_override else []
+        self.settings_override = settings_override or []
         self.restart = restart
 
-    def setup(self, directory="./"):
+    def setup(self, directory="./") -> None:
         """
         Performs initial setup for Cp2k in three stages. First, if custodian is running in restart mode, then
         the restart function will copy the restart file to self.input_file, and remove any previous WFN initialization
@@ -128,7 +128,7 @@ def run(self, directory="./"):
 
     # TODO double jobs, file manipulations, etc. should be done in atomate in the future
     # and custodian should only run the job itself
-    def postprocess(self, directory="./"):
+    def postprocess(self, directory="./") -> None:
         """Postprocessing includes renaming and gzipping where necessary."""
         files = os.listdir(directory)
         if os.path.isfile(self.output_file) and self.suffix != "":
@@ -147,7 +147,7 @@ def postprocess(self, directory="./"):
         if os.path.isfile(os.path.join(directory, "continue.json")):
             os.remove(os.path.join(directory, "continue.json"))
 
-    def terminate(self, directory="./"):
+    def terminate(self, directory="./") -> None:
         """Terminate cp2k."""
         for cmd in self.cp2k_cmd:
             if "cp2k" in cmd:
diff --git a/custodian/cp2k/utils.py b/custodian/cp2k/utils.py
index 0a0a5eb9..c0bfa4ae 100644
--- a/custodian/cp2k/utils.py
+++ b/custodian/cp2k/utils.py
@@ -8,7 +8,7 @@
 from pymatgen.io.cp2k.outputs import Cp2kOutput
 
 
-def restart(actions, output_file, input_file, no_actions_needed=False):
+def restart(actions, output_file, input_file, no_actions_needed=False) -> None:
     """
     Helper function. To discard old restart if convergence is already good, and copy
     the restart file to the input file. Restart also supports switching back and forth
@@ -51,7 +51,7 @@ def restart(actions, output_file, input_file, no_actions_needed=False):
 
 
 # TODO Not sure I like this solution
-def cleanup_input(ci):
+def cleanup_input(ci) -> None:
     """
     Intention is to use this to remove problematic parts of the input file.
 
@@ -67,7 +67,7 @@ def cleanup_input(ci):
             cleanup_input(val)
 
 
-def activate_ot(actions, ci):
+def activate_ot(actions, ci) -> None:
     """
     Activate OT scheme.
 
@@ -118,7 +118,7 @@ def activate_ot(actions, ci):
     actions += ot_actions
 
 
-def activate_diag(actions):
+def activate_diag(actions) -> None:
     """
     Activate diagonalization.
 
@@ -164,14 +164,12 @@ def can_use_ot(output, ci, minimum_band_gap=0.1):
         minimum_band_gap (float): the minimum band gap for OT
     """
     output.parse_dos()
-    if (
+    return bool(
         not ci.check("FORCE_EVAL/DFT/SCF/OT")
         and not ci.check("FORCE_EVAL/DFT/KPOINTS")
         and output.band_gap
         and output.band_gap > minimum_band_gap
-    ):
-        return True
-    return False
+    )
 
 
 def tail(filename, n=10):
diff --git a/custodian/cp2k/validators.py b/custodian/cp2k/validators.py
index 70c2af11..741fb138 100644
--- a/custodian/cp2k/validators.py
+++ b/custodian/cp2k/validators.py
@@ -1,5 +1,7 @@
 """Validators for CP2K calculations."""
 
+from __future__ import annotations
+
 import os
 from abc import abstractmethod, abstractproperty
 
@@ -39,7 +41,7 @@ def no_children(self, directory="./"):
 class Cp2kOutputValidator(Cp2kValidator):
     """Checks that a valid cp2k output file was generated."""
 
-    def __init__(self, output_file="cp2k.out"):
+    def __init__(self, output_file="cp2k.out") -> None:
         """
         Args:
             output_file (str): cp2k output file to analyze.
@@ -47,7 +49,7 @@ def __init__(self, output_file="cp2k.out"):
         self.output_file = output_file
         self._check = False
 
-    def check(self, directory="./"):
+    def check(self, directory="./") -> bool | None:
         """
         Check for valid output. Checks that the end of the
         program was reached, and that convergence was
@@ -72,16 +74,16 @@ def check(self, directory="./"):
             return True
 
     @property
-    def kill(self, directory="./"):
+    def kill(self, directory="./") -> bool:
         """Kill the job with raise error."""
         return True
 
     @property
-    def exit(self, directory="./"):
+    def exit(self, directory="./") -> bool:
         """Don't raise error, but exit the job."""
         return True
 
     @property
-    def no_children(self, directory="./"):
+    def no_children(self, directory="./") -> bool:
         """Job should not have children."""
         return True
diff --git a/custodian/custodian.py b/custodian/custodian.py
index a2b5e3e8..0441d342 100644
--- a/custodian/custodian.py
+++ b/custodian/custodian.py
@@ -4,6 +4,8 @@
 ErrorHandlers and Jobs.
 """
 
+from __future__ import annotations
+
 import datetime
 import logging
 import os
@@ -124,7 +126,7 @@ def __init__(
         terminate_on_nonzero_returncode=True,
         directory=None,
         **kwargs,
-    ):
+    ) -> None:
         """Initialize a Custodian from a list of jobs and error handlers.
 
         Args:
@@ -210,12 +212,12 @@ def __init__(
     def _load_checkpoint(directory):
         restart = 0
         run_log = []
-        chkpts = glob(os.path.join(directory, "custodian.chk.*.tar.gz"))
-        if chkpts:
-            chkpt = sorted(chkpts, key=lambda c: int(c.split(".")[-3]))[0]
-            restart = int(chkpt.split(".")[-3])
-            logger.info(f"Loading from checkpoint file {chkpt}...")
-            with tarfile.open(chkpt) as file:
+        chk_pts = glob(os.path.join(directory, "custodian.chk.*.tar.gz"))
+        if chk_pts:
+            chk_pt = min(chk_pts, key=lambda c: int(c.split(".")[-3]))
+            restart = int(chk_pt.split(".")[-3])
+            logger.info(f"Loading from checkpoint file {chk_pt}...")
+            with tarfile.open(chk_pt) as file:
 
                 def is_within_directory(directory, target):
                     abs_directory = os.path.abspath(directory)
@@ -225,7 +227,7 @@ def is_within_directory(directory, target):
 
                     return prefix == abs_directory
 
-                def safe_extract(tar, path=directory, members=None, *, numeric_owner=False):
+                def safe_extract(tar, path=directory, members=None, *, numeric_owner=False) -> None:
                     for member in tar.getmembers():
                         member_path = os.path.join(path, member.name)
                         if not is_within_directory(path, member_path):
@@ -240,12 +242,12 @@ def safe_extract(tar, path=directory, members=None, *, numeric_owner=False):
         return restart, run_log
 
     @staticmethod
-    def _delete_checkpoints(directory):
+    def _delete_checkpoints(directory) -> None:
         for file in glob(os.path.join(directory, "custodian.chk.*.tar.gz")):
             os.remove(file)
 
     @staticmethod
-    def _save_checkpoint(directory, index):
+    def _save_checkpoint(directory, index) -> None:
         try:
             Custodian._delete_checkpoints(directory)
             n = os.path.join(directory, f"custodian.chk.{index}.tar.gz")
@@ -379,7 +381,7 @@ def run(self):
         try:
             # skip jobs until the restart
-            for job_n, job in islice(enumerate(self.jobs, 1), self.restart, None):
+            for job_n, job in islice(enumerate(self.jobs, start=1), self.restart, None):
                 self._run_job(job_n, job)
                 # We do a dump of the run log after each job.
                 dumpfn(self.run_log, os.path.join(self.directory, Custodian.LOG_FILE), cls=MontyEncoder, indent=4)
@@ -408,7 +410,7 @@ def run(self):
 
         return self.run_log
 
-    def _run_job(self, job_n, job):
+    def _run_job(self, job_n, job) -> None:
         """
         Runs a single job.
@@ -653,16 +655,14 @@ def _do_check(self, handlers, terminate_func=None):
                     handler.max_num_corrections is not None
                     and handler.n_applied_corrections >= handler.max_num_corrections
                 ):
-                    msg = (
-                        f"Maximum number of corrections {handler.max_num_corrections} reached for handler {handler}"
-                    )
+                    msg = f"Maximum number of corrections {handler.max_num_corrections} reached for {handler=}"
                     if handler.raise_on_max:
                         self.run_log[-1]["handler"] = handler
                         self.run_log[-1]["max_errors_per_handler"] = True
                         raise MaxCorrectionsPerHandlerError(
                             msg, raises=True, max_errors_per_handler=handler.max_num_corrections, handler=handler
                         )
-                    logger.warning(msg + " Correction not applied.")
+                    logger.warning(f"{msg} Correction not applied.")
                     continue
                 if terminate_func is not None and handler.is_terminating:
                     logger.info("Terminating job")
@@ -679,9 +679,9 @@ def _do_check(self, handlers, terminate_func=None):
                     raise
                 import traceback
 
-                logger.error(f"Bad handler {handler}")
+                logger.error(f"Bad {handler=}")
                 logger.error(traceback.format_exc())
-                corrections.append({"errors": [f"Bad handler {handler}"], "actions": []})
+                corrections.append({"errors": [f"Bad {handler=}"], "actions": []})
         self.total_errors += len(corrections)
         self.errors_current_job += len(corrections)
         self.run_log[-1]["corrections"] += corrections
@@ -717,7 +717,7 @@ def postprocess(self, directory="./"):
         etc.
         """
 
-    def terminate(self, directory="./"):
+    def terminate(self, directory="./") -> None:
         """Implement termination function."""
         return
 
@@ -760,7 +760,7 @@ class ErrorHandler(MSONable):
         "actions":[])
     """
 
-    max_num_corrections = None
+    max_num_corrections: int | None = None
     raise_on_max = False
     """
     Whether corrections from this specific handler should be applied only a
@@ -813,7 +813,7 @@ def n_applied_corrections(self):
         return self._num_applied_corrections
 
     @n_applied_corrections.setter
-    def n_applied_corrections(self, value):
+    def n_applied_corrections(self, value) -> None:
         """
         Setter for the number of corrections applied.
 
@@ -844,7 +844,7 @@ def check(self, directory="./"):
 class CustodianError(RuntimeError):
     """Exception class for Custodian errors."""
 
-    def __init__(self, message, raises=False):
+    def __init__(self, message, raises=False) -> None:
         """Initialize the error with a message.
 
         Args:
@@ -859,7 +859,7 @@ def __init__(self, message, raises=False):
 class ValidationError(CustodianError):
     """Error raised when a validator does not pass the check."""
 
-    def __init__(self, message, raises, validator):
+    def __init__(self, message, raises, validator) -> None:
         """
         Args:
             message (str): Message passed to Exception
@@ -873,7 +873,7 @@ def __init__(self, message, raises, validator):
 class NonRecoverableError(CustodianError):
     """Error raised when a handler found an error but could not fix it."""
 
-    def __init__(self, message, raises, handler):
+    def __init__(self, message, raises, handler) -> None:
         """
         Args:
             message (str): Message passed to Exception
@@ -891,7 +891,7 @@ class ReturnCodeError(CustodianError):
 class MaxCorrectionsError(CustodianError):
     """Error raised when the maximum allowed number of errors is reached."""
 
-    def __init__(self, message, raises, max_errors):
+    def __init__(self, message, raises, max_errors) -> None:
         """
         Args:
             message (str): Message passed to Exception
@@ -905,7 +905,7 @@ def __init__(self, message, raises, max_errors):
 class MaxCorrectionsPerJobError(CustodianError):
     """Error raised when the maximum allowed number of errors per job is reached."""
 
-    def __init__(self, message, raises, max_errors_per_job, job):
+    def __init__(self, message, raises, max_errors_per_job, job) -> None:
         """
         Args:
             message (str): Message passed to Exception
diff --git a/custodian/feff/handlers.py b/custodian/feff/handlers.py
index 3ee52158..55b6bbab 100644
--- a/custodian/feff/handlers.py
+++ b/custodian/feff/handlers.py
@@ -1,5 +1,7 @@
 """This module implements specific error handler for FEFF runs."""
 
+from __future__ import annotations
+
 import logging
 import os
 import re
@@ -35,7 +37,7 @@ class UnconvergedErrorHandler(ErrorHandler):
 
     is_monitor = False
 
-    def __init__(self, output_filename="log1.dat"):
+    def __init__(self, output_filename="log1.dat") -> None:
         """Initialize the handler with the output file to check.
 
         Args:
@@ -52,7 +54,7 @@ def check(self, directory="./"):
         """
         return self._notconverge_check(directory)
 
-    def _notconverge_check(self, directory):
+    def _notconverge_check(self, directory) -> bool | None:
         # Process the output file and get converge information
         not_converge_pattern = re.compile("Convergence not reached.*")
         converge_pattern = re.compile("Convergence reached.*")
diff --git a/custodian/feff/interpreter.py b/custodian/feff/interpreter.py
index 2192ab8e..df44cdbd 100644
--- a/custodian/feff/interpreter.py
+++ b/custodian/feff/interpreter.py
@@ -11,8 +11,9 @@
 class FeffModder(Modder):
     """A Modder for FeffInput sets."""
 
-    def __init__(self, actions=None, strict=True, feffinp=None, directory="./"):
-        """
+    def __init__(self, actions=None, strict=True, feffinp=None, directory="./") -> None:
+        """Initialize a FeffModder.
+
         Args:
             actions ([Action]): A sequence of supported actions. See
             actions ([Action]): A sequence of supported actions. See
@@ -24,8 +25,8 @@ def __init__(self, actions=None, strict=True, feffinp=None, directory="./"):
                 supplied, a ValueError is raised. Defaults to True.
             feffinp (FEFFInput): A FeffInput object from the current directory.
                 Initialized automatically if not passed (but passing it will
-                avoid having to reparse the directory).
-                directory (str): Directory to run in
+                avoid having to re-parse the directory).
+            directory (str): The directory containing the FeffInput set. Defaults to "./".
""" self.directory = directory self.feffinp = feffinp or FEFFDictSet.from_directory(self.directory) @@ -33,7 +34,7 @@ def __init__(self, actions=None, strict=True, feffinp=None, directory="./"): actions = actions or [FileActions, DictActions] super().__init__(actions, strict) - def apply_actions(self, actions): + def apply_actions(self, actions) -> None: """ Applies a list of actions to the FEFF Input Set and rewrites modified files. @@ -46,9 +47,9 @@ def apply_actions(self, actions): modified = [] for action in actions: if "dict" in action: - k = action["dict"] - modified.append(k) - self.feffinp[k] = self.modify_object(action["action"], self.feffinp[k]) + key = action["dict"] + modified.append(key) + self.feffinp[key] = self.modify_object(action["action"], self.feffinp[key]) elif "file" in action: self.modify(action["action"], action["file"]) else: @@ -58,9 +59,9 @@ def apply_actions(self, actions): feff_input = "\n\n".join( str(feff[key]) for key in ("HEADER", "PARAMETERS", "POTENTIALS", "ATOMS") if key in feff ) - for k, v in feff.items(): - with open(os.path.join(self.directory, k), "w") as file: - file.write(str(v)) + for key, val in feff.items(): + with open(os.path.join(self.directory, key), "w") as file: + file.write(str(val)) with open(os.path.join(self.directory, "feff.inp"), "w") as file: file.write(feff_input) diff --git a/custodian/feff/jobs.py b/custodian/feff/jobs.py index 8796245f..449944a3 100644 --- a/custodian/feff/jobs.py +++ b/custodian/feff/jobs.py @@ -34,7 +34,7 @@ def __init__( backup=True, gzipped=False, gzipped_prefix="feff_out", - ): + ) -> None: """ This constructor is used for a standard FEFF initialization. @@ -85,7 +85,7 @@ def run(self, directory="./"): # On TSCC, need to run shell command return subprocess.Popen(self.feff_cmd, cwd=directory, stdout=f_std, stderr=f_err, shell=True) # pylint: disable=R1732 - def postprocess(self, directory="./"): + def postprocess(self, directory="./") -> None: """Renaming or gzipping all the output as needed.""" if self.gzipped: backup("*", directory=directory, prefix=self.gzipped_prefix) diff --git a/custodian/gaussian/__init__.py b/custodian/gaussian/__init__.py new file mode 100644 index 00000000..cd7a1386 --- /dev/null +++ b/custodian/gaussian/__init__.py @@ -0,0 +1,8 @@ +"""This package implements various Gaussian Jobs and Error Handlers.""" + +__author__ = "Rasha Atwi" +__version__ = "0.1" +__maintainer__ = "Rasha Atwi" +__email__ = "rasha.atwi@stonybrook.edu" +__status__ = "Alpha" +__date__ = "5/13/21" diff --git a/custodian/gaussian/handlers.py b/custodian/gaussian/handlers.py new file mode 100644 index 00000000..70b2d546 --- /dev/null +++ b/custodian/gaussian/handlers.py @@ -0,0 +1,893 @@ +"""This module implements error handlers for Gaussian runs.""" + +from __future__ import annotations + +import datetime +import glob +import logging +import math +import os +import re +import shutil +from typing import TYPE_CHECKING, Any, ClassVar + +import numpy as np +from monty.io import zopen +from pymatgen.io.gaussian import GaussianInput, GaussianOutput + +from custodian.custodian import ErrorHandler +from custodian.utils import backup + +if TYPE_CHECKING: + from collections.abc import Iterable + +__author__ = "Rasha Atwi" +__version__ = "0.1" +__maintainer__ = "Rasha Atwi" +__email__ = "rasha.atwi@stonybrook.edu" +__status__ = "Alpha" +__date__ = "5/13/21" + +BACKUP_FILES = { + "checkpoint": "*.[Cc][Hh][Kk]", + "form_checkpoint": "*.[Ff][Cc][Hh][Kk]", + "rwf": "*.[Rr][Ww][Ff]", + "inp": "*.[Ii][Nn][Pp]", + "int": 
"*.[Ii][Nn][Tt]", + "d2e": "*.[Dd]2[Ee]", + "skr": "*.[Ss][Kk][Rr]", + "convergence": "convergence.png", +} + + +class GaussianErrorHandler(ErrorHandler): + """ + Master GaussianErrorHandler class that handles a number of common errors that occur + during Gaussian runs. + """ + + # definition of job errors as they appear in Gaussian output file + error_defs: ClassVar = { + "Optimization stopped": "opt_steps", + "Convergence failure": "scf_convergence", + "FormBX had a problem": "linear_bend", + "Linear angle in Tors.": "linear_bend", + "Inv3 failed in PCMMkU": "solute_solvent_surface", + "Error in internal coordinate system": "internal_coords", + "End of file in ZSymb": "zmatrix", + "There are no atoms in this input structure !": "missing_mol", + "Atom specifications unexpectedly found in input stream.": "found_coords", + "End of file reading connectivity.": "coords", + "FileIO operation on non-existent file.": "missing_file", + "No data on chk file.": "empty_file", + "Bad file opened by FileIO": "bad_file", + "Z-matrix optimization but no Z-matrix variables.": "coord_inputs", + "A syntax error was detected in the input line.": "syntax", + r"The combination of multiplicity ([0-9]+) and \s+? ([0-9]+) electrons is impossible.": "charge", + "Out-of-memory error in routine": "insufficient_mem", + } + + error_patt = re.compile("|".join(list(error_defs))) + recom_mem_patt = re.compile( + r"Use %mem=([0-9]+)MW to provide the minimum amount of memory required to complete this step." + ) + conv_criteria: ClassVar = { + "max_force": re.compile(r"\s+(Maximum Force)\s+(-?\d+.?\d*|.*)\s+(-?\d+.?\d*)"), + "rms_force": re.compile(r"\s+(RMS {5}Force)\s+(-?\d+.?\d*|.*)\s+(-?\d+.?\d*)"), + "max_disp": re.compile(r"\s+(Maximum Displacement)\s+(-?\d+.?\d*|.*)\s+(-?\d+.?\d*)"), + "rms_disp": re.compile(r"\s+(RMS {5}Displacement)\s+(-?\d+.?\d*|.*)\s+(-?\d+.?\d*)"), + } + + grid_patt = re.compile(r"(-?\d{5})") + GRID_NAMES = ( + "finegrid", + "fine", + "superfinegrid", + "superfine", + "coarsegrid", + "coarse", + "sg1grid", + "sg1", + "pass0grid", + "pass0", + ) + MEM_UNITS = ("kb", "mb", "gb", "tb", "kw", "mw", "gw", "tw") + + activate_better_guess = False + + def __init__( + self, + input_file: str, + output_file: str, + stderr_file: str = "stderr.txt", + cart_coords: bool = True, + scf_max_cycles: int = 100, + opt_max_cycles: int = 100, + job_type: str = "normal", + lower_functional: str | None = None, + lower_basis_set: str | None = None, + prefix: str = "error", + check_convergence: bool = True, + ): + """ + Initialize the GaussianErrorHandler class. + + Args: + input_file (str): The name of the input file for the Gaussian job. + output_file (str): The name of the output file for the Gaussian job. + stderr_file (str): The name of the standard error file for the Gaussian job. + Defaults to 'stderr.txt'. + cart_coords (bool): Whether the coordinates are in cartesian format. + Defaults to True. + scf_max_cycles (int): The maximum number of SCF cycles. Defaults to 100. + opt_max_cycles (int): The maximum number of optimization cycles. Defaults to + 100. + job_type (str): The type of job to run. Supported options are 'normal' and + 'better_guess'. Defaults to 'normal'. If 'better_guess' is chosen, the + job will be rerun at a lower level of theory to get a better initial + guess of molecular orbitals or geometry, if needed. + lower_functional (str): The lower level of theory to use for a better guess. + lower_basis_set (str): The lower basis set to use for a better guess. 
+            prefix (str): The prefix to use for the backup files. Defaults to error,
+                which means a series of error.1.tar.gz, error.2.tar.gz, ... will be
+                generated.
+            check_convergence (bool): Whether to check for convergence during an
+                optimization job. Defaults to True. If True, the convergence data will
+                be monitored and plotted (convergence criteria versus cycle number) and
+                saved to a file called 'convergence.png'.
+        """
+        self.input_file = input_file
+        self.output_file = output_file
+        self.stderr_file = stderr_file
+        self.cart_coords = cart_coords
+        self.errors: set[str] = set()
+        self.gout: GaussianOutput = None
+        self.gin: GaussianInput = None
+        self.scf_max_cycles = scf_max_cycles
+        self.opt_max_cycles = opt_max_cycles
+        self.job_type = job_type
+        self.lower_functional = lower_functional
+        self.lower_basis_set = lower_basis_set
+        self.prefix = prefix
+        self.check_convergence = check_convergence
+        self.conv_data: dict[str, dict[str, Any]] = {}
+        self.recom_mem: float | None = None
+        self.logger: logging.Logger = logging.getLogger(self.__class__.__name__)
+        logging.basicConfig(level=logging.INFO)
+
+    @staticmethod
+    def _recursive_lowercase(obj: dict[str, Any] | str | Iterable[Any]) -> dict[str, Any] | str | Iterable[Any]:
+        """
+        Recursively convert all string elements in a given object to lowercase.
+
+        This method iterates over the input object. If the object is a dictionary, it
+        converts all its string keys and values to lowercase, applying the same logic
+        recursively to the values. If the object is a string, it directly converts it
+        to lowercase. If the object is iterable (but not a string or dictionary), it
+        applies the same lowercase conversion to each element in the iterable. For all
+        other types, the object is returned unchanged.
+
+        Args:
+            obj (dict | str | iterable): The object to be converted to lowercase.
+                This can be a dictionary, a string, or any iterable collection.
+                Non-iterable objects or non-string elements within iterables are
+                returned unchanged.
+
+        Returns:
+            dict | str | iterable:
+                A new object with all string elements converted to
+                lowercase. The type of the returned object matches the type of the
+                input `obj`.
+        """
+        if isinstance(obj, dict):
+            return {k.lower(): GaussianErrorHandler._recursive_lowercase(v) for k, v in obj.items()}
+        if isinstance(obj, str):
+            return obj.lower()
+        if hasattr(obj, "__iter__"):
+            return [GaussianErrorHandler._recursive_lowercase(i) for i in obj]
+        return obj
+
+    @staticmethod
+    def _recursive_remove_space(obj: dict[str, Any]) -> dict[str, Any]:
+        """
+        Recursively remove leading and trailing whitespace from keys and string values
+        in a dictionary.
+
+        This method processes each key-value pair in the given dictionary. If a value
+        is a string, it strips leading and trailing whitespace from it. If a value is
+        a dictionary, it applies the same stripping process recursively to that
+        dictionary. The keys of the dictionary are also stripped of leading and trailing
+        whitespace. Non-string values are included in the output without modification.
+
+        Args:
+            obj (dict): The dictionary whose keys and string values will have whitespace
+                removed. It can be nested, with dictionaries as values, which will
+                also be processed.
+
+        Returns:
+            dict:
+                A new dictionary with all keys and string values stripped of leading
+                and trailing whitespace. The structure of the dictionary is preserved.
+ """ + return { + key.strip(): GaussianErrorHandler._recursive_remove_space(value) + if isinstance(value, dict) + else value.strip() + if isinstance(value, str) + else value + for key, value in obj.items() + } + + @staticmethod + def _update_route_params(route_params: dict, key: str, value: str | dict) -> dict: + """ + Update Gaussian route parameters with new key-value pairs, handling nested + structures. + + Args: + route_params (dict): The dictionary of route parameters to be updated. + key (str): The key in the route parameters to update or add. + value (str | dict): The new value to set or add to the route parameters. + This can be a string or a dictionary. If it is a dictionary, it is + merged with the existing dictionary at `key`. + + Returns: + dict: + The updated route parameters. + """ + obj = route_params.get(key, {}) + if not obj: + route_params[key] = value + elif isinstance(obj, str): + update = {key: {obj: None, **value}} if isinstance(value, dict) else {key: {obj: None, value: None}} + route_params.update(update) + elif isinstance(obj, dict): + route_params[key].update(value if isinstance(value, dict) else {value: None}) + return route_params + + @staticmethod + def _int_keyword(route_params: dict[str, str | dict]) -> tuple[str, str | dict]: + """ + Determine the keyword used for 'Integral' in the Gaussian route parameters of + the input file. Possible keywords are 'int' and 'integral'. If neither keyword + is found, an empty string is returned. + + Args: + route_params (dict): The route parameters dictionary. + + Returns: + tuple: + The key ('int' or 'integral' or an empty string if neither is found), + and the value associated with this key in `route_params`. If the key is + not found, the second element in the tuple is an empty string. + """ + if "int" in route_params: + int_key = "int" + elif "integral" in route_params: + int_key = "integral" + else: + int_key = "" + # int_key = 'int' if 'int' in route_params else 'integral' + return int_key, route_params.get(int_key, "") + + @staticmethod + def _int_grid(route_params: dict[str, str | dict]) -> bool: + """ + Check if the integration grid used for numerical integrations matches specific + options. + + Args: + route_params (dict): The route parameters dictionary. + + Returns: + bool: + True if the integral grid parameter matches one of the predefined + options, otherwise False. + """ + _, int_value = GaussianErrorHandler._int_keyword(route_params) + options = ["ultrafine", "ultrafinegrid", "99590"] + + if isinstance(int_value, str) and int_value in options: + return True + if isinstance(int_value, dict): + if int_value.get("grid") in options: + return True + if set(int_value) & set(options): + return True + return False + + @staticmethod + def convert_mem(mem: float, unit: str) -> float: + """ + Convert memory size between different units to megabytes (MB). + + Args: + mem (float): The memory size to convert. + unit (str): The unit of the input memory size. Supported units include + 'kb', 'mb', 'gb', 'tb', and word units ('kw', 'mw', 'gw', 'tw'), or an + empty string for default conversion (from words). + + Returns: + float: + The memory size in MB. 
+ """ + conversion = { + "kb": 1 / 1000, + "mb": 1, + "gb": 1000, + "tb": 1000**2, + "": 7.63e-6, + "kw": 7.63e-3, + "mw": 7.63, + "gw": 7.63e3, + "tw": 7.63e6, + } + return mem * conversion[unit] + + @staticmethod + def _find_dynamic_memory_allocated(link0_params: dict[str, str]) -> tuple[str | None, float | None]: + """ + Find and convert the memory allocation from Gaussian link0 parameters. This + method searches for the '%mem' key in the link0 parameters of a Gaussian job + to determine the memory allocated for the job. It extracts the memory value + and its unit, then converts the memory allocation to MB. The default memory + unit used in Gaussian is words, and this method accounts for different units + specified in the memory string. + + Args: + link0_params (dict): A dictionary of link0 parameters from a Gaussian input + file. + + Returns: + tuple: + The memory key (None if '%mem' is not found) and the converted memory + allocation in MB. If '%mem' is not found, the second element will be None. + """ + mem_key = None + dynamic_mem = None + for k in link0_params: + if k.lower() == "%mem": + mem_key = k + break + if mem_key: + dynamic_mem_str = link0_params[mem_key] + # default memory unit in Gaussian is words + dynamic_mem_str = dynamic_mem_str.lower() + mem_unit = "" + for unit in GaussianErrorHandler.MEM_UNITS: + if unit in dynamic_mem_str: + mem_unit = unit + break + dynamic_mem = float(dynamic_mem_str.strip(mem_unit)) + dynamic_mem = GaussianErrorHandler.convert_mem(dynamic_mem, mem_unit) + return mem_key, dynamic_mem + + def _add_int(self) -> bool: + """ + Check and update the integration grid setting ('int') in the Gaussian input + file's route parameters to 'ultrafine', if necessary. + + Returns: + bool: True if changes were made to the integration grid setting, False otherwise. + """ + if not GaussianErrorHandler._int_grid(self.gin.route_parameters): + # nothing int is set or is set to different values + warning_msg = ( + "Changing the numerical integration grid. " + "This will bring changes in the predicted " + "total energy. It is necessary to use the same " + "integration grid in all the calculations in " + "the same study in order for the computed " + "energies and molecular properties to be " + "comparable." 
+            )
+
+            int_key, int_value = GaussianErrorHandler._int_keyword(self.gin.route_parameters)
+            if not int_value and GaussianErrorHandler._not_g16(self.gout):
+                # if int keyword is missing and Gaussian version is 03 or
+                # 09, set integration grid to ultrafine
+                int_key = int_key or "int"
+                self.logger.warning(warning_msg)
+                self.gin.route_parameters[int_key] = "ultrafine"
+                return True
+            if isinstance(int_value, dict):
+                # if int grid is set and is different from ultrafine,
+                # set it to ultrafine (works when others int options are
+                # specified)
+                flag = False
+                if "grid" in self.gin.route_parameters[int_key]:
+                    flag = True
+                for key in self.gin.route_parameters[int_key]:
+                    if key in self.GRID_NAMES or self.grid_patt.match(key):
+                        self.gin.route_parameters[int_key].pop(key)
+                        flag = True
+                        break
+                if flag or GaussianErrorHandler._not_g16(self.gout):
+                    self.logger.warning(warning_msg)
+                    self.gin.route_parameters[int_key]["grid"] = "ultrafine"
+                    return True
+            if isinstance(int_value, str) and (int_value in self.GRID_NAMES or self.grid_patt.match(int_value)):
+                # if int grid is set and is different from ultrafine,
+                # set it to ultrafine (works when no other int options
+                # are specified)
+                self.logger.warning(warning_msg)
+                self.gin.route_parameters[int_key] = "ultrafine"
+                return True
+            if GaussianErrorHandler._not_g16(self.gout):
+                # if int grid is not specified, and Gaussian version is
+                # not 16, update with ultrafine integral grid
+                self.logger.warning(warning_msg)
+                GaussianErrorHandler._update_route_params(self.gin.route_parameters, int_key, {"grid": "ultrafine"})
+                return True
+            return False
+        return False
+
+    @staticmethod
+    def _not_g16(gout: GaussianOutput) -> bool:
+        """
+        Determine if the Gaussian version is not 16.
+
+        Args:
+            gout (GaussianOutput): A GaussianOutput object.
+
+        Returns:
+            bool: True if the Gaussian version is not 16, False otherwise.
+        """
+        return "16" not in gout.version
+
+    @staticmethod
+    def _monitor_convergence(data: dict[str, dict[str, Any]], directory: str = "./") -> None:
+        """
+        Plot and save a convergence graph for an optimization job as a function of the
+        number of iterations.
+
+        Parameters:
+            data (dict): A dictionary containing two keys: 'values' and 'thresh'. 'values'
+                is a dictionary where each key-value pair represents a parameter and its
+                values across iterations. 'thresh' is a dictionary where each key-value pair
+                represents a parameter and its threshold value. The convergence parameters
+                are: 'max_force', 'rms_force', 'max_disp', and 'rms_disp'.
+            directory (str, optional): The directory where the convergence plot image will
+                be saved. Defaults to "./".
+ """ + import matplotlib.pyplot as plt + from matplotlib.ticker import MaxNLocator + + _fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(12, 10)) + for i, (k, v) in enumerate(data["values"].items()): + row = int(np.floor(i / 2)) + col = i % 2 + iters = range(len(v)) + ax[row, col].plot(iters, v, color="#cf3759", linewidth=2) + ax[row, col].axhline(y=data["thresh"][k], linewidth=2, color="black", linestyle="--") + ax[row, col].tick_params(which="major", length=8) + ax[row, col].tick_params(axis="both", which="both", direction="in", labelsize=16) + ax[row, col].set_xlabel("Iteration", fontsize=16) + ax[row, col].set_ylabel(f"{k}", fontsize=16) + ax[row, col].xaxis.set_major_locator(MaxNLocator(integer=True)) + ax[row, col].grid(ls="--", zorder=1) + plt.tight_layout() + plt.savefig(os.path.join(directory, "convergence.png")) + + def check(self, directory: str = "./") -> bool: + """Check for errors in the Gaussian output file.""" + # TODO: this backups the original file instead of the actual one + if "linear_bend" in self.errors: + os.rename( + os.path.join(directory, self.input_file + ".prev"), + os.path.join(directory, self.input_file), + ) + + self.gin = GaussianInput.from_file(os.path.join(directory, self.input_file)) + self.gin.route_parameters = GaussianErrorHandler._recursive_lowercase(self.gin.route_parameters) + assert isinstance(self.gin.route_parameters, dict) + self.gin.route_parameters = GaussianErrorHandler._recursive_remove_space(self.gin.route_parameters) + self.gout = GaussianOutput(os.path.join(directory, self.output_file)) + self.errors = set() + error_patts = set() + # TODO: move this to pymatgen? + self.conv_data = {"values": {}, "thresh": {}} + with zopen(os.path.join(directory, self.output_file)) as f: + for line in f: + error_match = GaussianErrorHandler.error_patt.search(line) + mem_match = GaussianErrorHandler.recom_mem_patt.search(line) + if error_match: + patt = error_match.group(0) + error_patts.add(patt) + for pattern, error_key in GaussianErrorHandler.error_defs.items(): + if re.match(pattern, patt): + self.errors.add(error_key) + break + # self.errors.add(GaussianErrorHandler.error_defs[patt]) + if mem_match: + mem = mem_match.group(1) + self.recom_mem = GaussianErrorHandler.convert_mem(float(mem), "mw") + + if self.check_convergence and "opt" in self.gin.route_parameters: + for k, v in GaussianErrorHandler.conv_criteria.items(): + m = v.search(line) + if m: + if k not in self.conv_data["values"]: + self.conv_data["values"][k] = [m.group(2)] + self.conv_data["thresh"][k] = float(m.group(3)) + else: + self.conv_data["values"][k].append(m.group(2)) + + # TODO: it only plots after the job finishes, modify? 
+ if self.conv_data["values"] and all(len(v) >= 2 for v in self.conv_data["values"].values()): + for k, v in self.conv_data["values"].items(): + # convert strings to float taking into account the + # possibility of having ******** values + self.conv_data["values"][k] = np.genfromtxt(np.array(v)) + GaussianErrorHandler._monitor_convergence(self.conv_data) + for patt in error_patts: + self.logger.error(patt) + return len(self.errors) > 0 + + def correct(self, directory: str = "./"): + """Perform necessary actions to correct the errors in the Gaussian output.""" + actions: list[Any] = [] + # to avoid situations like 'linear_bend', where if we backup input_file, + # it will not be the actual input used in the current calc + # shutil.copy(self.input_file, f'{self.input_file}.backup') + # backup_files = [self.input_file, self.output_file, + # self.stderr_file] + # checkpoint = glob.glob('*.[Cc][Hh][Kk]') + # form_checkpoint = glob.glob('*.[Ff][Cc][Hh][Kk]') + # png = glob.glob('convergence.png') + # [backup_files.append(i[0]) for i in [checkpoint, form_checkpoint, png] + # if i] + # backup(backup_files, self.prefix) + # os.remove(f'{self.input_file}.backup') + backup_files = [self.input_file, self.output_file, self.stderr_file, *BACKUP_FILES.values()] + backup(backup_files, prefix=self.prefix, directory=directory) + if "scf_convergence" in self.errors: + self.gin.route_parameters = GaussianErrorHandler._update_route_params(self.gin.route_parameters, "scf", {}) + # if the SCF procedure has failed to converge + if self.gin.route_parameters.get("scf", {}).get("maxcycle") != str(self.scf_max_cycles): + # increase number of cycles if not already set or is different + # from scf_max_cycles + self.gin.route_parameters["scf"]["maxcycle"] = self.scf_max_cycles + actions.append({"scf_max_cycles": self.scf_max_cycles}) + + elif not {"xqc", "yqc", "qc"}.intersection(self.gin.route_parameters.get("scf", set())): + # use an alternate SCF converger + self.gin.route_parameters["scf"]["xqc"] = None + actions.append({"scf_algorithm": "xqc"}) + + elif self.job_type == "better_guess" and not GaussianErrorHandler.activate_better_guess: + # try to get a better initial guess at a lower level of theory + self.logger.info( + "SCF calculation failed. Switching to a lower " + "level of theory to get a better initial " + "guess of molecular orbitals" + ) + # TODO: what if inputs don't work with scf_lot? e.g. extra_basis + self.gin.functional = self.lower_functional + self.gin.basis_set = self.lower_basis_set + GaussianErrorHandler.activate_better_guess = True + actions.append({"scf_level_of_theory": "better_scf_guess"}) + + else: + if self.job_type != "better_guess": + self.logger.info( + "Try to switch to better_guess job type to " + "generate a different initial guess using a " + "lower level of theory" + ) + else: + self.logger.info("SCF calculation failed. 
Exiting...") + return {"errors": list(self.errors), "actions": None} + + elif "opt_steps" in self.errors: + # int_actions = self._add_int() + if self.gin.route_parameters.get("opt").get("maxcycles") != str(self.opt_max_cycles): + self.gin.route_parameters["opt"]["maxcycles"] = self.opt_max_cycles + if len(self.gout.structures) > 1: + self.gin._mol = self.gout.final_structure + actions.append({"structure": "from_final_structure"}) + actions.append({"opt_max_cycles": self.opt_max_cycles}) + + elif self.check_convergence and all(v[-1] < v[0] for v in self.conv_data["values"].values()): + self.gin._mol = self.gout.final_structure + actions.append({"structure": "from_final_structure"}) + + elif self._add_int(): + actions.append({"integral": "ultra_fine"}) + + # elif int_actions: + # actions.append(int_actions) + # TODO: check if the defined methods are clean + # TODO: don't enter this if condition if g16 and ... + + elif self.job_type == "better_guess" and not GaussianErrorHandler.activate_better_guess: + # TODO: check if the logic is correct since this is used with scf + # try to get a better initial guess at a lower level of theory + self.logger.info( + "Geometry optimization failed. Switching to a " + "lower level of theory to get a better " + "initial guess of molecular geometry" + ) + self.gin.functional = self.lower_functional + self.gin.basis_set = self.lower_basis_set + GaussianErrorHandler.activate_better_guess = True + actions.append({"opt_level_of_theory": "better_geom_guess"}) + + else: + if self.job_type != "better_guess": + self.logger.info( + "Try to switch to better_guess job type to " + "generate a different initial guess using a " + "lower level of theory" + ) + else: + self.logger.info("Geometry optimization failed. Exiting...") + return {"errors": list(self.errors), "actions": None} + + elif "linear_bend" in self.errors: + # if there is some linear bend around an angle in the geometry, + # restart the job at the point it stopped while forcing Gaussian + # to rebuild the set of redundant internals + if not list(filter(re.compile(r"%[Cc][Hh][Kk]").match, self.gin.link0_parameters.keys())): + raise KeyError("This remedy reads coords from checkpoint file. 
+            self.gin = GaussianInput(
+                mol=None,
+                charge=self.gin.charge,
+                spin_multiplicity=self.gin.spin_multiplicity,
+                title=self.gin.title,
+                functional=self.gin.functional,
+                basis_set=self.gin.basis_set,
+                route_parameters=self.gin.route_parameters,
+                input_parameters=self.gin.input_parameters,
+                link0_parameters=self.gin.link0_parameters,
+                dieze_tag=self.gin.dieze_tag,
+                gen_basis=self.gin.gen_basis,
+            )
+            self.gin.route_parameters.update({"geom": "(checkpoint, newdefinition)"})
+            actions.append({"coords": "rebuild_redundant_internals"})
+
+        elif "solute_solvent_surface" in self.errors:
+            # if non-convergence in the iteration of the PCM matrix is
+            # encountered, change the type of molecular surface representing
+            # the solute-solvent boundary
+            # TODO: test
+            input_params = {
+                key.lower() if isinstance(key, str) else key: value for key, value in self.gin.input_parameters.items()
+            }
+            if input_params.get("surface", "none").lower() != "sas":
+                GaussianErrorHandler._update_route_params(self.gin.route_parameters, "scrf", "read")
+                self.gin.input_parameters.update({"surface": "SAS"})
+                actions.append({"surface": "SAS"})
+            else:
+                self.logger.info("Not sure how to fix the solute_solvent_surface error if the surface is already SAS!")
+                return {"errors": [self.errors], "actions": None}
+
+        elif "internal_coords" in self.errors:
+            # check if the optimization is requested to be performed in
+            # Cartesian coords. if not, set it while overwriting other possibly
+            # requested coord systems, disable symmetry if applicable, and
+            # rerun; however, this will come at a higher computational cost
+            if "opt" in self.gin.route_parameters and not any(
+                n in (self.gin.route_parameters.get("opt") or {}) for n in ["cart", "cartesian"]
+            ):
+                GaussianErrorHandler._update_route_params(self.gin.route_parameters, "opt", "cartesian")
+                if isinstance(self.gin.route_parameters["opt"], dict):
+                    for coord_sys in ("redundant", "zmatrix", "z-matrix"):
+                        self.gin.route_parameters["opt"].pop(coord_sys, None)
+
+                if (
+                    not self.gin.route_parameters.get("nosymmetry")
+                    or self.gin.route_parameters.get("symmetry") != "none"
+                ):
+                    self.gin.route_parameters["symmetry"] = "none"
+                    actions.append({"symmetry": False})
+                actions.append({"opt_cart_coords": True})
+            else:
+                self.logger.info(
+                    "An error occurred in internal coordinates. "
+                    "Your molecule might have 3 or more atoms "
+                    "that are nearly linear, making it difficult "
+                    "to generate internal coordinates. Try "
+                    "modifying your input structure?"
+                )
+                return {"errors": [self.errors], "actions": None}
+
+        elif "zmatrix" in self.errors:
+            with open(os.path.join(directory, self.input_file)) as gfile:
+                lines = gfile.readlines()
+                last_lines = lines[-2:]
+            if set(last_lines) != {"\n"}:
+                # if the required blank lines at the end of the input file are
+                # missing, just rewrite the file
+                self.logger.info("Missing blank line at the end of the input file.")
+                actions.append({"blank_lines": "rewrite_input_file"})
+            else:
+                self.logger.info("Not sure how to fix the zmatrix error. Check manually?")
+                return {"errors": [self.errors], "actions": None}
Check manually?") + return {"errors": [self.errors], "actions": None} + + elif "coords" in self.errors: + if "connectivity" in self.gin.route_parameters.get("geom"): + self.logger.info("Explicit atom bonding is requested but no such input is provided") + if isinstance(self.gin.route_parameters["geom"], dict) and len(self.gin.route_parameters["geom"]) > 1: + self.gin.route_parameters["geom"].pop("connectivity", None) + else: + del self.gin.route_parameters["geom"] + actions.append({"coords": "remove_connectivity"}) + else: + self.logger.info("Missing connectivity info. Not sure how to fix this error. Exiting!") + return {"errors": [self.errors], "actions": None} + + elif "found_coords" in self.errors: + if self.gin.molecule and any( + key in self.gin.route_parameters.get("geom", {}) for key in ["checkpoint", "check", "allcheck"] + ): + # if coords are found in the input and the user chooses to read + # the molecule specification from the checkpoint file, + # remove mol + self.gin._mol = None + actions.append({"mol": "remove_from_input"}) + else: + self.logger.info("Not sure why atom specifications should not be found in the input. Examine manually!") + return {"errors": [self.errors], "actions": None} + + elif "coord_inputs" in self.errors: + if ( + any(key in self.gin.route_parameters.get("opt", {}) for key in ["z-matrix", "zmatrix"]) + and self.cart_coords + ): + # if molecule is specified in xyz format, but the user chooses + # to perform the optimization using internal coordinates, + # switch to z-matrix format + self.cart_coords = False + actions.append({"coords": "use_zmatrix_format"}) + else: + # error cannot be fixed automatically. Return None for actions + self.logger.info( + "Not sure how to fix problem with z-matrix " + "optimization if coords are already input in" + "z-matrix format. Examine manually!" + ) + return {"errors": [self.errors], "actions": None} + + elif "missing_mol" in self.errors: + if ( + not self.gin.molecule + and "read" in self.gin.route_parameters.get("guess") + and not any( + key in self.gin.route_parameters.get("geom", {}) for key in ["checkpoint", "check", "allcheck"] + ) + ): + # if molecule is not specified and the user requests that the + # initial guess be read from the checkpoint file but forgot to + # take the geom from the checkpoint file, add geom=check + if not glob.glob("*.[Cc][Hh][Kk]"): + raise FileNotFoundError("This remedy reads geometry from checkpoint file. This file is missing!") + GaussianErrorHandler._update_route_params(self.gin.route_parameters, "geom", "check") + self.gin.route_parameters["geom"] = "check" + actions.append({"mol": "get_from_checkpoint"}) + else: + # error cannot be fixed automatically. Return None for actions + self.logger.info("Molecule is not found in the input file. Fix manually!") + # TODO: check if logger.info is enough here or return is needed + return {"errors": list(self.errors), "actions": None} + + elif any(err in self.errors for err in ["empty_file", "bad_file"]): + self.logger.error("Required checkpoint file is bad. Fix manually!") + return {"errors": list(self.errors), "actions": None} + + elif "missing_file" in self.errors: + self.logger.error("Could not find the required file. Fix manually!") + return {"errors": list(self.errors), "actions": None} + + elif "syntax" in self.errors: + # error cannot be fixed automatically. Return None for actions + self.logger.info("A syntax error was detected in the input file. 
Fix manually!") + return {"errors": list(self.errors), "actions": None} + + elif "insufficient_mem" in self.errors: + mem_key, dynamic_mem = GaussianErrorHandler._find_dynamic_memory_allocated(self.gin.link0_parameters) + if dynamic_mem and self.recom_mem and dynamic_mem < self.recom_mem: + # this assumes that 1.5*minimum required memory is available + mem = math.ceil(self.recom_mem * 1.5) + self.gin.link0_parameters[mem_key] = f"{mem}MB" + actions.append({"memory": "increase_to_gaussian_recommendation"}) + else: + self.logger.info("Check job memory requirements manually and set as needed.") + return {"errors": list(self.errors), "actions": None} + + else: + self.logger.info("Must have gotten an error that is parsed but not handled yet. Fix manually!") + return {"errors": list(self.errors), "actions": None} + + os.rename( + os.path.join(directory, self.input_file), + os.path.join(directory, self.input_file + ".prev"), + ) + self.gin.write_file(os.path.join(directory, self.input_file), self.cart_coords) + # TODO: ADDED + if os.path.exists(os.path.join(directory, self.input_file) + ".wt"): + shutil.copyfile( + os.path.join(directory, self.input_file), + os.path.join(directory, self.input_file + ".wt"), + ) + return {"errors": list(self.errors), "actions": actions} + + +class WallTimeErrorHandler(ErrorHandler): + """ + Check if a run is nearing the walltime. If so, terminate the job and restart from + the last .rwf file. A job is considered to be nearing the walltime if the remaining + time is less than or equal to the buffer time. + """ + + is_monitor: bool = True + + def __init__( + self, + wall_time: int, + buffer_time: int, + input_file: str, + output_file: str, + stderr_file: str = "stderr.txt", + prefix: str = "error", + ): + """ + Initialize the WalTimeErrorHandler class. + + Args: + wall_time (int): The total wall time for the job in seconds. + buffer_time (int): The buffer time in seconds. If the remaining time is less + than or equal to the buffer time, the job is considered to be nearing the + walltime and will be terminated. + input_file (str): The name of the input file for the Gaussian job. + output_file (str): The name of the output file for the Gaussian job. + stderr_file (str): The name of the standard error file for the Gaussian job. + Defaults to 'stderr.txt'. + prefix (str): The prefix to use for the backup files. Defaults to error, + which means a series of error.1.tar.gz, error.2.tar.gz, ... will be + generated. + """ + self.wall_time = wall_time + self.buffer_time = buffer_time + self.input_file = input_file + self.output_file = output_file + self.stderr_file = stderr_file + self.prefix = prefix + self.logger: logging.Logger = logging.getLogger(self.__class__.__name__) + logging.basicConfig(level=logging.INFO) + + now_ = datetime.datetime.now() + now_str = datetime.datetime.strftime(now_, "%a %b %d %H:%M:%S UTC %Y") + init_time_str = os.environ.get("JOB_START_TIME", now_str) + os.environ["JOB_START_TIME"] = init_time_str + self.init_time = datetime.datetime.strptime(init_time_str, "%a %b %d %H:%M:%S %Z %Y") + + def check(self, directory: str = "./") -> bool: + """Check if the job is nearing the walltime. 
+        if self.wall_time:
+            run_time = datetime.datetime.now() - self.init_time
+            remaining_time = self.wall_time - run_time.total_seconds()
+            if remaining_time <= self.buffer_time:
+                return True
+        return False
+
+    def correct(self, directory: str = "./") -> dict:
+        """Perform the corrections."""
+        # TODO: when using restart, the rwf file might be in a different dir
+        backup_files = [self.input_file, self.output_file, self.stderr_file, *BACKUP_FILES.values()]
+        backup(backup_files, prefix=self.prefix, directory=directory)
+        rwf_files = glob.glob(os.path.join(directory, BACKUP_FILES["rwf"]))
+        if rwf_files:
+            rwf = rwf_files[0]
+            gin = GaussianInput.from_file(os.path.join(directory, self.input_file))
+            # TODO: check if rwf is already there like RWF or Rwf or ...
+            input_str = [f"%rwf={rwf}"] + [f"{i}={j}" for i, j in gin.link0_parameters.items()]
+            input_str.append(f"{gin.dieze_tag} Restart\n\n")
+            with open(os.path.join(directory, self.input_file + ".wt"), "w") as f:
+                f.write("\n".join(input_str))
+            return {"errors": ["wall_time_limit"], "actions": None}
+        self.logger.info(
+            "Wall time handler requires a read-write (.rwf) Gaussian file to be available. No such file was found."
+        )
+        return {"errors": ["Walltime reached but no rwf file found"], "actions": None}
diff --git a/custodian/gaussian/jobs.py b/custodian/gaussian/jobs.py
new file mode 100644
index 00000000..78381dc3
--- /dev/null
+++ b/custodian/gaussian/jobs.py
@@ -0,0 +1,202 @@
+"""This module implements basic kinds of jobs for Gaussian runs."""
+
+from __future__ import annotations
+
+import logging
+import os
+import shutil
+import subprocess
+from fnmatch import filter as fnmatch_filter
+from typing import TYPE_CHECKING
+
+from pymatgen.io.gaussian import GaussianInput, GaussianOutput
+
+from custodian.custodian import Job
+from custodian.gaussian.handlers import GaussianErrorHandler
+
+if TYPE_CHECKING:
+    from collections.abc import Generator
+
+__author__ = "Rasha Atwi"
+__version__ = "0.1"
+__maintainer__ = "Rasha Atwi"
+__email__ = "rasha.atwi@stonybrook.edu"
+__status__ = "Alpha"
+__date__ = "5/13/21"
+
+logger = logging.getLogger(__name__)
+
+
+class GaussianJob(Job):
+    """A basic Gaussian job."""
+
+    def __init__(
+        self,
+        gaussian_cmd: str,
+        input_file: str,
+        output_file: str,
+        stderr_file: str = "stderr.txt",
+        suffix: str = "",
+        backup: bool = True,
+    ):
+        """
+        Args:
+            gaussian_cmd (str): Command to run Gaussian.
+            input_file (str): Name of the Gaussian input file.
+            output_file (str): Name of the Gaussian output file.
+            stderr_file (str): Name of the stderr file. Defaults to 'stderr.txt'.
+            suffix (str): String to append to the input and output files, e.g., to
+                rename Gaussian input and output files from, say, gaussian.com and
+                gaussian.out to gaussian.com.guess1 and gaussian.out.guess1,
+                respectively, provide ".guess1" as the suffix. Defaults to ''.
+            backup (bool): Whether to backup the input file. If True, the input will be
+                copied with a ".orig" appended to the name. Defaults to True.
+ """ + self.gaussian_cmd = gaussian_cmd + self.input_file = input_file + self.output_file = output_file + self.stderr_file = stderr_file + self.suffix = suffix + self.backup = backup + self.process: subprocess.Popen | None = None + + def setup(self, directory: str = "./") -> None: + """ + Perform initial setup for the job, i.e., make a backup of the input file if + requested. + + Args: + directory (str): Directory where the job will be run. Defaults to './'. + """ + if self.backup: + shutil.copy( + os.path.join(directory, self.input_file), + os.path.join(directory, f"{self.input_file}.orig"), + ) + + def run(self, directory: str = "./") -> subprocess.Popen: + """ + Perform the actual Gaussian run. + + Args: + directory (str): Directory where the job will be run. Defaults to './'. + + Returns: + subprocess.Popen: + The process running the Gaussian job. + """ + logger.info(f"Running command: {self.gaussian_cmd}") + with ( + open(os.path.join(directory, self.output_file), "w") as out_file, + open(os.path.join(directory, self.stderr_file), "w", buffering=1) as error_file, + ): + process = subprocess.Popen(self.gaussian_cmd, stdout=out_file, stderr=error_file, shell=True) + self.process = process + return process + + def postprocess(self, directory: str = "./") -> None: + """ + Perform any postprocessing of the Gaussian run. This includes making a copy + of the input and output file if a suffix is specified. + + Args: + directory (str): Directory where the job was run. Defaults to './'. + """ + for file in [self.input_file, self.output_file]: + file_path = os.path.join(directory, file) + if os.path.exists(file_path) and self.suffix != "": + shutil.copy(file_path, f"{file_path}{self.suffix}") + + def terminate(self, directory: str = "./") -> None: + """ + Terminate the Gaussian job. + + Args: + directory (str): Directory where the job was run. Defaults to './'. + """ + if self.process: + self.process.terminate() + + @classmethod + def generate_better_guess( + cls, + gaussian_cmd: str, + input_file: str, + output_file: str, + stderr_file: str = "stderr.txt", + backup: bool = True, + cart_coords: bool = True, + directory: str = "./", + ) -> Generator[GaussianJob, None, None]: + """ + Generate a better initial guess for a Gaussian calculation (optimization or + SCF run). This is done by running the job at a lower level of theory + (e.g., STO-3G). The geometry for this job is read from the checkpoint file + generated by the previous job. + + Args: + gaussian_cmd (str): Command to run Gaussian. + input_file (str): Name of the Gaussian input file. + output_file (str): Name of the Gaussian output file. + stderr_file (str): Name of the stderr file. Defaults to 'stderr.txt'. + backup (bool): Whether to backup the input file. If True, the input will be + copied with a ".orig" appended to the name. Defaults to True. + cart_coords (bool): Whether to use Cartesian coordinates in the input file. + Defaults to True. + directory (str): Directory where the job will be run. Defaults to './'. + + Yields: + GaussianJob: The Gaussian job instance. 
+ """ + orig_input = GaussianInput.from_file(os.path.join(directory, input_file)) + yield ( + GaussianJob( + gaussian_cmd=gaussian_cmd, + input_file=input_file, + output_file=output_file, + stderr_file=stderr_file, + suffix=".guess1", + backup=backup, + ) + ) + if GaussianErrorHandler.activate_better_guess: + # TODO: check why it comes here only if the lower job is not + # failing and not in the else condition + # continue only if other corrections are invalid or failed + lower_output = GaussianOutput(os.path.join(directory, output_file)) + if len(lower_output.errors) == 0: + # if the calculation at the lower level of theory succeeded + if not filter(os.listdir("."), "*.[Cc][Hh][Kk]"): + raise FileNotFoundError("Missing checkpoint file. Required to read initial guesses") + + gin = GaussianInput( + mol=None, + charge=orig_input.charge, + spin_multiplicity=orig_input.spin_multiplicity, + title=orig_input.title, + functional=orig_input.functional, + basis_set=orig_input.basis_set, + route_parameters=lower_output.route_parameters, + input_parameters=orig_input.input_parameters, + link0_parameters=orig_input.link0_parameters, + dieze_tag=orig_input.dieze_tag, + gen_basis=orig_input.gen_basis, + ) + gin.route_parameters["Guess"] = "Read" + gin.route_parameters["Geom"] = "Checkpoint" + gin.write_file(os.path.join(directory, input_file), cart_coords=cart_coords) + + yield ( + GaussianJob( + gaussian_cmd=gaussian_cmd, + input_file=input_file, + output_file=output_file, + stderr_file=stderr_file, + suffix=".guess2", + backup=backup, + ) + ) + else: + logger.info("Failed to generate a better initial guess") + else: + logger.info("Calculation completed normally without having to generate a better initial guess") diff --git a/custodian/lobster/handlers.py b/custodian/lobster/handlers.py index a6b1f94b..94086591 100644 --- a/custodian/lobster/handlers.py +++ b/custodian/lobster/handlers.py @@ -17,7 +17,7 @@ class EnoughBandsValidator(Validator): """validates if enough bands for COHP calculation are available.""" - def __init__(self, output_filename: str = "lobsterout"): + def __init__(self, output_filename: str = "lobsterout") -> None: """ Args: @@ -47,7 +47,7 @@ class LobsterFilesValidator(Validator): Check if lobster terminated normally by looking for finished. 
""" - def __init__(self): + def __init__(self) -> None: """Dummy init.""" def check(self, directory: str = "./") -> bool: @@ -63,7 +63,7 @@ def check(self, directory: str = "./") -> bool: class ChargeSpillingValidator(Validator): """Check if spilling is below certain threshold!""" - def __init__(self, output_filename: str = "lobsterout", charge_spilling_limit: float = 0.05): + def __init__(self, output_filename: str = "lobsterout", charge_spilling_limit: float = 0.05) -> None: """ Args: @@ -79,7 +79,7 @@ def check(self, directory: str = "./") -> bool: lobsterout = Lobsterout(os.path.join(directory, self.output_filename)) if lobsterout.charge_spilling[0] > self.charge_spilling_limit: return True - if len(lobsterout.charge_spilling) > 1 and lobsterout.charge_spilling[1] > self.charge_spilling_limit: - return True - return False + return bool( + len(lobsterout.charge_spilling) > 1 and lobsterout.charge_spilling[1] > self.charge_spilling_limit + ) return False diff --git a/custodian/lobster/jobs.py b/custodian/lobster/jobs.py index f2a79ac9..d3758070 100644 --- a/custodian/lobster/jobs.py +++ b/custodian/lobster/jobs.py @@ -2,6 +2,7 @@ import logging import os +import shlex import shutil import subprocess @@ -52,7 +53,7 @@ def __init__( gzipped: bool = True, add_files_to_gzip=(), backup: bool = True, - ): + ) -> None: """ Args: @@ -70,7 +71,7 @@ def __init__( self.add_files_to_gzip = add_files_to_gzip self.backup = backup - def setup(self, directory="./"): + def setup(self, directory="./") -> None: """Will backup lobster input files.""" if self.backup: for file in LOBSTERINPUT_FILES: @@ -78,18 +79,19 @@ def setup(self, directory="./"): def run(self, directory="./"): """Runs the job.""" - cmd = self.lobster_cmd + # join split commands (e.g. from atomate and atomate2) + cmd = self.lobster_cmd if isinstance(self.lobster_cmd, str) else shlex.join(self.lobster_cmd) - logger.info(f"Running {' '.join(cmd)}") + logger.info(f"Running {cmd}") with ( zopen(os.path.join(directory, self.output_file), "w") as f_std, + # use line buffering for stderr zopen(os.path.join(directory, self.stderr_file), "w", buffering=1) as f_err, ): - # use line buffering for stderr - return subprocess.Popen(cmd, cwd=directory, stdout=f_std, stderr=f_err) # pylint: disable=R1732 + return subprocess.run(cmd, stdout=f_std, stderr=f_err, shell=True, check=False) - def postprocess(self, directory="./"): + def postprocess(self, directory="./") -> None: """Will gzip relevant files (won't gzip custodian.json and other output files from the cluster).""" if self.gzipped: for file in LOBSTEROUTPUT_FILES: diff --git a/custodian/nwchem/handlers.py b/custodian/nwchem/handlers.py index 87f7d8b1..c0050ee3 100644 --- a/custodian/nwchem/handlers.py +++ b/custodian/nwchem/handlers.py @@ -18,7 +18,7 @@ class NwchemErrorHandler(ErrorHandler): generated by pymatgen. """ - def __init__(self, output_filename="mol.nwout"): + def __init__(self, output_filename="mol.nwout") -> None: """Initialize with an output file name. Args: @@ -73,5 +73,5 @@ def correct(self, directory="./"): nwi.write_file(self.input_file) return {"errors": self.errors, "actions": actions} - def __str__(self): + def __str__(self) -> str: return "NwchemErrorHandler" diff --git a/custodian/nwchem/jobs.py b/custodian/nwchem/jobs.py index fc88f8e4..2967f4e0 100644 --- a/custodian/nwchem/jobs.py +++ b/custodian/nwchem/jobs.py @@ -28,7 +28,7 @@ def __init__( gzipped=False, backup=True, settings_override=None, - ): + ) -> None: """Initialize a basic NwChem job. 
Args: @@ -53,7 +53,7 @@ def __init__( self.gzipped = gzipped self.settings_override = settings_override - def setup(self, directory="./"): + def setup(self, directory="./") -> None: """Performs backup if necessary.""" if self.backup: shutil.copy(os.path.join(directory, self.input_file), os.path.join(directory, f"{self.input_file}.orig")) @@ -63,7 +63,7 @@ def run(self, directory="./"): with zopen(self.output_file, "w") as fout: return subprocess.Popen([*self.nwchem_cmd, self.input_file], cwd=directory, stdout=fout) # pylint: disable=R1732 - def postprocess(self, directory="./"): + def postprocess(self, directory="./") -> None: """Renaming or gzipping as needed.""" if self.gzipped: gzip_dir(directory) diff --git a/custodian/qchem/handlers.py b/custodian/qchem/handlers.py index 0c57e635..b9493c1c 100644 --- a/custodian/qchem/handlers.py +++ b/custodian/qchem/handlers.py @@ -37,7 +37,7 @@ def __init__( output_file="mol.qout", scf_max_cycles=100, geom_max_cycles=200, - ): + ) -> None: """Initialize the error handler from a set of input and output files. Args: @@ -52,8 +52,8 @@ def __init__( self.scf_max_cycles = scf_max_cycles self.geom_max_cycles = geom_max_cycles self.outdata = None - self.errors = [] - self.opt_error_history = [] + self.errors: list[str] = [] + self.opt_error_history: list[str] = [] def check(self, directory="./"): """Checks output file for errors.""" diff --git a/custodian/qchem/jobs.py b/custodian/qchem/jobs.py index 1bff54ae..23bd4f34 100644 --- a/custodian/qchem/jobs.py +++ b/custodian/qchem/jobs.py @@ -49,7 +49,7 @@ def __init__( nboexe=None, save_scratch=False, backup=True, - ): + ) -> None: """ Args: qchem_command (str): Command to run QChem. @@ -120,7 +120,7 @@ def current_command(self, directory: str | Path = "./"): command = self.qchem_command + command return " ".join(command) - def setup(self, directory: str | Path = "./"): + def setup(self, directory: str | Path = "./") -> None: """Sets up environment variables necessary to efficiently run QChem. Args: @@ -145,7 +145,7 @@ def setup(self, directory: str | Path = "./"): raise RuntimeError("Trying to run NBO7 without providing NBOEXE in fworker! Exiting...") os.environ["NBOEXE"] = self.nboexe - def postprocess(self, directory: str | Path = "./"): + def postprocess(self, directory: str | Path = "./") -> None: """Renames and removes scratch files after running QChem. Args: diff --git a/custodian/utils.py b/custodian/utils.py index 532bb26d..fe366487 100644 --- a/custodian/utils.py +++ b/custodian/utils.py @@ -5,9 +5,10 @@ import os import tarfile from glob import glob +from typing import ClassVar -def backup(filenames, prefix="error", directory="./"): +def backup(filenames, prefix="error", directory="./") -> None: """ Backup files to a tar.gz file. Used, for example, in backing up the files of an errored run before performing corrections. @@ -60,9 +61,9 @@ class tracked_lru_cache: Allows Custodian to clear the cache after all the checks have been performed. """ - cached_functions: set = set() + cached_functions: ClassVar = set() - def __init__(self, func): + def __init__(self, func) -> None: """ Args: func: function to be decorated. 
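For context on the `tracked_lru_cache` hunks above: the class wraps `functools.lru_cache` while recording every decorated function, so Custodian can invalidate all caches once the checks of a cycle are done. A minimal usage sketch (the decorated parser below is hypothetical, not part of this PR):

```python
from custodian.utils import tracked_lru_cache

@tracked_lru_cache
def parse_output(path: str) -> str:
    # hypothetical expensive parser; repeated checks of the same file hit the cache
    with open(path) as file:
        return file.read()

parse_output("vasp.out")  # parsed once
parse_output("vasp.out")  # served from the cache

# after all handler checks have run, drop every cached result so the
# next check cycle re-reads the files from disk
tracked_lru_cache.tracked_cache_clear()
```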
@@ -81,7 +82,7 @@ def __call__(self, *args, **kwargs): return result @classmethod - def tracked_cache_clear(cls): + def tracked_cache_clear(cls) -> None: """Clear the cache of all the decorated functions.""" while cls.cached_functions: f = cls.cached_functions.pop() diff --git a/custodian/vasp/handlers.py b/custodian/vasp/handlers.py index 8909cffe..a89fe722 100644 --- a/custodian/vasp/handlers.py +++ b/custodian/vasp/handlers.py @@ -16,6 +16,7 @@ import warnings from collections import Counter from math import prod +from typing import ClassVar import numpy as np from monty.dev import deprecated @@ -66,7 +67,7 @@ class VaspErrorHandler(ErrorHandler): is_monitor = True - error_msgs = { + error_msgs: ClassVar = { "tet": [ "Tetrahedron method fails", "tetrahedron method fails", @@ -128,7 +129,7 @@ def __init__( errors_subset_to_catch=None, vtst_fixes=False, **kwargs, - ): + ) -> None: """Initialize the handler with the output file to check. Args: @@ -136,28 +137,25 @@ def __init__( is being redirected. The error messages that are checked are present in the stdout. Defaults to "vasp.out", which is the default redirect used by :class:`custodian.vasp.jobs.VaspJob`. - errors_subset_to_detect (list): A subset of errors to catch. The + errors_subset_to_catch (list): A subset of errors to catch. The default is None, which means all supported errors are detected. Use this to catch only a subset of supported errors. E.g., ["eddrmm", "zheev"] will only catch the eddrmm and zheev errors, and not others. If you wish to only exclude one or - two of the errors, you can create this list by the following - lines: + two of the errors, you can create this list by the following lines: - ``` - subset = list(VaspErrorHandler().error_msgs) - subset.remove("eddrmm") + subset = list(VaspErrorHandler().error_msgs) + subset.remove("eddrmm") + handler = VaspErrorHandler(errors_subset_to_catch=subset) - handler = VaspErrorHandler(errors_subset_to_catch=subset) - ``` vtst_fixes (bool): Whether to consider VTST optimizers. Defaults to False for compatibility purposes, but if you have VTST, you would likely benefit from setting this to True. **kwargs: Ignored. Added to increase signature flexibility. """ self.output_filename = output_filename - self.errors = set() - self.error_count = Counter() + self.errors: set[str] = set() + self.error_count: Counter[str] = Counter() self.errors_subset_to_catch = errors_subset_to_catch or list(VaspErrorHandler.error_msgs) self.vtst_fixes = vtst_fixes self.logger = logging.getLogger(type(self).__name__) @@ -705,9 +703,9 @@ class LrfCommutatorHandler(ErrorHandler): is_monitor = True - error_msgs = {"lrf_comm": ["LRF_COMMUTATOR internal error"]} + error_msgs: ClassVar = {"lrf_comm": ["LRF_COMMUTATOR internal error"]} - def __init__(self, output_filename: str = "std_err.txt"): + def __init__(self, output_filename: str = "std_err.txt") -> None: """Initialize the handler with the output file to check. Args: @@ -758,12 +756,12 @@ class StdErrHandler(ErrorHandler): is_monitor = True - error_msgs = { + error_msgs: ClassVar = { "kpoints_trans": ["internal error in GENERATE_KPOINTS_TRANS: number of G-vector changed in star"], "out_of_memory": ["Allocation would exceed memory limit"], } - def __init__(self, output_filename: str = "std_err.txt"): + def __init__(self, output_filename: str = "std_err.txt") -> None: """Initialize the handler with the output file to check. 
Args: @@ -818,12 +816,12 @@ class AliasingErrorHandler(ErrorHandler): is_monitor = True - error_msgs = { + error_msgs: ClassVar = { "aliasing": ["WARNING: small aliasing (wrap around) errors must be expected"], "aliasing_incar": ["Your FFT grids (NGX,NGY,NGZ) are not sufficient for an accurate"], } - def __init__(self, output_filename: str = "vasp.out"): + def __init__(self, output_filename: str = "vasp.out") -> None: """Initialize the handler with the output file to check. Args: @@ -904,7 +902,7 @@ def correct(self, directory="./"): class DriftErrorHandler(ErrorHandler): """Corrects for total drift exceeding the force convergence criteria.""" - def __init__(self, max_drift=None, to_average=3, enaug_multiply=2): + def __init__(self, max_drift=None, to_average=3, enaug_multiply=2) -> None: """Initialize the handler with max drift Args: max_drift (float): This defines the max drift. Leaving this at the default of None gets the max_drift from @@ -984,7 +982,7 @@ class MeshSymmetryErrorHandler(ErrorHandler): is_monitor = False - def __init__(self, output_filename: str = "vasp.out", output_vasprun="vasprun.xml"): + def __init__(self, output_filename: str = "vasp.out", output_vasprun="vasprun.xml") -> None: """Initialize the handler with the output files to check. Args: @@ -998,7 +996,7 @@ def __init__(self, output_filename: str = "vasp.out", output_vasprun="vasprun.xm self.output_filename = output_filename self.output_vasprun = output_vasprun - def check(self, directory="./"): + def check(self, directory="./") -> bool: """Check for error.""" msg = "Reciprocal lattice and k-lattice belong to different class of lattices." @@ -1046,7 +1044,7 @@ class UnconvergedErrorHandler(ErrorHandler): is_monitor = False - def __init__(self, output_filename: str = "vasprun.xml"): + def __init__(self, output_filename: str = "vasprun.xml") -> None: """Initialize the handler with the output file to check. Args: @@ -1055,7 +1053,7 @@ def __init__(self, output_filename: str = "vasprun.xml"): """ self.output_filename = output_filename - def check(self, directory="./"): + def check(self, directory="./") -> bool: """Check for error.""" try: v = load_vasprun(os.path.join(directory, self.output_filename)) @@ -1169,7 +1167,7 @@ class IncorrectSmearingHandler(ErrorHandler): is_monitor = False - def __init__(self, output_filename: str = "vasprun.xml"): + def __init__(self, output_filename: str = "vasprun.xml") -> None: """Initialize the handler with the output file to check. Args: @@ -1178,7 +1176,7 @@ def __init__(self, output_filename: str = "vasprun.xml"): """ self.output_filename = output_filename - def check(self, directory="./"): + def check(self, directory="./") -> bool: """Check for error.""" try: v = load_vasprun(os.path.join(directory, self.output_filename)) @@ -1215,7 +1213,7 @@ class KspacingMetalHandler(ErrorHandler): is_monitor = False - def __init__(self, output_filename: str = "vasprun.xml"): + def __init__(self, output_filename: str = "vasprun.xml") -> None: """Initialize the handler with the output file to check. 
Args: @@ -1224,7 +1222,7 @@ def __init__(self, output_filename: str = "vasprun.xml"): """ self.output_filename = output_filename - def check(self, directory="./"): + def check(self, directory="./") -> bool: """Check for error.""" try: v = load_vasprun(os.path.join(directory, self.output_filename)) @@ -1289,10 +1287,10 @@ class LargeSigmaHandler(ErrorHandler): is_monitor = True - def __init__(self): + def __init__(self) -> None: """Initializes the handler with a buffer time.""" - def check(self, directory="./"): + def check(self, directory="./") -> bool: """Check for error.""" incar = Incar.from_file(os.path.join(directory, "INCAR")) try: @@ -1357,7 +1355,7 @@ class PotimErrorHandler(ErrorHandler): is_monitor = True - def __init__(self, input_filename="POSCAR", output_filename="OSZICAR", dE_threshold=1): + def __init__(self, input_filename="POSCAR", output_filename="OSZICAR", dE_threshold=1) -> None: """Initialize the handler with the input and output files to check. Args: @@ -1372,7 +1370,7 @@ def __init__(self, input_filename="POSCAR", output_filename="OSZICAR", dE_thresh self.output_filename = output_filename self.dE_threshold = dE_threshold - def check(self, directory="./"): + def check(self, directory="./") -> bool | None: """Check for error.""" try: oszicar = Oszicar(os.path.join(directory, self.output_filename)) @@ -1424,7 +1422,7 @@ def __init__(self, output_filename: str = "vasp.out", timeout=21_600) -> None: self.output_filename = output_filename self.timeout = timeout - def check(self, directory="./"): + def check(self, directory="./") -> bool | None: """Check for error.""" st = os.stat(os.path.join(directory, self.output_filename)) if time.time() - st.st_mtime > self.timeout: @@ -1456,7 +1454,7 @@ class NonConvergingErrorHandler(ErrorHandler): is_monitor = True - def __init__(self, output_filename: str = "OSZICAR", nionic_steps=10): + def __init__(self, output_filename: str = "OSZICAR", nionic_steps=10) -> None: """Initialize the handler with the output file to check. Args: @@ -1592,7 +1590,7 @@ class WalltimeHandler(ErrorHandler): # error raises_runtime_error = False - def __init__(self, wall_time=None, buffer_time=300, electronic_step_stop=False): + def __init__(self, wall_time=None, buffer_time=300, electronic_step_stop=False) -> None: """Initialize the handler with a buffer time. Args: @@ -1644,7 +1642,7 @@ def __init__(self, wall_time=None, buffer_time=300, electronic_step_stop=False): self.electronic_steps_timings = [0] self.prev_check_time = self.start_time - def check(self, directory="./"): + def check(self, directory="./") -> bool: """Check for error.""" if self.wall_time: run_time = datetime.datetime.now() - self.start_time @@ -1697,7 +1695,7 @@ class CheckpointHandler(ErrorHandler): # itself naturally with the STOPCAR. is_terminating = False - def __init__(self, interval=3600): + def __init__(self, interval=3600) -> None: """Initialize the handler with an interval. 
Args: @@ -1712,9 +1710,7 @@ def check(self, directory="./"): """Check for error.""" run_time = datetime.datetime.now() - self.start_time total_secs = run_time.seconds + run_time.days * 3600 * 24 - if total_secs > self.interval: - return True - return False + return total_secs > self.interval def correct(self, directory="./"): """Perform corrections.""" @@ -1740,7 +1736,7 @@ def correct(self, directory="./"): return {"errors": ["Checkpoint reached"], "actions": actions} - def __str__(self): + def __str__(self) -> str: return f"CheckpointHandler with interval {self.interval}" @@ -1762,7 +1758,7 @@ class StoppedRunHandler(ErrorHandler): # itself naturally with the STOPCAR. is_terminating = False - def __init__(self): + def __init__(self) -> None: """Dummy init.""" def check(self, directory="./"): @@ -1794,7 +1790,7 @@ class PositiveEnergyErrorHandler(ErrorHandler): is_monitor = True - def __init__(self, output_filename: str = "OSZICAR"): + def __init__(self, output_filename: str = "OSZICAR") -> None: """Initialize the handler with the output file to check. Args: @@ -1803,7 +1799,7 @@ def __init__(self, output_filename: str = "OSZICAR"): """ self.output_filename = output_filename - def check(self, directory="./"): + def check(self, directory="./") -> bool: """Check for error.""" try: oszicar = Oszicar(os.path.join(directory, self.output_filename)) diff --git a/custodian/vasp/interpreter.py b/custodian/vasp/interpreter.py index f6c36ea1..80795d27 100644 --- a/custodian/vasp/interpreter.py +++ b/custodian/vasp/interpreter.py @@ -11,7 +11,7 @@ class VaspModder(Modder): """A Modder for VaspInputSets.""" - def __init__(self, actions=None, strict=True, vi=None, directory="./"): + def __init__(self, actions=None, strict=True, vi=None, directory="./") -> None: """Initialize a Modder for VaspInput sets. Args: @@ -24,14 +24,15 @@ def __init__(self, actions=None, strict=True, vi=None, directory="./"): supplied, a ValueError is raised. Defaults to True. vi (VaspInput): A VaspInput object from the current directory. Initialized automatically if not passed (but passing it will - avoid having to reparse the directory). + avoid having to re-parse the directory). + directory (str): The directory containing the VaspInput set. """ self.vi = vi or VaspInput.from_directory(directory) self.directory = directory actions = actions or [FileActions, DictActions] super().__init__(actions, strict) - def apply_actions(self, actions): + def apply_actions(self, actions) -> None: """ Applies a list of actions to the Vasp Input Set and rewrites modified files. diff --git a/custodian/vasp/jobs.py b/custodian/vasp/jobs.py index b734837a..ba7b7258 100644 --- a/custodian/vasp/jobs.py +++ b/custodian/vasp/jobs.py @@ -84,7 +84,7 @@ def __init__( gamma_vasp_cmd=None, copy_magmom=False, auto_continue=False, - ): + ) -> None: """ This constructor is necessarily complex due to the need for flexibility. For standard kinds of runs, it's often better to use one @@ -166,7 +166,7 @@ def __init__( logger.exception(f"Failed to detect VASP path: {vasp_cmd}") scope.set_tag("vasp_cmd", vasp_cmd) - def setup(self, directory="./"): + def setup(self, directory="./") -> None: """ Performs initial setup for VaspJob, including overriding any settings and backing up. 
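As a point of reference for these `VaspJob` hunks, a minimal sketch of how the class is typically driven under Custodian (the MPI launcher, core count, and binary name are placeholders for whatever the cluster provides):

```python
from custodian import Custodian
from custodian.vasp.handlers import UnconvergedErrorHandler, VaspErrorHandler
from custodian.vasp.jobs import VaspJob

# AFLOW-style double relaxation; "vasp_std" stands in for the actual VASP binary
jobs = VaspJob.double_relaxation_run(["mpirun", "-np", "16", "vasp_std"])
handlers = [VaspErrorHandler(), UnconvergedErrorHandler()]
Custodian(handlers, jobs, max_errors=5).run()
```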
@@ -255,7 +255,7 @@ def run(self, directory="./"): # use line buffering for stderr return subprocess.Popen(cmd, cwd=directory, stdout=f_std, stderr=f_err, start_new_session=True) # pylint: disable=R1732 - def postprocess(self, directory="./"): + def postprocess(self, directory="./") -> None: """ Postprocessing includes renaming and gzipping where necessary. Also copies the magmom to the incar if necessary. @@ -315,6 +315,7 @@ def double_relaxation_run( wall-time handler which will write a read-only STOPCAR to prevent VASP from deleting it once it finishes. Defaults to False. + directory (str): Directory where the job was run. Defaults to './'. Returns: List of two jobs corresponding to an AFLOW style run. @@ -382,12 +383,8 @@ def metagga_opt_run( metaGGA = incar.get("METAGGA", "SCAN") # Pre optimize WAVECAR and structure using regular GGA - pre_opt_settings = [ - { - "dict": "INCAR", - "action": {"_set": {"METAGGA": None, "LWAVE": True, "NSW": 0}}, - } - ] + new_settings = {"METAGGA": None, "LWAVE": True, "NSW": 0} + pre_opt_settings = [{"dict": "INCAR", "action": {"_set": new_settings}}] jobs = [ VaspJob( vasp_cmd, @@ -460,6 +457,7 @@ def full_opt_run( half_kpts_first_relax (bool): Whether to halve the kpoint grid for the first relaxation. Speeds up difficult convergence considerably. Defaults to False. + directory (str): Directory where the job was run. Defaults to './'. **vasp_job_kwargs: Passthrough kwargs to VaspJob. See :class:`custodian.vasp.jobs.VaspJob`. @@ -558,6 +556,7 @@ def constrained_opt_run( which is more robust but can be a bit slow. The code does fall back on the bisection when bfgs gives a nonsensical result, e.g., negative lattice params. + directory (str): Directory where the job was run. Defaults to './'. **vasp_job_kwargs: Passthrough kwargs to VaspJob. See :class:`custodian.vasp.jobs.VaspJob`. @@ -571,7 +570,7 @@ def constrained_opt_run( # Set the energy convergence criteria as the EDIFFG (if present) or # 10 x EDIFF (which itself defaults to 1e-4 if not present). - etol = incar["EDIFFG"] if incar.get("EDIFFG") and incar.get("EDIFFG") > 0 else incar.get("EDIFF", 0.0001) * 10 + e_tol = incar["EDIFFG"] if incar.get("EDIFFG") and incar.get("EDIFFG") > 0 else incar.get("EDIFF", 0.0001) * 10 if lattice_direction == "a": lattice_index = 0 @@ -612,7 +611,7 @@ def constrained_opt_run( other = ind - 1 else: other = ind + 1 if energies[sorted_x[ind + 1]] < energies[sorted_x[ind - 1]] else ind - 1 - if abs(energies[min_x] - energies[sorted_x[other]]) < etol: + if abs(energies[min_x] - energies[sorted_x[other]]) < e_tol: logger.info(f"Stopping optimization! Final {lattice_direction} = {min_x}") break @@ -684,7 +683,7 @@ def constrained_opt_run( for key in sorted(energies): file.write(f"{key} {energies[key]}\n") - def terminate(self, directory="./"): + def terminate(self, directory="./") -> None: """ Kill all VASP processes associated with the current job. This is done by looping over all processes and selecting the ones @@ -695,13 +694,13 @@ def terminate(self, directory="./"): simultaneously executed on the same node). However, this should never happen. 
""" - workdir = directory - logger.info(f"Killing VASP processes in workdir {workdir}.") + work_dir = directory + logger.info(f"Killing VASP processes in {work_dir=}.") for proc in psutil.process_iter(): try: if "vasp" in proc.name().lower(): open_paths = [file.path for file in proc.open_files()] - vasprun_path = os.path.join(workdir, "vasprun.xml") + vasprun_path = os.path.join(work_dir, "vasprun.xml") if (vasprun_path in open_paths) and psutil.pid_exists(proc.pid): proc.kill() return @@ -710,8 +709,7 @@ def terminate(self, directory="./"): continue logger.warning( - f"Killing VASP processes in workdir {workdir} failed with subprocess.Popen.terminate(). " - "Resorting to 'killall'." + f"Killing VASP processes in {work_dir=} failed with subprocess.Popen.terminate(). Resorting to 'killall'." ) cmds = self.vasp_cmd if self.gamma_vasp_cmd: @@ -742,7 +740,7 @@ def __init__( auto_continue=False, gamma_vasp_cmd=None, settings_override=None, - ): + ) -> None: """ This constructor is a simplified version of VaspJob, which satisfies the need for flexibility. For standard kinds of runs, it's often @@ -807,9 +805,8 @@ def __init__( self.auto_continue = auto_continue self.settings_override = settings_override - def setup(self, directory="./"): - """ - Performs initial setup for VaspNEBJob, including overriding any settings + def setup(self, directory="./") -> None: + """Performs initial setup for VaspNEBJob, including overriding any settings and backing up. """ neb_dirs, neb_sub = self._get_neb_dirs(directory) @@ -826,7 +823,6 @@ def setup(self, directory="./"): if self.half_kpts and os.path.isfile(os.path.join(directory, "KPOINTS")): kpts = Kpoints.from_file(os.path.join(directory, "KPOINTS")) kpts.kpts = np.maximum(np.array(kpts.kpts) / 2, 1) - kpts.kpts = kpts.kpts.astype(int).tolist() if tuple(kpts.kpts[0]) == (1, 1, 1): kpt_dic = kpts.as_dict() kpt_dic["generation_style"] = "Gamma" @@ -902,11 +898,11 @@ def run(self, directory="./"): start_new_session=True, ) # pylint: disable=R1732 - def postprocess(self, directory="./"): + def postprocess(self, directory="./") -> None: """Postprocessing includes renaming and gzipping where necessary.""" # Add suffix to all sub_dir/{items} - neb_dirs, neb_sub = self._get_neb_dirs(directory) + neb_dirs, _neb_sub = self._get_neb_dirs(directory) for path in neb_dirs: for file in VASP_NEB_OUTPUT_SUB_FILES: @@ -940,7 +936,7 @@ class GenerateVaspInputJob(Job): used to modify the VASP input files before the next VaspJob. """ - def __init__(self, input_set, contcar_only=True, **kwargs): + def __init__(self, input_set, contcar_only=True, **kwargs) -> None: """ Args: input_set (str): Full path to the input set. 
E.g., @@ -953,10 +949,10 @@ def __init__(self, input_set, contcar_only=True, **kwargs): self.contcar_only = contcar_only self.kwargs = kwargs - def setup(self, directory="./"): + def setup(self, directory="./") -> None: """Dummy setup.""" - def run(self, directory="./"): + def run(self, directory="./") -> None: """Run the calculation.""" if os.path.isfile(os.path.join(directory, "CONTCAR")): structure = Structure.from_file(os.path.join(directory, "CONTCAR")) @@ -969,5 +965,5 @@ def run(self, directory="./"): vis = getattr(mod, classname)(structure, **self.kwargs) vis.write_input(directory) - def postprocess(self, directory="./"): + def postprocess(self, directory="./") -> None: """Dummy postprocess.""" diff --git a/custodian/vasp/validators.py b/custodian/vasp/validators.py index b6b8827f..569e752f 100644 --- a/custodian/vasp/validators.py +++ b/custodian/vasp/validators.py @@ -1,5 +1,7 @@ """Implements various validators, e.g., check if vasprun.xml is valid, for VASP.""" +from __future__ import annotations + import logging import os from collections import deque @@ -25,12 +27,12 @@ def __init__(self, output_file: str = "vasp.out", stderr_file: str = "std_err.tx self.stderr_file = stderr_file self.logger = logging.getLogger(type(self).__name__) - def check(self, directory="./"): + def check(self, directory="./") -> bool: """Check for errors.""" try: load_vasprun(os.path.join(directory, "vasprun.xml")) except Exception: - exception_context = {} + exception_context: dict[str, str | float] = {} if os.path.isfile(os.path.join(directory, self.output_file)): with open(os.path.join(directory, self.output_file)) as output_file: @@ -65,7 +67,7 @@ class VaspFilesValidator(Validator): normally create upon running. """ - def __init__(self): + def __init__(self) -> None: """Dummy init.""" def check(self, directory="./"): @@ -79,7 +81,7 @@ class VaspNpTMDValidator(Validator): Currently, VASP only have Langevin thermostat (MDALGO = 3) for NpT ensemble. """ - def __init__(self): + def __init__(self) -> None: """Dummy init.""" def check(self, directory="./"): @@ -92,15 +94,13 @@ def check(self, directory="./"): outcar = load_outcar(os.path.join(directory, "OUTCAR")) patterns = {"MDALGO": r"MDALGO\s+=\s+([\d]+)"} outcar.read_pattern(patterns=patterns) - if outcar.data["MDALGO"] == [["3"]]: - return False - return True + return outcar.data["MDALGO"] != [["3"]] class VaspAECCARValidator(Validator): """Check if the data in the AECCAR is corrupted.""" - def __init__(self): + def __init__(self) -> None: """Dummy init.""" def check(self, directory="./"): @@ -111,7 +111,7 @@ def check(self, directory="./"): return check_broken_chgcar(aeccar) -def check_broken_chgcar(chgcar, diff_thresh=None): +def check_broken_chgcar(chgcar, diff_thresh=None) -> bool: """ Check if the charge density file is corrupt Args: diff --git a/docs/_themes/flask_theme_support.py b/docs/_themes/flask_theme_support.py index 76b16b2c..785f75bf 100644 --- a/docs/_themes/flask_theme_support.py +++ b/docs/_themes/flask_theme_support.py @@ -1,5 +1,7 @@ """flasky extensions. 
 flasky pygments style based on tango style."""
+from typing import ClassVar
+
 from pygments.style import Style
 from pygments.token import (
     Comment,
@@ -23,7 +25,7 @@ class FlaskyStyle(Style):
     background_color = "#f8f8f8"
     default_style = ""
 
-    styles = {
+    styles: ClassVar = {
         # No corresponding class for the following:
         # Text: "", # class: ''
         Whitespace: "underline #f8f8f8",  # class: 'w'
diff --git a/docs/changelog.md b/docs/changelog.md
index e2b8b3d9..e4b4661a 100644
--- a/docs/changelog.md
+++ b/docs/changelog.md
@@ -6,6 +6,16 @@ nav_order: 2
 
 # Change Log
 
+## v2024.4.18
+
+- Enable export of environment variables and running LOBSTER as a command (@JaGeo)
+- New Gaussian plugin (@rashatwi)
+- Add missing directory kwarg on QCJob run() method (@Andrew-S-Rosen)
+- Add support for directory for Q-Chem (@Andrew-S-Rosen)
+
+## v2024.3.12
+
+- Make Custodian threadsafe with explicit file paths (@zulissimeta)
+
 ## v2024.2.15
 
 ### 🐛 Bug Fixes
diff --git a/pyproject.toml b/pyproject.toml
index 3a9831ee..6f3bae7f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "custodian"
-version = "2024.2.15"
+version = "2024.4.18"
 description = "A simple JIT job management framework in Python."
 authors = [
     { name = "Janosh Riebesell", email = "janosh.riebesell@gmail.com" },
@@ -41,6 +41,7 @@ dependencies = ["monty>=2.0.6", "psutil", "ruamel.yaml>=0.15.6"]
 vasp = ["pymatgen"]
 nwchem = ["pymatgen"]
 qchem = ["pymatgen"]
+gaussian = ["pymatgen", "matplotlib"]
 dev = ["pymatgen", "pytest", "pytest-cov"]
 lint = ["mypy", "pre-commit", "ruff"]
 error-statistics = ["sentry-sdk>=0.8.0"]
@@ -102,6 +103,7 @@ ignore = [
     "B028", # No explicit stacklevel keyword argument found
     "B904", # Within an except clause, raise exceptions with ...
     "C408", # unnecessary-collection-call
+    "COM812", # missing trailing comma; conflicts with the ruff formatter
     "D105", # Missing docstring in magic method
     "D205", # 1 blank line required between summary line and description
     "D212", # Multi-line docstring summary should start at the first line
@@ -112,7 +114,8 @@ ignore = [
     "PLR", # pylint refactor
     "PLW2901", # Outer for loop variable overwritten by inner assignment target
     "PT013", # pytest-incorrect-pytest-import
-    "RUF012", # Disable checks for mutable class args. This is a non-problem.
+ "PTH", + "RUF012", # Disable checks for mutable class args "SIM105", # Use contextlib.suppress(OSError) instead of try-except-pass ] pydocstyle.convention = "google" @@ -120,7 +123,7 @@ isort.split-on-trailing-comma = false [tool.ruff.lint.per-file-ignores] "__init__.py" = ["F401"] -"tests/*" = ["D"] +"tests/*" = ["D", "S101"] "tasks.py" = ["D", "E"] [tool.pytest.ini_options] diff --git a/tasks.py b/tasks.py index 72849949..f2cf6aba 100644 --- a/tasks.py +++ b/tasks.py @@ -13,11 +13,11 @@ from custodian import __version__ as CURRENT_VER -NEW_VER = datetime.datetime.today().strftime("%Y.%-m.%-d") +NEW_VER = datetime.datetime.now(tz=datetime.timezone.utc).strftime("%Y.%-m.%-d") @task -def make_doc(ctx): +def make_doc(ctx) -> None: with cd("docs"): ctx.run("touch index.rst") ctx.run("rm custodian.*.rst", warn=True) @@ -39,7 +39,7 @@ def make_doc(ctx): @task -def update_doc(ctx): +def update_doc(ctx) -> None: make_doc(ctx) ctx.run("git add .", warn=True) ctx.run('git commit -a -m "Update dev docs"', warn=True) @@ -47,7 +47,7 @@ def update_doc(ctx): @task -def release_github(ctx): +def release_github(ctx) -> None: payload = { "tag_name": "v" + NEW_VER, "target_commitish": "master", @@ -65,12 +65,12 @@ def release_github(ctx): @task -def test(ctx): +def test(ctx) -> None: ctx.run("pytest custodian") @task -def set_ver(ctx): +def set_ver(ctx) -> None: with open("custodian/__init__.py") as file: lines = [f'__version__ = "{NEW_VER}"' if "__version__" in line else line.rstrip() for line in file] @@ -85,7 +85,7 @@ def set_ver(ctx): @task -def update_changelog(ctx, version=None, sim=False): +def update_changelog(ctx, version=None, sim=False) -> None: """ Create a preliminary change log using the git logs. @@ -135,7 +135,7 @@ def update_changelog(ctx, version=None, sim=False): @task -def release(ctx): +def release(ctx) -> None: set_ver(ctx) update_doc(ctx) release_github(ctx) diff --git a/tests/ansible/test_interpreter.py b/tests/ansible/test_interpreter.py index a0258239..03b641fc 100644 --- a/tests/ansible/test_interpreter.py +++ b/tests/ansible/test_interpreter.py @@ -1,5 +1,9 @@ """Created on Jun 1, 2012.""" +from __future__ import annotations + +from typing import Any + import pytest from custodian.ansible.actions import FileActions @@ -14,10 +18,10 @@ class TestModder: - def test_dict_modify(self): + def test_dict_modify(self) -> None: modder = Modder() dct = {"Hello": "World"} - mod = {"_set": {"Hello": "Universe", "Bye": "World"}} + mod: dict[str, Any] = {"_set": {"Hello": "Universe", "Bye": "World"}} modder.modify(mod, dct) assert dct == {"Bye": "World", "Hello": "Universe"} mod = {"_unset": {"Hello": 1}} @@ -100,7 +104,7 @@ def test_dict_modify(self): modder.modify(mod, dct) assert dct == {"a": {"b": {"c": 102}, "e": {"f": [201, 301]}}} - def test_file_modify(self): + def test_file_modify(self) -> None: modder = Modder(actions=[FileActions]) modder.modify({"_file_create": {"content": "Test data"}}, "test_file") modder.modify({"_file_copy": {"dest": "test_file_copy"}}, "test_file") @@ -115,7 +119,7 @@ def test_file_modify(self): modder.modify({"_file_delete": {"mode": "actual"}}, "test_file_copy1") modder.modify({"_file_delete": {"mode": "actual"}}, "test_file_copy2") - def test_strict_mode(self): + def test_strict_mode(self) -> None: modder = Modder(actions=[FileActions]) dct = {"Hello": "World"} mod = {"_set": {"Hello": "Universe", "Bye": "World"}} @@ -136,7 +140,7 @@ def test_strict_mode(self): "test_file", ) - def test_modify_object(self): + def test_modify_object(self) -> None: modder = 
Modder() o = MyObject(1) assert o.b["a"] == 1 @@ -145,7 +149,7 @@ def test_modify_object(self): class MyObject: - def __init__(self, a): + def __init__(self, a) -> None: self.b = {"a": a} def as_dict(self): diff --git a/tests/conftest.py b/tests/conftest.py index b6d822b3..ebff57df 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,6 +10,6 @@ @pytest.fixture(autouse=True) -def _patch_get_potential_energy(monkeypatch): +def _patch_get_potential_energy(monkeypatch) -> None: """Monkeypatch the multiprocessing.cpu_count() function to always return 64.""" - monkeypatch.setattr(multiprocessing, "cpu_count", lambda *args, **kwargs: 64) + monkeypatch.setattr(multiprocessing, "cpu_count", lambda: 64) diff --git a/tests/cp2k/test_handlers.py b/tests/cp2k/test_handlers.py index 9bce674f..c7349044 100644 --- a/tests/cp2k/test_handlers.py +++ b/tests/cp2k/test_handlers.py @@ -25,7 +25,7 @@ TEST_FILES_DIR = f"{TEST_FILES}/cp2k" -def clean_dir(dct): +def clean_dir(dct) -> None: for file in glob(os.path.join(dct, "error.*.tar.gz")): os.remove(file) for file in glob(os.path.join(dct, "custodian.chk.*.tar.gz")): @@ -33,7 +33,7 @@ def clean_dir(dct): class HandlerTests(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: warnings.filterwarnings("ignore") clean_dir(TEST_FILES_DIR) @@ -54,7 +54,7 @@ def setUp(self): self.modder = Cp2kModder(filename=self.input_file) - def test(self): + def test(self) -> None: """Ensure modder works""" kwds = KeywordList(keywords=[Keyword("BASIS_SET_FILE_NAME", "FILE1"), Keyword("BASIS_SET_FILE_NAME", "FILE2")]) actions = [ @@ -69,12 +69,12 @@ def test(self): assert self.modder.ci["FORCE_EVAL"]["METHOD"] == Keyword("METHOD", "NOT QA") assert isinstance(self.modder.ci["FORCE_EVAL"]["DFT"]["BASIS_SET_FILE_NAME"], KeywordList) - def test_handler_inits(self): + def test_handler_inits(self) -> None: """Ensure handlers initialize fine without real input/output files""" for handler in (AbortHandler, FrozenJobErrorHandler, NumericalPrecisionHandler, UnconvergedScfErrorHandler): handler() - def test_frozenjobhandler(self): + def test_frozenjobhandler(self) -> None: """Handler for frozen job""" handler = FrozenJobErrorHandler( input_file=self.input_file, output_file=self.output_file_preconditioner, timeout=1 @@ -100,7 +100,7 @@ def test_frozenjobhandler(self): handler = FrozenJobErrorHandler(input_file=self.input_file, output_file=self.output_file_imprecise, timeout=1) handler.check() - def test_unconverged_handler(self): + def test_unconverged_handler(self) -> None: """Handler for SCF handling not working""" ci = StaticSet.from_file(self.input_file) handler = UnconvergedScfErrorHandler(input_file=self.input_file, output_file=self.output_file_unconverged) @@ -119,24 +119,24 @@ def test_unconverged_handler(self): ci = StaticSet.from_file(self.input_file) assert ci["force_eval"]["dft"]["scf"]["MIXING"]["ALPHA"] == Keyword("ALPHA", 0.1) - def test_abort_handler(self): + def test_abort_handler(self) -> None: """Checks if cp2k called abort""" handler = AbortHandler(input_file=self.input_file, output_file=self.output_file_cholesky) assert handler.check() - def test_imprecision_handler(self): + def test_imprecision_handler(self) -> None: """Check for low precision leading to stagnant SCF""" handler = NumericalPrecisionHandler(self.input_file, output_file=self.output_file_imprecise, max_same=3) assert handler.check() c = handler.correct() assert c["errors"], ["Insufficient precision"] - def test_std_out(self): + def test_std_out(self) -> None: """Errors sent to the std 
out instead of cp2k out""" handler = StdErrHandler(std_err=self.output_file_stderr) assert handler.check() handler.correct() - def test_conv(self): + def test_conv(self) -> None: """Check that SCF convergence can be read""" assert len(get_conv(self.output_file_conv)) == 45 diff --git a/tests/cp2k/test_jobs.py b/tests/cp2k/test_jobs.py index 8d3e2412..4ef1d81a 100644 --- a/tests/cp2k/test_jobs.py +++ b/tests/cp2k/test_jobs.py @@ -17,15 +17,15 @@ cwd = os.getcwd() -def clean_dir(dir): - for file in glob(os.path.join(dir, "error.*.tar.gz")): +def clean_dir(folder) -> None: + for file in glob(os.path.join(folder, "error.*.tar.gz")): os.remove(file) - for file in glob(os.path.join(dir, "custodian.chk.*.tar.gz")): + for file in glob(os.path.join(folder, "custodian.chk.*.tar.gz")): os.remove(file) class HandlerTests(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: warnings.filterwarnings("ignore") clean_dir(TEST_FILES_DIR) @@ -43,7 +43,7 @@ def setUp(self): if os.path.isfile(self.output_file): os.remove(self.output_file) - def test_job(self): + def test_job(self) -> None: job = Cp2kJob( cp2k_cmd=["echo"], input_file=self.input_file, @@ -61,7 +61,7 @@ def test_job(self): if os.path.isfile(self.std_err): os.remove(self.std_err) - def test_double(self): + def test_double(self) -> None: jobs = Cp2kJob.double_job( cp2k_cmd=["echo"], input_file=self.input_file_hybrid, diff --git a/tests/feff/test_handlers.py b/tests/feff/test_handlers.py index b3d49ced..0b46aa49 100644 --- a/tests/feff/test_handlers.py +++ b/tests/feff/test_handlers.py @@ -14,13 +14,13 @@ __date__ = "Oct 18, 2017" -def clean_dir(): +def clean_dir() -> None: for file in glob("error.*.tar.gz"): os.remove(file) class UnconvergedErrorHandlerTest(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: os.chdir(TEST_FILES) subdir = f"{TEST_FILES}/feff_unconverged" os.chdir(subdir) @@ -31,7 +31,7 @@ def setUp(self): shutil.copy("feff.inp", "feff.inp.orig") shutil.copy("log1.dat", "log1.dat.orig") - def test_check_unconverged(self): + def test_check_unconverged(self) -> None: handler = UnconvergedErrorHandler() assert handler.check() dct = handler.correct() diff --git a/tests/feff/test_jobs.py b/tests/feff/test_jobs.py index 9e37ded7..b8483050 100644 --- a/tests/feff/test_jobs.py +++ b/tests/feff/test_jobs.py @@ -17,14 +17,14 @@ TEST_DIR = f"{TEST_FILES}/feff_unconverged" -def test_as_from_dict(): +def test_as_from_dict() -> None: f = FeffJob("hello") f2 = FeffJob.from_dict(f.as_dict()) assert type(f) == type(f2) assert f2.feff_cmd == "hello" -def test_setup(): +def test_setup() -> None: with cd(TEST_DIR), ScratchDir(".", copy_from_current_on_enter=True): f = FeffJob("hello", backup=True) f.setup() @@ -38,7 +38,7 @@ def test_setup(): assert atom == atom_origin -def test_postprocess(): +def test_postprocess() -> None: with cd(TEST_DIR), ScratchDir(".", copy_from_current_on_enter=True): f = FeffJob("hello", backup=True, gzipped=True) f.postprocess() diff --git a/tests/files/gaussian/Checkpoint.chk b/tests/files/gaussian/Checkpoint.chk new file mode 100644 index 00000000..796bf6a6 --- /dev/null +++ b/tests/files/gaussian/Checkpoint.chk @@ -0,0 +1 @@ +This is just a mock .chk file to avoid uploading a large actual .chk to the repository. diff --git a/tests/files/gaussian/Gau-mock.rwf b/tests/files/gaussian/Gau-mock.rwf new file mode 100644 index 00000000..7eea6a36 --- /dev/null +++ b/tests/files/gaussian/Gau-mock.rwf @@ -0,0 +1 @@ +This is just a mock .rwf file to avoid uploading a large actual .rwf to the repository. 
diff --git a/tests/files/gaussian/Optimization.chk b/tests/files/gaussian/Optimization.chk new file mode 100644 index 00000000..796bf6a6 --- /dev/null +++ b/tests/files/gaussian/Optimization.chk @@ -0,0 +1 @@ +This is just a mock .chk file to avoid uploading a large actual .chk to the repository. diff --git a/tests/files/gaussian/bad_file.com b/tests/files/gaussian/bad_file.com new file mode 100644 index 00000000..c2ab1893 --- /dev/null +++ b/tests/files/gaussian/bad_file.com @@ -0,0 +1,9 @@ +%chk=Checkpoint.chk +%mem=48GB +%NProcShared=28 +#P PBE1PBE/6-31+G* opt scf=(maxcycle=100) guess=read geom=check + +H10 C4 O2 + +0 1 + diff --git a/tests/files/gaussian/bad_file.out.gz b/tests/files/gaussian/bad_file.out.gz new file mode 100644 index 00000000..5a3b2318 Binary files /dev/null and b/tests/files/gaussian/bad_file.out.gz differ diff --git a/tests/files/gaussian/coord_inputs.com b/tests/files/gaussian/coord_inputs.com new file mode 100644 index 00000000..bf8f8c7f --- /dev/null +++ b/tests/files/gaussian/coord_inputs.com @@ -0,0 +1,27 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P PBE1PBE/6-31+G* opt=(z-matrix) scf=(maxcycle=100) + +H10 C4 O2 + +0 1 +O -1.678000 -0.723000 0.319000 +C -2.188000 -1.963000 0.789000 +C -0.258000 -0.683000 0.309000 +C 0.152000 0.687000 -0.221000 +O 1.572000 0.727000 -0.231000 +C 2.092000 1.957000 -0.701000 +H -1.482000 -1.948000 1.593000 +H -1.768000 -2.882000 0.437000 +H 0.142000 -1.473000 -0.351000 +H 0.142000 -0.833000 1.319000 +H -0.248000 1.477000 0.439000 +H -0.248000 0.837000 -1.231000 +H 1.751000 2.752000 -0.072000 +H 1.757000 2.126000 -1.703000 +H 3.161000 1.922000 -0.684000 +H -2.894000 -1.978000 -0.015000 + + + diff --git a/tests/files/gaussian/coord_inputs.out.gz b/tests/files/gaussian/coord_inputs.out.gz new file mode 100644 index 00000000..495714c4 Binary files /dev/null and b/tests/files/gaussian/coord_inputs.out.gz differ diff --git a/tests/files/gaussian/coords_dict_geom.com b/tests/files/gaussian/coords_dict_geom.com new file mode 100644 index 00000000..531487f8 --- /dev/null +++ b/tests/files/gaussian/coords_dict_geom.com @@ -0,0 +1,27 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P PBE1PBE/6-31+G* opt scf=(maxcycle=100) geom=(connectivity, modredundant) + +H10 C4 O2 + +0 1 +O -1.678000 -0.723000 0.319000 +C -2.188000 -1.963000 0.789000 +C -0.258000 -0.683000 0.309000 +C 0.152000 0.687000 -0.221000 +O 1.572000 0.727000 -0.231000 +C 2.092000 1.957000 -0.701000 +H -1.482000 -1.948000 1.593000 +H -1.768000 -2.882000 0.437000 +H 0.142000 -1.473000 -0.351000 +H 0.142000 -0.833000 1.319000 +H -0.248000 1.477000 0.439000 +H -0.248000 0.837000 -1.231000 +H 1.751000 2.752000 -0.072000 +H 1.757000 2.126000 -1.703000 +H 3.161000 1.922000 -0.684000 +H -2.894000 -1.978000 -0.015000 + + + diff --git a/tests/files/gaussian/coords_dict_geom.out.gz b/tests/files/gaussian/coords_dict_geom.out.gz new file mode 100644 index 00000000..28b7b1ca Binary files /dev/null and b/tests/files/gaussian/coords_dict_geom.out.gz differ diff --git a/tests/files/gaussian/coords_string_geom.com b/tests/files/gaussian/coords_string_geom.com new file mode 100644 index 00000000..513430ec --- /dev/null +++ b/tests/files/gaussian/coords_string_geom.com @@ -0,0 +1,27 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P PBE1PBE/6-31+G* opt scf=(maxcycle=100) geom=connectivity + +H10 C4 O2 + +0 1 +O -1.678000 -0.723000 0.319000 +C -2.188000 -1.963000 0.789000 +C -0.258000 -0.683000 0.309000 +C 0.152000 0.687000 -0.221000 +O 1.572000 0.727000 -0.231000 +C 
2.092000 1.957000 -0.701000 +H -1.482000 -1.948000 1.593000 +H -1.768000 -2.882000 0.437000 +H 0.142000 -1.473000 -0.351000 +H 0.142000 -0.833000 1.319000 +H -0.248000 1.477000 0.439000 +H -0.248000 0.837000 -1.231000 +H 1.751000 2.752000 -0.072000 +H 1.757000 2.126000 -1.703000 +H 3.161000 1.922000 -0.684000 +H -2.894000 -1.978000 -0.015000 + + + diff --git a/tests/files/gaussian/coords_string_geom.out.gz b/tests/files/gaussian/coords_string_geom.out.gz new file mode 100644 index 00000000..a3984454 Binary files /dev/null and b/tests/files/gaussian/coords_string_geom.out.gz differ diff --git a/tests/files/gaussian/found_coords.com b/tests/files/gaussian/found_coords.com new file mode 100644 index 00000000..3b5bc085 --- /dev/null +++ b/tests/files/gaussian/found_coords.com @@ -0,0 +1,27 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P PBE1PBE/6-31+G* opt scf=(maxcycle=100) geom=check + +H10 C4 O2 + +0 1 +O -1.678000 -0.723000 0.319000 +C -2.188000 -1.963000 0.789000 +C -0.258000 -0.683000 0.309000 +C 0.152000 0.687000 -0.221000 +O 1.572000 0.727000 -0.231000 +C 2.092000 1.957000 -0.701000 +H -1.482000 -1.948000 1.593000 +H -1.768000 -2.882000 0.437000 +H 0.142000 -1.473000 -0.351000 +H 0.142000 -0.833000 1.319000 +H -0.248000 1.477000 0.439000 +H -0.248000 0.837000 -1.231000 +H 1.751000 2.752000 -0.072000 +H 1.757000 2.126000 -1.703000 +H 3.161000 1.922000 -0.684000 +H -2.894000 -1.978000 -0.015000 + + + diff --git a/tests/files/gaussian/found_coords.out.gz b/tests/files/gaussian/found_coords.out.gz new file mode 100644 index 00000000..218e0e9f Binary files /dev/null and b/tests/files/gaussian/found_coords.out.gz differ diff --git a/tests/files/gaussian/insufficient_memory.com b/tests/files/gaussian/insufficient_memory.com new file mode 100644 index 00000000..894b7637 --- /dev/null +++ b/tests/files/gaussian/insufficient_memory.com @@ -0,0 +1,34 @@ +%chk=Optimization.chk +%Mem=1MB +%NProcShared=24 +#P B3LYP/6-31+G* Opt=(MaxCycles=100) SCF SCRF=(PCM, Solvent=TetraHydroFuran) + +H14 C6 S1 O2 + +2 1 +S 0.663472 -0.352587 0.064345 +O 0.696082 -1.521711 -0.772194 +O 0.443521 -0.388093 1.487643 +C -1.299101 0.746315 -0.713004 +C -2.275225 -0.363997 -0.726814 +C 2.062372 0.772028 -0.348351 +C -1.420735 1.901920 0.197861 +C -2.958634 -0.759411 0.576477 +C 3.376949 0.127510 0.079155 +H -0.806462 0.928092 -1.667707 +H -3.029410 0.031385 -1.440949 +H -1.861021 -1.224276 -1.258062 +H 1.984803 0.916341 -1.425530 +H 1.840910 1.693583 0.187721 +H -0.593518 2.604656 0.119375 +H -2.323356 2.445977 -0.128220 +H -1.589927 1.615025 1.234508 +H -3.495439 0.072548 1.031642 +H -3.680715 -1.550563 0.380871 +H -2.237458 -1.142273 1.300934 +H 3.558819 -0.808040 -0.446142 +H 4.170293 0.829714 -0.187582 +H 3.416346 -0.038534 1.154086 + + + diff --git a/tests/files/gaussian/insufficient_memory.out.gz b/tests/files/gaussian/insufficient_memory.out.gz new file mode 100644 index 00000000..e827bd92 Binary files /dev/null and b/tests/files/gaussian/insufficient_memory.out.gz differ diff --git a/tests/files/gaussian/linear_bend.com b/tests/files/gaussian/linear_bend.com new file mode 100644 index 00000000..29a20aad --- /dev/null +++ b/tests/files/gaussian/linear_bend.com @@ -0,0 +1,47 @@ +%chk=Optimization.chk +%mem=100GB +%NProcShared=28 +#P B3LYP/6-31+G* Opt=(MaxCycles=100) SCF SCRF=(PCM, Solvent=TetraHydroFuran) + +H24 O12 + +0 1 +O -0.535940 2.412480 0.000000 +H 0.434060 2.412480 0.000000 +H -0.859270 1.623360 -0.462230 +O -0.477040 0.515290 0.000000 +H 0.492960 0.515290 0.000000 +H -0.800370 -0.331850 0.344550 
+O 1.941420 1.473710 -0.115560 +H 2.911420 1.473710 -0.115560 +H 1.618090 0.650870 -0.514660 +O 1.506410 -2.739190 -0.033320 +H 2.476410 -2.739190 -0.033320 +H 1.183080 -2.953640 0.855710 +O -2.481410 -1.412410 -0.089680 +H -1.511410 -1.412410 -0.089680 +H -2.804740 -1.796910 0.740090 +O 0.267510 -1.375730 -0.089680 +H 1.237510 -1.375730 -0.089680 +H -0.055820 -1.963460 -0.790340 +O 2.079230 3.193980 0.024790 +H 3.049230 3.193980 0.024790 +H 1.755900 3.501110 -0.836620 +O -2.729610 0.744610 0.024790 +H -1.759610 0.744610 0.024790 +H -3.052940 0.984940 -0.857590 +O -2.260190 4.168690 -0.056620 +H -1.290190 4.168690 -0.056620 +H -2.583520 4.985440 -0.468050 +O -2.762450 3.169940 -0.056620 +H -1.792450 3.169940 -0.056620 +H -3.085780 2.467980 0.529570 +O 0.687880 4.885180 -0.056620 +H 1.657880 4.885180 -0.056620 +H 0.364550 5.613770 -0.609360 +O -1.168310 -3.669310 -0.056620 +H -0.198310 -3.669310 -0.056620 +H -1.491640 -3.998110 -0.909990 + + + diff --git a/tests/files/gaussian/linear_bend.out.gz b/tests/files/gaussian/linear_bend.out.gz new file mode 100644 index 00000000..e1706603 Binary files /dev/null and b/tests/files/gaussian/linear_bend.out.gz differ diff --git a/tests/files/gaussian/missing_file.com b/tests/files/gaussian/missing_file.com new file mode 100644 index 00000000..9e142e14 --- /dev/null +++ b/tests/files/gaussian/missing_file.com @@ -0,0 +1,11 @@ +%chk=DME.chk +%mem=48GB +%NProcShared=28 +#P PBE1PBE/6-31+G* opt scf=(maxcycle=100) geom=check guess=read + +H10 C4 O2 + +0 1 + + + diff --git a/tests/files/gaussian/missing_file.out.gz b/tests/files/gaussian/missing_file.out.gz new file mode 100644 index 00000000..14029f00 Binary files /dev/null and b/tests/files/gaussian/missing_file.out.gz differ diff --git a/tests/files/gaussian/missing_mol.com b/tests/files/gaussian/missing_mol.com new file mode 100644 index 00000000..4b153f2a --- /dev/null +++ b/tests/files/gaussian/missing_mol.com @@ -0,0 +1,12 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P PBE1PBE/6-31+G* opt scf=(maxcycle=100) guess=read + +H10 C4 O2 + +0 1 + + + + diff --git a/tests/files/gaussian/missing_mol.out.gz b/tests/files/gaussian/missing_mol.out.gz new file mode 100644 index 00000000..f67d19be Binary files /dev/null and b/tests/files/gaussian/missing_mol.out.gz differ diff --git a/tests/files/gaussian/mol_opt.com b/tests/files/gaussian/mol_opt.com new file mode 100644 index 00000000..d12f9de3 --- /dev/null +++ b/tests/files/gaussian/mol_opt.com @@ -0,0 +1,27 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P B3LYP/6-31+G* opt=(maxcycles=5) scf + +H10 C4 O2 + +0 1 +O 1.724940 -0.522024 -0.002758 +C 2.993771 0.117174 0.001859 +C 0.648237 0.400893 -0.000391 +C -0.646436 -0.400946 0.001289 +O -1.726229 0.518750 0.000170 +C -2.993014 -0.114625 0.000081 +H 3.191019 0.608990 0.965064 +H 3.058841 0.862859 -0.808106 +H 0.686452 1.047437 -0.891425 +H 0.683799 1.048338 0.890377 +H -0.685781 -1.046335 0.893333 +H -0.686351 -1.048611 -0.889176 +H -3.128465 -0.740853 0.896021 +H -3.127802 -0.742042 -0.895128 +H -3.744952 0.678596 -0.000759 +H 3.748207 -0.657166 -0.156531 + + + diff --git a/tests/files/gaussian/mol_opt.out.gz b/tests/files/gaussian/mol_opt.out.gz new file mode 100644 index 00000000..e839ad4e Binary files /dev/null and b/tests/files/gaussian/mol_opt.out.gz differ diff --git a/tests/files/gaussian/opt_steps_better_guess.com b/tests/files/gaussian/opt_steps_better_guess.com new file mode 100644 index 00000000..e1e12953 --- /dev/null +++ b/tests/files/gaussian/opt_steps_better_guess.com @@ -0,0 
+1,27 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P B3LYP/6-31+G* Opt=(MaxCycles=1) SCF Int=Ultrafine + +H10 C4 O2 + +0 1 +O -1.678000 -0.723000 0.319000 +C -2.188000 -1.963000 0.789000 +C -0.258000 -0.683000 0.309000 +C 0.152000 0.687000 -0.221000 +O 1.572000 0.727000 -0.231000 +C 2.092000 1.957000 -0.701000 +H -1.482000 -1.948000 1.593000 +H -1.768000 -2.882000 0.437000 +H 0.142000 -1.473000 -0.351000 +H 0.142000 -0.833000 1.319000 +H -0.248000 1.477000 0.439000 +H -0.248000 0.837000 -1.231000 +H 1.751000 2.752000 -0.072000 +H 1.757000 2.126000 -1.703000 +H 3.161000 1.922000 -0.684000 +H -2.894000 -1.978000 -0.015000 + + + diff --git a/tests/files/gaussian/opt_steps_better_guess.out.gz b/tests/files/gaussian/opt_steps_better_guess.out.gz new file mode 100644 index 00000000..d4443141 Binary files /dev/null and b/tests/files/gaussian/opt_steps_better_guess.out.gz differ diff --git a/tests/files/gaussian/opt_steps_cycles.com b/tests/files/gaussian/opt_steps_cycles.com new file mode 100644 index 00000000..e457863b --- /dev/null +++ b/tests/files/gaussian/opt_steps_cycles.com @@ -0,0 +1,27 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P B3LYP/6-31+G* Opt=(MaxCycles=1) SCF + +H10 C4 O2 + +0 1 +O -1.678000 -0.723000 0.319000 +C -2.188000 -1.963000 0.789000 +C -0.258000 -0.683000 0.309000 +C 0.152000 0.687000 -0.221000 +O 1.572000 0.727000 -0.231000 +C 2.092000 1.957000 -0.701000 +H -1.482000 -1.948000 1.593000 +H -1.768000 -2.882000 0.437000 +H 0.142000 -1.473000 -0.351000 +H 0.142000 -0.833000 1.319000 +H -0.248000 1.477000 0.439000 +H -0.248000 0.837000 -1.231000 +H 1.751000 2.752000 -0.072000 +H 1.757000 2.126000 -1.703000 +H 3.161000 1.922000 -0.684000 +H -2.894000 -1.978000 -0.015000 + + + diff --git a/tests/files/gaussian/opt_steps_cycles.out.gz b/tests/files/gaussian/opt_steps_cycles.out.gz new file mode 100644 index 00000000..c757212e Binary files /dev/null and b/tests/files/gaussian/opt_steps_cycles.out.gz differ diff --git a/tests/files/gaussian/opt_steps_from_structure.com b/tests/files/gaussian/opt_steps_from_structure.com new file mode 100644 index 00000000..963089c9 --- /dev/null +++ b/tests/files/gaussian/opt_steps_from_structure.com @@ -0,0 +1,27 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P B3LYP/6-31+G* Opt=(MaxCycles=5) SCF + +H10 C4 O2 + +0 1 +O -1.678000 -0.723000 0.319000 +C -2.188000 -1.963000 0.789000 +C -0.258000 -0.683000 0.309000 +C 0.152000 0.687000 -0.221000 +O 1.572000 0.727000 -0.231000 +C 2.092000 1.957000 -0.701000 +H -1.482000 -1.948000 1.593000 +H -1.768000 -2.882000 0.437000 +H 0.142000 -1.473000 -0.351000 +H 0.142000 -0.833000 1.319000 +H -0.248000 1.477000 0.439000 +H -0.248000 0.837000 -1.231000 +H 1.751000 2.752000 -0.072000 +H 1.757000 2.126000 -1.703000 +H 3.161000 1.922000 -0.684000 +H -2.894000 -1.978000 -0.015000 + + + diff --git a/tests/files/gaussian/opt_steps_from_structure.out.gz b/tests/files/gaussian/opt_steps_from_structure.out.gz new file mode 100644 index 00000000..3552e0ea Binary files /dev/null and b/tests/files/gaussian/opt_steps_from_structure.out.gz differ diff --git a/tests/files/gaussian/opt_steps_int_grid.com b/tests/files/gaussian/opt_steps_int_grid.com new file mode 100644 index 00000000..eb651970 --- /dev/null +++ b/tests/files/gaussian/opt_steps_int_grid.com @@ -0,0 +1,27 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P B3LYP/6-31+G* Opt=(MaxCycles=1) Int=fine SCF + +H10 C4 O2 + +0 1 +O -1.678000 -0.723000 0.319000 +C -2.188000 -1.963000 0.789000 +C -0.258000 -0.683000 0.309000 +C 
0.152000 0.687000 -0.221000 +O 1.572000 0.727000 -0.231000 +C 2.092000 1.957000 -0.701000 +H -1.482000 -1.948000 1.593000 +H -1.768000 -2.882000 0.437000 +H 0.142000 -1.473000 -0.351000 +H 0.142000 -0.833000 1.319000 +H -0.248000 1.477000 0.439000 +H -0.248000 0.837000 -1.231000 +H 1.751000 2.752000 -0.072000 +H 1.757000 2.126000 -1.703000 +H 3.161000 1.922000 -0.684000 +H -2.894000 -1.978000 -0.015000 + + + diff --git a/tests/files/gaussian/opt_steps_int_grid.out.gz b/tests/files/gaussian/opt_steps_int_grid.out.gz new file mode 100644 index 00000000..46f63b5b Binary files /dev/null and b/tests/files/gaussian/opt_steps_int_grid.out.gz differ diff --git a/tests/files/gaussian/scf_convergence_algorithm.com b/tests/files/gaussian/scf_convergence_algorithm.com new file mode 100644 index 00000000..bbc81e17 --- /dev/null +++ b/tests/files/gaussian/scf_convergence_algorithm.com @@ -0,0 +1,27 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P PBE1PBE/6-31+G* Opt SCF=(MaxCycle=1) + +H10 C4 O2 + +0 1 +O -1.678000 -0.723000 0.319000 +C -2.188000 -1.963000 0.789000 +C -0.258000 -0.683000 0.309000 +C 0.152000 0.687000 -0.221000 +O 1.572000 0.727000 -0.231000 +C 2.092000 1.957000 -0.701000 +H -1.482000 -1.948000 1.593000 +H -1.768000 -2.882000 0.437000 +H 0.142000 -1.473000 -0.351000 +H 0.142000 -0.833000 1.319000 +H -0.248000 1.477000 0.439000 +H -0.248000 0.837000 -1.231000 +H 1.751000 2.752000 -0.072000 +H 1.757000 2.126000 -1.703000 +H 3.161000 1.922000 -0.684000 +H -2.894000 -1.978000 -0.015000 + + + diff --git a/tests/files/gaussian/scf_convergence_algorithm.out.gz b/tests/files/gaussian/scf_convergence_algorithm.out.gz new file mode 100644 index 00000000..4a74ce5d Binary files /dev/null and b/tests/files/gaussian/scf_convergence_algorithm.out.gz differ diff --git a/tests/files/gaussian/scf_convergence_better_guess.com b/tests/files/gaussian/scf_convergence_better_guess.com new file mode 100644 index 00000000..f5435d10 --- /dev/null +++ b/tests/files/gaussian/scf_convergence_better_guess.com @@ -0,0 +1,27 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P PBE1PBE/6-31+G* Opt SCF=(MaxCycle=3,xqc) + +H10 C4 O2 + +0 1 +O -1.678000 -0.723000 0.319000 +C -2.188000 -1.963000 0.789000 +C -0.258000 -0.683000 0.309000 +C 0.152000 0.687000 -0.221000 +O 1.572000 0.727000 -0.231000 +C 2.092000 1.957000 -0.701000 +H -1.482000 -1.948000 1.593000 +H -1.768000 -2.882000 0.437000 +H 0.142000 -1.473000 -0.351000 +H 0.142000 -0.833000 1.319000 +H -0.248000 1.477000 0.439000 +H -0.248000 0.837000 -1.231000 +H 1.751000 2.752000 -0.072000 +H 1.757000 2.126000 -1.703000 +H 3.161000 1.922000 -0.684000 +H -2.894000 -1.978000 -0.015000 + + + diff --git a/tests/files/gaussian/scf_convergence_better_guess.out.gz b/tests/files/gaussian/scf_convergence_better_guess.out.gz new file mode 100644 index 00000000..f3b33aa0 Binary files /dev/null and b/tests/files/gaussian/scf_convergence_better_guess.out.gz differ diff --git a/tests/files/gaussian/scf_convergence_cycles.com b/tests/files/gaussian/scf_convergence_cycles.com new file mode 100644 index 00000000..bbc81e17 --- /dev/null +++ b/tests/files/gaussian/scf_convergence_cycles.com @@ -0,0 +1,27 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P PBE1PBE/6-31+G* Opt SCF=(MaxCycle=1) + +H10 C4 O2 + +0 1 +O -1.678000 -0.723000 0.319000 +C -2.188000 -1.963000 0.789000 +C -0.258000 -0.683000 0.309000 +C 0.152000 0.687000 -0.221000 +O 1.572000 0.727000 -0.231000 +C 2.092000 1.957000 -0.701000 +H -1.482000 -1.948000 1.593000 +H -1.768000 -2.882000 0.437000 +H 
0.142000 -1.473000 -0.351000 +H 0.142000 -0.833000 1.319000 +H -0.248000 1.477000 0.439000 +H -0.248000 0.837000 -1.231000 +H 1.751000 2.752000 -0.072000 +H 1.757000 2.126000 -1.703000 +H 3.161000 1.922000 -0.684000 +H -2.894000 -1.978000 -0.015000 + + + diff --git a/tests/files/gaussian/scf_convergence_cycles.out.gz b/tests/files/gaussian/scf_convergence_cycles.out.gz new file mode 100644 index 00000000..b69dc5d0 Binary files /dev/null and b/tests/files/gaussian/scf_convergence_cycles.out.gz differ diff --git a/tests/files/gaussian/solute_solvent_surface.com b/tests/files/gaussian/solute_solvent_surface.com new file mode 100644 index 00000000..f160f720 --- /dev/null +++ b/tests/files/gaussian/solute_solvent_surface.com @@ -0,0 +1,61 @@ +%chk=checkpoint.chk +%mem=45GB +%NProcShared=28 +#P wB97X/Def2TZVP opt=(calcfc,tight) scf=(tight,maxcycle=100) int=ultrafine nosymmetry test scrf=(solvent=generic,read) + +Mg1 H30 C12 O6 + +2 1 +Mg 38.368301 42.090174 23.451563 +O 37.113609 43.752418 23.183780 +O 39.550273 43.673452 24.146353 +C 37.765911 45.015063 23.341397 +C 38.805391 44.863050 24.422604 +C 35.992516 43.807108 22.298046 +C 40.784971 43.583346 24.867190 +H 38.216621 45.300288 22.385449 +H 37.039792 45.778324 23.630471 +H 39.470169 45.729101 24.422335 +H 38.343756 44.774104 25.410442 +H 36.321129 44.133462 21.311001 +H 35.573752 42.804997 22.245480 +H 35.245736 44.497386 22.693097 +H 41.262553 42.647202 24.585240 +H 41.425572 44.424814 24.599097 +H 40.590859 43.598283 25.941736 +O 39.706946 40.635608 24.169899 +O 37.530662 41.588021 25.294479 +C 39.226048 39.931804 25.320610 +C 38.440188 40.905433 26.161124 +C 40.684846 39.900160 23.423742 +C 36.501019 42.295867 25.993108 +H 38.599238 39.095893 24.993268 +H 40.065808 39.540211 25.898067 +H 37.886365 40.369015 26.935171 +H 39.096657 41.639760 26.641926 +H 41.576655 39.753975 24.034551 +H 40.280161 38.930705 23.123015 +H 40.932440 40.489634 22.544083 +H 36.936889 43.052974 26.649103 +H 35.870862 42.772217 25.245288 +H 35.914248 41.592248 26.585136 +O 37.133440 40.778178 22.389068 +O 39.203543 42.136652 21.528868 +C 37.716669 40.325789 21.160407 +C 38.419072 41.496064 20.517817 +C 36.173429 39.862584 22.933301 +C 40.029934 43.169922 20.988926 +H 38.417871 39.513982 21.377668 +H 36.936398 39.951674 20.493831 +H 39.068371 41.139418 19.714717 +H 37.715707 42.227470 20.100652 +H 35.333055 39.761295 22.243564 +H 36.637196 38.885463 23.093076 +H 35.829526 40.271069 23.881750 +H 40.742505 42.741003 20.283157 +H 39.399248 43.906355 20.483244 +H 40.558690 43.634886 21.816904 + +Eps=4.33 +EpsInf=1.69 + diff --git a/tests/files/gaussian/solute_solvent_surface.out.gz b/tests/files/gaussian/solute_solvent_surface.out.gz new file mode 100644 index 00000000..7ce05483 Binary files /dev/null and b/tests/files/gaussian/solute_solvent_surface.out.gz differ diff --git a/tests/files/gaussian/syntax.com b/tests/files/gaussian/syntax.com new file mode 100644 index 00000000..d499d4c8 --- /dev/null +++ b/tests/files/gaussian/syntax.com @@ -0,0 +1,47 @@ +%chk=Optimization.chk +%mem=100GB +%NProcShared=28 +#P B3LYP/6-311++G** Opt=(MaxCycless=100) SCF SCRF=(PCM, Solvent=TetraHydroFuran) + +H24 O12 + +0 1 +O -0.535940 2.412480 0.000000 +H 0.434060 2.412480 0.000000 +H -0.859270 1.623360 -0.462230 +O -0.477040 0.515290 0.000000 +H 0.492960 0.515290 0.000000 +H -0.800370 -0.331850 0.344550 +O 1.941420 1.473710 -0.115560 +H 2.911420 1.473710 -0.115560 +H 1.618090 0.650870 -0.514660 +O 1.506410 -2.739190 -0.033320 +H 2.476410 -2.739190 -0.033320 +H 1.183080 -2.953640 
0.855710 +O -2.481410 -1.412410 -0.089680 +H -1.511410 -1.412410 -0.089680 +H -2.804740 -1.796910 0.740090 +O 0.267510 -1.375730 -0.089680 +H 1.237510 -1.375730 -0.089680 +H -0.055820 -1.963460 -0.790340 +O 2.079230 3.193980 0.024790 +H 3.049230 3.193980 0.024790 +H 1.755900 3.501110 -0.836620 +O -2.729610 0.744610 0.024790 +H -1.759610 0.744610 0.024790 +H -3.052940 0.984940 -0.857590 +O -2.260190 4.168690 -0.056620 +H -1.290190 4.168690 -0.056620 +H -2.583520 4.985440 -0.468050 +O -2.762450 3.169940 -0.056620 +H -1.792450 3.169940 -0.056620 +H -3.085780 2.467980 0.529570 +O 0.687880 4.885180 -0.056620 +H 1.657880 4.885180 -0.056620 +H 0.364550 5.613770 -0.609360 +O -1.168310 -3.669310 -0.056620 +H -0.198310 -3.669310 -0.056620 +H -1.491640 -3.998110 -0.909990 + + + diff --git a/tests/files/gaussian/syntax.out.gz b/tests/files/gaussian/syntax.out.gz new file mode 100644 index 00000000..e1e7e7ae Binary files /dev/null and b/tests/files/gaussian/syntax.out.gz differ diff --git a/tests/files/gaussian/walltime.com b/tests/files/gaussian/walltime.com new file mode 100644 index 00000000..eb3b3ffd --- /dev/null +++ b/tests/files/gaussian/walltime.com @@ -0,0 +1,68 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P PBE1PBE/6-311++G** Opt SCF=(MaxCycle=100) + +Na1 P2 H26 C13 O3 F12 + +-1 1 +Na 36.212468 30.249355 44.065163 +O 37.122511 32.310852 44.650633 +C 38.144801 32.537551 45.644160 +C 38.396734 34.041829 45.620039 +C 38.029384 34.424994 44.191415 +C 36.838177 33.518326 43.934480 +C 37.691501 31.990825 46.979135 +H 39.039582 31.997453 45.314683 +H 37.729558 34.541089 46.329125 +H 39.424433 34.286484 45.887895 +H 37.781614 35.479720 44.073986 +H 38.846532 34.183611 43.507631 +H 36.690395 33.269270 42.882761 +H 35.908980 33.952853 44.316374 +H 38.467538 32.152153 47.731109 +H 36.779001 32.497798 47.304643 +H 37.498740 30.918498 46.913688 +O 35.202009 29.539703 46.043366 +C 34.833912 29.978728 48.331091 +C 35.655074 28.683113 48.168972 +C 34.359478 30.297649 46.908031 +C 35.442173 28.304142 46.706260 +H 35.452969 30.783768 48.727358 +H 33.986388 29.849488 49.004165 +H 36.714298 28.869790 48.348654 +H 35.332058 27.890866 48.844043 +H 34.448579 31.345887 46.623866 +H 33.319301 29.986863 46.755505 +H 34.564863 27.652437 46.587793 +H 36.307853 27.832367 46.242336 +O 35.482739 28.293805 42.987439 +C 35.523417 26.010209 43.471814 +C 34.071861 26.393458 43.190163 +C 36.271434 27.122050 42.761954 +C 34.112809 27.921412 43.213418 +H 35.787972 25.020887 43.099317 +H 35.736153 26.046209 44.543087 +H 33.774592 26.033749 42.202884 +H 33.370035 25.992692 43.921015 +H 36.337565 26.926410 41.684801 +H 37.271370 27.305086 43.155238 +H 33.812594 28.330432 44.181655 +H 33.488161 28.373502 42.440094 +P 39.425499 28.304426 45.358805 +F 38.142702 29.176614 44.820338 +F 38.477988 26.991760 45.434613 +F 40.683074 27.445323 45.885218 +F 39.795837 27.942235 43.827107 +F 40.343640 29.630859 45.271279 +F 39.022198 28.679038 46.878654 +P 32.959764 32.180266 43.398565 +F 32.397168 31.539652 42.025943 +F 31.572850 32.975013 43.617592 +F 34.372073 31.371665 43.175224 +F 32.383503 30.915952 44.225530 +F 33.563401 33.429230 42.569916 +F 33.549896 32.806506 44.770379 + + + diff --git a/tests/files/gaussian/walltime.out.gz b/tests/files/gaussian/walltime.out.gz new file mode 100644 index 00000000..bc917a8f Binary files /dev/null and b/tests/files/gaussian/walltime.out.gz differ diff --git a/tests/files/gaussian/zmatrix.com b/tests/files/gaussian/zmatrix.com new file mode 100644 index 00000000..00bc654a --- /dev/null +++ 
b/tests/files/gaussian/zmatrix.com @@ -0,0 +1,24 @@ +%chk=Optimization.chk +%mem=48GB +%NProcShared=28 +#P PBE1PBE/6-31+G* opt scf=(maxcycle=100) + +H10 C4 O2 + +0 1 +O -1.678000 -0.723000 0.319000 +C -2.188000 -1.963000 0.789000 +C -0.258000 -0.683000 0.309000 +C 0.152000 0.687000 -0.221000 +O 1.572000 0.727000 -0.231000 +C 2.092000 1.957000 -0.701000 +H -1.482000 -1.948000 1.593000 +H -1.768000 -2.882000 0.437000 +H 0.142000 -1.473000 -0.351000 +H 0.142000 -0.833000 1.319000 +H -0.248000 1.477000 0.439000 +H -0.248000 0.837000 -1.231000 +H 1.751000 2.752000 -0.072000 +H 1.757000 2.126000 -1.703000 +H 3.161000 1.922000 -0.684000 +H -2.894000 -1.978000 -0.015000 diff --git a/tests/files/gaussian/zmatrix.out.gz b/tests/files/gaussian/zmatrix.out.gz new file mode 100644 index 00000000..ecda00b8 Binary files /dev/null and b/tests/files/gaussian/zmatrix.out.gz differ diff --git a/tests/gaussian/__init__.py b/tests/gaussian/__init__.py new file mode 100644 index 00000000..4cc4ce03 --- /dev/null +++ b/tests/gaussian/__init__.py @@ -0,0 +1 @@ +"""Tests for gaussian package.""" diff --git a/tests/gaussian/test_handlers.py b/tests/gaussian/test_handlers.py new file mode 100644 index 00000000..ec75e85e --- /dev/null +++ b/tests/gaussian/test_handlers.py @@ -0,0 +1,374 @@ +import datetime +import glob +import gzip +import os +import shutil +from unittest import TestCase + +from custodian.gaussian.handlers import GaussianErrorHandler, WallTimeErrorHandler +from tests.conftest import TEST_FILES + +__author__ = "Rasha Atwi" +__version__ = "0.1" +__maintainer__ = "Rasha Atwi" +__email__ = "rasha.atwi@stonybrook.edu" +__status__ = "Alpha" +__date__ = "3/21/24" + +TEST_DIR = f"{TEST_FILES}/gaussian" +SCR_DIR = f"{TEST_DIR}/scratch" +CWD = os.getcwd() + + +def gunzip_file(gauss_file): + output_file = os.path.splitext(gauss_file)[0] + if not os.path.exists(output_file) and os.path.exists(gauss_file): + with gzip.open(gauss_file, "rb") as f_in, open(output_file, "wb") as f_out: + shutil.copyfileobj(f_in, f_out) + return output_file + + +class TestGaussianErrorHandler(TestCase): + def setUp(self): + os.makedirs(SCR_DIR, exist_ok=True) + os.chdir(SCR_DIR) + + def test_opt_steps_cycles(self): + gunzip_file(f"{TEST_DIR}/opt_steps_cycles.out.gz") + for file in ["opt_steps_cycles.com", "opt_steps_cycles.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="opt_steps_cycles.com", + output_file="opt_steps_cycles.out", + opt_max_cycles=100, + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["opt_steps"] + assert dct["actions"] == [ + {"structure": "from_final_structure"}, + {"opt_max_cycles": 100}, + ] + + def test_opt_steps_from_structure(self): + gunzip_file(f"{TEST_DIR}/opt_steps_from_structure.out.gz") + for file in ["opt_steps_from_structure.com", "opt_steps_from_structure.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="opt_steps_from_structure.com", + output_file="opt_steps_from_structure.out", + opt_max_cycles=5, + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["opt_steps"] + assert dct["actions"] == [{"structure": "from_final_structure"}] + + def test_opt_steps_int_grid(self): + gunzip_file(f"{TEST_DIR}/opt_steps_int_grid.out.gz") + for file in ["opt_steps_int_grid.com", "opt_steps_int_grid.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="opt_steps_int_grid.com", + 
output_file="opt_steps_int_grid.out", + opt_max_cycles=1, + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["opt_steps"] + assert dct["actions"] == [{"integral": "ultra_fine"}] + + def test_opt_steps_better_guess(self): + gunzip_file(f"{TEST_DIR}/opt_steps_better_guess.out.gz") + for file in ["opt_steps_better_guess.com", "opt_steps_better_guess.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="opt_steps_better_guess.com", + output_file="opt_steps_better_guess.out", + opt_max_cycles=1, + lower_functional="HF", + lower_basis_set="STO-3G", + job_type="better_guess", + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["opt_steps"] + assert dct["actions"] == [{"opt_level_of_theory": "better_geom_guess"}] + + GaussianErrorHandler.activate_better_guess = False + + def test_scf_convergence_cycles(self): + gunzip_file(f"{TEST_DIR}/scf_convergence_cycles.out.gz") + for file in ["scf_convergence_cycles.com", "scf_convergence_cycles.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="scf_convergence_cycles.com", + output_file="scf_convergence_cycles.out", + scf_max_cycles=100, + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["scf_convergence"] + assert dct["actions"] == [{"scf_max_cycles": 100}] + + def test_scf_convergence_algorithm(self): + gunzip_file(f"{TEST_DIR}/scf_convergence_algorithm.out.gz") + for file in ["scf_convergence_algorithm.com", "scf_convergence_algorithm.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="scf_convergence_algorithm.com", + output_file="scf_convergence_algorithm.out", + scf_max_cycles=1, + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["scf_convergence"] + assert dct["actions"] == [{"scf_algorithm": "xqc"}] + + def test_scf_convergence_better_guess(self): + gunzip_file(f"{TEST_DIR}/scf_convergence_better_guess.out.gz") + for file in [ + "scf_convergence_better_guess.com", + "scf_convergence_better_guess.out", + ]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="scf_convergence_better_guess.com", + output_file="scf_convergence_better_guess.out", + scf_max_cycles=3, + lower_functional="HF", + lower_basis_set="STO-3G", + job_type="better_guess", + ) + handler.activate_better_guess = False + handler.check() + dct = handler.correct() + assert dct["errors"] == ["scf_convergence"] + assert dct["actions"] == [{"scf_level_of_theory": "better_scf_guess"}] + + GaussianErrorHandler.activate_better_guess = False + + def test_linear_bend(self): + gunzip_file(f"{TEST_DIR}/linear_bend.out.gz") + for file in ["linear_bend.com", "linear_bend.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="linear_bend.com", + output_file="linear_bend.out", + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["linear_bend"] + assert dct["actions"] == [{"coords": "rebuild_redundant_internals"}] + + def test_solute_solvent_surface(self): + gunzip_file(f"{TEST_DIR}/solute_solvent_surface.out.gz") + for file in ["solute_solvent_surface.com", "solute_solvent_surface.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="solute_solvent_surface.com", + output_file="solute_solvent_surface.out", + ) + 
handler.check() + dct = handler.correct() + assert dct["errors"] == ["solute_solvent_surface"] + assert dct["actions"] == [{"surface": "SAS"}] + + def test_internal_coords(self): + pass + + def test_blank_line(self): + gunzip_file(f"{TEST_DIR}/zmatrix.out.gz") + for file in ["zmatrix.com", "zmatrix.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="zmatrix.com", + output_file="zmatrix.out", + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["zmatrix"] + assert dct["actions"] == [{"blank_lines": "rewrite_input_file"}] + + def test_missing_mol(self): + gunzip_file(f"{TEST_DIR}/missing_mol.out.gz") + for file in ["missing_mol.com", "missing_mol.out", "Optimization.chk"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="missing_mol.com", + output_file="missing_mol.out", + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["missing_mol"] + assert dct["actions"] == [{"mol": "get_from_checkpoint"}] + + def test_found_coords(self): + gunzip_file(f"{TEST_DIR}/found_coords.out.gz") + for file in ["found_coords.com", "found_coords.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="found_coords.com", + output_file="found_coords.out", + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["found_coords"] + assert dct["actions"] == [{"mol": "remove_from_input"}] + + def test_coords_dict_geom(self): + gunzip_file(f"{TEST_DIR}/coords_dict_geom.out.gz") + for file in ["coords_dict_geom.com", "coords_dict_geom.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="coords_dict_geom.com", + output_file="coords_dict_geom.out", + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["coords"] + assert dct["actions"] == [{"coords": "remove_connectivity"}] + + def test_coords_string_geom(self): + gunzip_file(f"{TEST_DIR}/coords_string_geom.out.gz") + for file in ["coords_string_geom.com", "coords_string_geom.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="coords_string_geom.com", + output_file="coords_string_geom.out", + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["coords"] + assert dct["actions"] == [{"coords": "remove_connectivity"}] + + def test_missing_file(self): + gunzip_file(f"{TEST_DIR}/missing_file.out.gz") + for file in ["missing_file.com", "missing_file.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="missing_file.com", + output_file="missing_file.out", + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["missing_file"] + assert dct["actions"] is None + + def test_bad_file(self): + gunzip_file(f"{TEST_DIR}/bad_file.out.gz") + for file in ["bad_file.com", "bad_file.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="bad_file.com", + output_file="bad_file.out", + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["bad_file"] + assert dct["actions"] is None + + def test_coord_inputs(self): + gunzip_file(f"{TEST_DIR}/coord_inputs.out.gz") + for file in ["coord_inputs.com", "coord_inputs.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + 
input_file="coord_inputs.com", + output_file="coord_inputs.out", + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["coord_inputs"] + assert dct["actions"] == [{"coords": "use_zmatrix_format"}] + + def test_syntax(self): + gunzip_file(f"{TEST_DIR}/syntax.out.gz") + for file in ["syntax.com", "syntax.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="syntax.com", + output_file="syntax.out", + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["syntax"] + assert dct["actions"] is None + + def test_insufficient_memory(self): + gunzip_file(f"{TEST_DIR}/insufficient_memory.out.gz") + for file in ["insufficient_memory.com", "insufficient_memory.out"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + handler = GaussianErrorHandler( + input_file="insufficient_memory.com", + output_file="insufficient_memory.out", + ) + handler.check() + dct = handler.correct() + assert dct["errors"] == ["insufficient_mem"] + assert dct["actions"] == [{"memory": "increase_to_gaussian_recommendation"}] + + def tearDown(self): + os.chdir(CWD) + shutil.rmtree(SCR_DIR) + files_to_remove = glob.glob(f"{TEST_DIR}/*.out") + if files_to_remove and glob.glob(f"{TEST_DIR}/*.out.gz"): + for file_path in files_to_remove: + os.remove(file_path) + + +class TestWallTimeErrorHandler(TestCase): + def setUp(self): + os.makedirs(SCR_DIR, exist_ok=True) + os.chdir(SCR_DIR) + os.environ.pop("JOB_START_TIME", None) + gunzip_file(f"{TEST_DIR}/walltime.out.gz") + for file in ["walltime.com", "walltime.out", "Gau-mock.rwf"]: + shutil.copyfile(f"{TEST_DIR}/{file}", f"{SCR_DIR}/{file}") + + def test_walltime_init(self): + handler = WallTimeErrorHandler( + wall_time=3600, + buffer_time=300, + input_file="wall_time.com", + output_file="wall_time.out", + ) + init_time = handler.init_time + assert os.environ.get("JOB_START_TIME") == f"{init_time:%a %b %d %H:%M:%S UTC %Y}" + # Test that walltime persists if new handler is created + handler = WallTimeErrorHandler( + wall_time=3600, + buffer_time=300, + input_file="walltime.com", + output_file="walltime.out", + ) + assert os.environ.get("JOB_START_TIME") == f"{init_time:%a %b %d %H:%M:%S UTC %Y}" + + def test_walltime_check_and_correct(self): + # Try a 1 hr wall time with a 5 mins buffer + handler = WallTimeErrorHandler( + wall_time=3600, + buffer_time=300, + input_file="walltime.com", + output_file="walltime.out", + ) + assert not handler.check() + + # Make sure the check returns True when the remaining time is <= buffer time + handler.init_time = datetime.datetime.now() - datetime.timedelta(minutes=59) + assert handler.check() + + # Test that the input file is written correctly + handler.correct() + assert os.path.exists("walltime.com.wt") + with open("walltime.com.wt") as file: + first_line = file.readline().strip() + # assert first_line == "%rwf=./Gau-mock.rwf" + assert "rwf" in first_line + + def tearDown(self): + os.chdir(CWD) + shutil.rmtree(SCR_DIR) + files_to_remove = glob.glob(f"{TEST_DIR}/*.out") + if files_to_remove and glob.glob(f"{TEST_DIR}/*.out.gz"): + for file_path in files_to_remove: + os.remove(file_path) diff --git a/tests/gaussian/test_jobs.py b/tests/gaussian/test_jobs.py new file mode 100644 index 00000000..8642c78c --- /dev/null +++ b/tests/gaussian/test_jobs.py @@ -0,0 +1,83 @@ +import glob +import gzip +import os +import shutil +from unittest import TestCase + +from custodian.gaussian.jobs import GaussianJob +from tests.conftest import TEST_FILES + 
+__author__ = "Rasha Atwi" +__version__ = "0.1" +__maintainer__ = "Rasha Atwi" +__email__ = "rasha.atwi@stonybrook.edu" +__status__ = "Alpha" +__date__ = "3/21/24" + +TEST_DIR = f"{TEST_FILES}/gaussian" +SCR_DIR = f"{TEST_DIR}/scratch" +CWD = os.getcwd() + + +class TestGaussianJob(TestCase): + def setUp(self): + self.input_file = "test.com" + self.output_file = "test.out" + self.gaussian_cmd = f"g16 < {self.input_file} > {self.output_file}" + self.stderr_file = "stderr.txt" + self.suffix = ".test" + self.backup = True + self.directory = SCR_DIR + + os.makedirs(SCR_DIR, exist_ok=True) + shutil.copyfile(f"{TEST_DIR}/mol_opt.com", f"{SCR_DIR}/test.com") + os.chdir(SCR_DIR) + + def tearDown(self): + os.chdir(CWD) + shutil.rmtree(SCR_DIR) + files_to_remove = glob.glob(f"{TEST_DIR}/*.out") + if files_to_remove and glob.glob(f"{TEST_DIR}/*.out.gz"): + for file_path in files_to_remove: + os.remove(file_path) + + def test_normal(self): + job = GaussianJob( + self.gaussian_cmd, + self.input_file, + self.output_file, + self.stderr_file, + self.suffix, + self.backup, + ) + job.setup() + assert os.path.exists(f"{SCR_DIR}/test.com.orig") + if not os.path.exists(f"{TEST_DIR}/mol_opt.out") and os.path.exists(f"{TEST_DIR}/mol_opt.out.gz"): + with gzip.open(f"{TEST_DIR}/mol_opt.out.gz", "rb") as f_in, open(f"{TEST_DIR}/mol_opt.out", "wb") as f_out: + shutil.copyfileobj(f_in, f_out) + shutil.copy(f"{TEST_DIR}/mol_opt.out", f"{SCR_DIR}/test.out") + job.postprocess() + assert os.path.exists(f"{SCR_DIR}/test.com{self.suffix}") + assert os.path.exists(f"{SCR_DIR}/test.out{self.suffix}") + + def test_better_guess(self): + job_gen = GaussianJob.generate_better_guess( + self.gaussian_cmd, + self.input_file, + self.output_file, + self.stderr_file, + self.backup, + cart_coords=True, + directory=self.directory, + ) + jobs = list(job_gen) + assert len(jobs) == 1, "One job should be generated under normal conditions." 
+ jobs[0].setup() + assert os.path.exists(f"{SCR_DIR}/test.com.orig") + if not os.path.exists(f"{TEST_DIR}/mol_opt.out") and os.path.exists(f"{TEST_DIR}/mol_opt.out.gz"): + with gzip.open(f"{TEST_DIR}/mol_opt.out.gz", "rb") as f_in, open(f"{TEST_DIR}/mol_opt.out", "wb") as f_out: + shutil.copyfileobj(f_in, f_out) + shutil.copy(f"{TEST_DIR}/mol_opt.out", f"{SCR_DIR}/test.out") + jobs[0].postprocess() + assert os.path.exists(f"{SCR_DIR}/test.com.guess1") + assert os.path.exists(f"{SCR_DIR}/test.out.guess1") diff --git a/tests/lobster/test_handlers.py b/tests/lobster/test_handlers.py index 89119614..0b5a6dbd 100644 --- a/tests/lobster/test_handlers.py +++ b/tests/lobster/test_handlers.py @@ -7,7 +7,7 @@ class TestChargeSpillingValidator: - def test_check_and_correct(self): + def test_check_and_correct(self) -> None: v = ChargeSpillingValidator(output_filename=f"{test_files_lobster}/lobsterout.normal") assert not v.check() @@ -23,7 +23,7 @@ def test_check_and_correct(self): v4 = ChargeSpillingValidator(output_filename=f"{test_files_lobster}/no_spin/lobsterout") assert not v4.check() - def test_as_dict(self): + def test_as_dict(self) -> None: v = ChargeSpillingValidator(output_filename=f"{test_files_lobster}/lobsterout.normal") dct = v.as_dict() v2 = ChargeSpillingValidator.from_dict(dct) @@ -31,22 +31,22 @@ def test_as_dict(self): class TestLobsterFilesValidator: - def test_check_and_correct_1(self): + def test_check_and_correct_1(self) -> None: os.chdir(test_files_lobster) v = LobsterFilesValidator() assert not v.check() - def test_check_and_correct_2(self): + def test_check_and_correct_2(self) -> None: os.chdir(f"{test_files_lobster}/../lobsterins") v2 = LobsterFilesValidator() assert v2.check() - def test_check_and_correct_3(self): + def test_check_and_correct_3(self) -> None: os.chdir(f"{test_files_lobster}/crash") v3 = LobsterFilesValidator() assert v3.check() - def test_as_dict(self): + def test_as_dict(self) -> None: os.chdir(test_files_lobster) v = LobsterFilesValidator() dct = v.as_dict() @@ -55,19 +55,19 @@ def test_as_dict(self): class TestEnoughBandsValidator: - def test_check_and_correct(self): + def test_check_and_correct(self) -> None: v = EnoughBandsValidator(output_filename=f"{test_files_lobster}/lobsterout.normal") assert not v.check() - def test_check_and_correct2(self): + def test_check_and_correct2(self) -> None: v2 = EnoughBandsValidator(output_filename=f"{test_files_lobster}/lobsterout.nocohp") assert v2.check() - def test_check_and_correct3(self): + def test_check_and_correct3(self) -> None: v3 = EnoughBandsValidator(output_filename=f"{test_files_lobster}/nolobsterout/lobsterout") assert not v3.check() - def test_as_dict(self): + def test_as_dict(self) -> None: v = EnoughBandsValidator(output_filename=f"{test_files_lobster}/lobsterout.normal") dct = v.as_dict() v2 = EnoughBandsValidator.from_dict(dct) diff --git a/tests/lobster/test_jobs.py b/tests/lobster/test_jobs.py index 8a366d1f..b3d65e18 100644 --- a/tests/lobster/test_jobs.py +++ b/tests/lobster/test_jobs.py @@ -60,13 +60,13 @@ class TestLobsterJob: """Similar to VaspJobTest. 
Omit test of run.""" - def test_as_from_dict(self): + def test_as_from_dict(self) -> None: v = LobsterJob(lobster_cmd="hello") v2 = LobsterJob.from_dict(v.as_dict()) assert type(v2) == type(v) assert v2.lobster_cmd == "hello" - def test_setup(self): + def test_setup(self) -> None: with cd(test_files_lobster2): with ScratchDir(".", copy_from_current_on_enter=True): # check if backup is done correctly @@ -79,7 +79,7 @@ def test_setup(self): v.setup() assert not os.path.isfile("lobsterin.orig") - def test_postprocess(self): + def test_postprocess(self) -> None: # test gzipped and zipping of additional files with cd(os.path.join(test_files_lobster3)): with ScratchDir(".", copy_from_current_on_enter=True): diff --git a/tests/nwchem/test_handlers.py b/tests/nwchem/test_handlers.py index aaf397e4..30bb3701 100644 --- a/tests/nwchem/test_handlers.py +++ b/tests/nwchem/test_handlers.py @@ -13,7 +13,7 @@ __date__ = "6/18/13" -def test_check_correct(): +def test_check_correct() -> None: os.chdir(f"{TEST_FILES}/nwchem") shutil.copy("C1N1Cl1_1.nw", "C1N1Cl1_1.nw.orig") handler = NwchemErrorHandler(output_filename="C1N1Cl1_1.nwout") diff --git a/tests/qchem/test_handlers.py b/tests/qchem/test_handlers.py index 9133730c..7b439f0c 100644 --- a/tests/qchem/test_handlers.py +++ b/tests/qchem/test_handlers.py @@ -31,11 +31,11 @@ @skip_if_no_openbabel class QChemErrorHandlerTest(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(SCR_DIR) os.chdir(SCR_DIR) - def _check_equivalent_inputs(self, input1, input2): + def _check_equivalent_inputs(self, input1, input2) -> None: QCinput1 = QCInput.from_file(input1) QCinput2 = QCInput.from_file(input2) sections1 = QCInput.find_sections(QCinput1.get_str()) @@ -44,7 +44,7 @@ def _check_equivalent_inputs(self, input1, input2): for key in sections1: assert QCinput1.as_dict().get(key) == QCinput2.as_dict().get(key) - def test_unable_to_determine_lamda(self): + def test_unable_to_determine_lamda(self) -> None: for ii in range(2): shutil.copyfile( f"{TEST_DIR}/unable_to_determine_lamda.qin.{ii}", @@ -70,7 +70,7 @@ def test_unable_to_determine_lamda(self): ] self._check_equivalent_inputs("unable_to_determine_lamda.qin.0", "unable_to_determine_lamda.qin.1") - def test_linear_dependent_basis_and_FileMan(self): + def test_linear_dependent_basis_and_FileMan(self) -> None: for ii in range(1, 3): shutil.copyfile( f"{TEST_DIR}/unable_to_determine_lamda.qin.{ii}", @@ -91,7 +91,7 @@ def test_linear_dependent_basis_and_FileMan(self): assert dct["warnings"]["linear_dependence"] is True assert dct["actions"] == [{"scf_guess_always": "true"}] - def test_failed_to_transform(self): + def test_failed_to_transform(self) -> None: for ii in range(2): shutil.copyfile(f"{TEST_DIR}/qunino_vinyl.qin.{ii}", f"{SCR_DIR}/qunino_vinyl.qin.{ii}") shutil.copyfile(f"{TEST_DIR}/qunino_vinyl.qout.{ii}", f"{SCR_DIR}/qunino_vinyl.qout.{ii}") @@ -106,7 +106,7 @@ def test_failed_to_transform(self): handler = QChemErrorHandler(input_file="qunino_vinyl.qin.1", output_file="qunino_vinyl.qout.1") assert handler.check() is False - def test_scf_failed_to_converge(self): + def test_scf_failed_to_converge(self) -> None: for ii in range(3): shutil.copyfile(f"{TEST_DIR}/crowd_gradient.qin.{ii}", f"{SCR_DIR}/crowd_gradient.qin.{ii}") shutil.copyfile(f"{TEST_DIR}/crowd_gradient.qout.{ii}", f"{SCR_DIR}/crowd_gradient.qout.{ii}") @@ -118,7 +118,7 @@ def test_scf_failed_to_converge(self): assert dct["actions"] == [{"s2thresh": "16"}, {"max_scf_cycles": 100}, {"thresh": "14"}] 
self._check_equivalent_inputs("crowd_gradient.qin.0", "crowd_gradient.qin.1") - def test_scf_failed_to_converge_gdm_add_cycles(self): + def test_scf_failed_to_converge_gdm_add_cycles(self) -> None: shutil.copyfile(f"{TEST_DIR}/gdm_add_cycles/mol.qin", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/gdm_add_cycles/mol.qin.1", f"{SCR_DIR}/mol.qin.1") shutil.copyfile(f"{TEST_DIR}/gdm_add_cycles/mol.qout", f"{SCR_DIR}/mol.qout") @@ -130,7 +130,7 @@ def test_scf_failed_to_converge_gdm_add_cycles(self): assert dct["actions"] == [{"max_scf_cycles": "500"}] self._check_equivalent_inputs("mol.qin", "mol.qin.1") - def test_advanced_scf_failed_to_converge_1(self): + def test_advanced_scf_failed_to_converge_1(self) -> None: shutil.copyfile(f"{TEST_DIR}/diis_guess_always/mol.qin.0", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/diis_guess_always/mol.qout.0", f"{SCR_DIR}/mol.qout") shutil.copyfile(f"{TEST_DIR}/diis_guess_always/mol.qin.1", f"{SCR_DIR}/mol.qin.1") @@ -142,7 +142,7 @@ def test_advanced_scf_failed_to_converge_1(self): assert dct["actions"] == [{"scf_algorithm": "gdm"}, {"max_scf_cycles": "500"}] self._check_equivalent_inputs("mol.qin", "mol.qin.1") - def test_scf_into_opt(self): + def test_scf_into_opt(self) -> None: shutil.copyfile(f"{TEST_DIR}/scf_into_opt/mol.qin.0", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/scf_into_opt/mol.qout.0", f"{SCR_DIR}/mol.qout") shutil.copyfile(f"{TEST_DIR}/scf_into_opt/mol.qin.1", f"{SCR_DIR}/mol.qin.1") @@ -161,7 +161,7 @@ def test_scf_into_opt(self): assert dct["errors"] == ["out_of_opt_cycles"] assert dct["actions"] == [{"molecule": "molecule_from_last_geometry"}] - def test_custom_smd(self): + def test_custom_smd(self) -> None: shutil.copyfile(f"{TEST_DIR}/custom_smd/mol.qin.0", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/custom_smd/mol.qout.0", f"{SCR_DIR}/mol.qout") shutil.copyfile(f"{TEST_DIR}/custom_smd/mol.qin.1", f"{SCR_DIR}/mol.qin.1") @@ -180,7 +180,7 @@ def test_custom_smd(self): assert dct["errors"] == [] assert dct["actions"] is None - def test_out_of_opt_cycles(self): + def test_out_of_opt_cycles(self) -> None: shutil.copyfile(f"{TEST_DIR}/crowd_gradient.qin.2", f"{SCR_DIR}/crowd_gradient.qin.2") shutil.copyfile(f"{TEST_DIR}/crowd_gradient.qout.2", f"{SCR_DIR}/crowd_gradient.qout.2") shutil.copyfile(f"{TEST_DIR}/crowd_gradient.qin.3", f"{SCR_DIR}/crowd_gradient.qin.3") @@ -192,7 +192,7 @@ def test_out_of_opt_cycles(self): assert dct["actions"] == [{"geom_max_cycles:": 200}, {"molecule": "molecule_from_last_geometry"}] self._check_equivalent_inputs("crowd_gradient.qin.2", "crowd_gradient.qin.3") - def test_advanced_out_of_opt_cycles(self): + def test_advanced_out_of_opt_cycles(self) -> None: shutil.copyfile(f"{TEST_DIR}/2564_complete/error1/mol.qin", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/2564_complete/error1/mol.qout", f"{SCR_DIR}/mol.qout") shutil.copyfile(f"{TEST_DIR}/2564_complete/mol.qin.opt_0", f"{SCR_DIR}/mol.qin.opt_0") @@ -208,13 +208,13 @@ def test_advanced_out_of_opt_cycles(self): handler.check() assert handler.opt_error_history == [] - def test_advanced_out_of_opt_cycles1(self): + def test_advanced_out_of_opt_cycles1(self) -> None: shutil.copyfile(f"{TEST_DIR}/2620_complete/mol.qin.opt_0", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/2620_complete/mol.qout.opt_0", f"{SCR_DIR}/mol.qout") handler = QChemErrorHandler(input_file="mol.qin", output_file="mol.qout") assert handler.check() is False - def test_failed_to_read_input(self): + def test_failed_to_read_input(self) -> None: 
shutil.copyfile(f"{TEST_DIR}/unable_lamda_weird.qin", f"{SCR_DIR}/unable_lamda_weird.qin") shutil.copyfile(f"{TEST_DIR}/unable_lamda_weird.qout", f"{SCR_DIR}/unable_lamda_weird.qout") handler = QChemErrorHandler(input_file="unable_lamda_weird.qin", output_file="unable_lamda_weird.qout") @@ -224,7 +224,7 @@ def test_failed_to_read_input(self): assert dct["actions"] == [{"rerun_job_no_changes": True}] self._check_equivalent_inputs("unable_lamda_weird.qin.last", "unable_lamda_weird.qin") - def test_input_file_error(self): + def test_input_file_error(self) -> None: shutil.copyfile(f"{TEST_DIR}/bad_input.qin", f"{SCR_DIR}/bad_input.qin") shutil.copyfile(f"{TEST_DIR}/bad_input.qout", f"{SCR_DIR}/bad_input.qout") handler = QChemErrorHandler(input_file="bad_input.qin", output_file="bad_input.qout") @@ -233,7 +233,7 @@ def test_input_file_error(self): assert dct["errors"] == ["input_file_error"] assert dct["actions"] is None - def test_basis_not_supported(self): + def test_basis_not_supported(self) -> None: shutil.copyfile(f"{TEST_DIR}/basis_not_supported.qin", f"{SCR_DIR}/basis_not_supported.qin") shutil.copyfile(f"{TEST_DIR}/basis_not_supported.qout", f"{SCR_DIR}/basis_not_supported.qout") handler = QChemErrorHandler(input_file="basis_not_supported.qin", output_file="basis_not_supported.qout") @@ -242,7 +242,7 @@ def test_basis_not_supported(self): assert dct["errors"] == ["basis_not_supported"] assert dct["actions"] is None - def test_NLebdevPts(self): + def test_NLebdevPts(self) -> None: shutil.copyfile(f"{TEST_DIR}/lebdevpts.qin", f"{SCR_DIR}/lebdevpts.qin") shutil.copyfile(f"{TEST_DIR}/lebdevpts.qout", f"{SCR_DIR}/lebdevpts.qout") handler = QChemErrorHandler(input_file="lebdevpts.qin", output_file="lebdevpts.qout") @@ -251,7 +251,7 @@ def test_NLebdevPts(self): assert dct["errors"] == ["NLebdevPts"] assert dct["actions"] == [{"esp_surface_density": "250"}] - def test_read_error(self): + def test_read_error(self) -> None: shutil.copyfile(f"{TEST_DIR}/molecule_read_error/mol.qin", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/molecule_read_error/mol.qout", f"{SCR_DIR}/mol.qout") handler = QChemErrorHandler(input_file="mol.qin", output_file="mol.qout") @@ -261,7 +261,7 @@ def test_read_error(self): assert dct["actions"] == [{"rerun_job_no_changes": True}] self._check_equivalent_inputs("mol.qin.last", "mol.qin") - def test_never_called_qchem_error(self): + def test_never_called_qchem_error(self) -> None: shutil.copyfile(f"{TEST_DIR}/mpi_error/mol.qin", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/mpi_error/mol.qout", f"{SCR_DIR}/mol.qout") handler = QChemErrorHandler(input_file="mol.qin", output_file="mol.qout") @@ -271,7 +271,7 @@ def test_never_called_qchem_error(self): assert dct["actions"] == [{"rerun_job_no_changes": True}] self._check_equivalent_inputs("mol.qin.last", "mol.qin") - def test_OOS_read_hess(self): + def test_OOS_read_hess(self) -> None: shutil.copyfile(f"{TEST_DIR}/OOS_read_hess.qin", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/OOS_read_hess.qout", f"{SCR_DIR}/mol.qout") handler = QChemErrorHandler(input_file="mol.qin", output_file="mol.qout") @@ -285,7 +285,7 @@ def test_OOS_read_hess(self): ] self._check_equivalent_inputs(f"{TEST_DIR}/OOS_read_hess_next.qin", "mol.qin") - def test_gdm_neg_precon_error(self): + def test_gdm_neg_precon_error(self) -> None: shutil.copyfile(f"{TEST_DIR}/gdm_neg_precon_error.qin", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/gdm_neg_precon_error.qout", f"{SCR_DIR}/mol.qout") handler = QChemErrorHandler(input_file="mol.qin", 
output_file="mol.qout") @@ -294,7 +294,7 @@ def test_gdm_neg_precon_error(self): assert dct["errors"] == ["gdm_neg_precon_error"] assert dct["actions"] == [{"molecule": "molecule_from_last_geometry"}] - def test_fileman_cpscf_nseg_error(self): + def test_fileman_cpscf_nseg_error(self) -> None: shutil.copyfile(f"{TEST_DIR}/fileman_cpscf.qin", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/fileman_cpscf.qout", f"{SCR_DIR}/mol.qout") handler = QChemErrorHandler(input_file="mol.qin", output_file="mol.qout") @@ -303,6 +303,6 @@ def test_fileman_cpscf_nseg_error(self): assert dct["errors"] == ["premature_end_FileMan_error"] assert dct["actions"] == [{"cpscf_nseg": "3"}] - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) diff --git a/tests/qchem/test_job_handler_interaction.py b/tests/qchem/test_job_handler_interaction.py index 083484a5..a0609f53 100644 --- a/tests/qchem/test_job_handler_interaction.py +++ b/tests/qchem/test_job_handler_interaction.py @@ -30,7 +30,7 @@ @skip_if_no_openbabel class FFOptJobHandlerInteraction(TestCase): - def _check_equivalent_inputs(self, input1, input2): + def _check_equivalent_inputs(self, input1, input2) -> None: QCinput1 = QCInput.from_file(input1) QCinput2 = QCInput.from_file(input2) sections1 = QCInput.find_sections(QCinput1.get_str()) @@ -39,7 +39,7 @@ def _check_equivalent_inputs(self, input1, input2): for key in sections1: assert QCinput1.as_dict().get(key) == QCinput2.as_dict().get(key) - def setUp(self): + def setUp(self) -> None: os.makedirs(f"{SCR_DIR}/scratch", exist_ok=True) shutil.copyfile(f"{TEST_DIR}/job_handler_interaction/mol.qin.orig", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/job_handler_interaction/error.1/mol.qout", f"{SCR_DIR}/mol.qout.error1") @@ -56,11 +56,11 @@ def setUp(self): shutil.copyfile(f"{TEST_DIR}/job_handler_interaction/mol.qin.opt_1", f"{SCR_DIR}/mol.qin.opt_1") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem", max_cores=40, diff --git a/tests/qchem/test_jobs.py b/tests/qchem/test_jobs.py index 92964023..1309973a 100644 --- a/tests/qchem/test_jobs.py +++ b/tests/qchem/test_jobs.py @@ -32,17 +32,17 @@ @skip_if_no_openbabel class QCJobTest(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(SCR_DIR) shutil.copyfile(f"{TEST_DIR}/no_nbo.qin", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/nbo7.qin", f"{SCR_DIR}/different.qin") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_defaults(self): + def test_defaults(self) -> None: with patch("custodian.qchem.jobs.shutil.copy") as copy_patch: job = QCJob(qchem_command="qchem", max_cores=32) assert job.current_command == "qchem -nt 32 ./mol.qin ./mol.qout scratch" @@ -53,7 +53,7 @@ def test_defaults(self): assert os.environ["QCTHREADS"] == "32" assert os.environ["OMP_NUM_THREADS"] == "32" - def test_not_defaults(self): + def test_not_defaults(self) -> None: job = QCJob( qchem_command="qchem -slurm", multimode="mpi", @@ -71,7 +71,7 @@ def test_not_defaults(self): assert os.environ["NBOEXE"] == "/path/to/nbo7.i4.exe" assert os.environ["KMP_INIT_AT_FORK"] == "FALSE" - def test_save_scratch(self): + def test_save_scratch(self) -> None: with patch("custodian.qchem.jobs.shutil.copy") as copy_patch: job = QCJob( qchem_command="qchem -slurm", @@ -91,7 +91,7 @@ def 
test_save_scratch(self): @skip_if_no_openbabel class OptFFComplexUnlinkedTest(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(SCR_DIR) shutil.copyfile(f"{TEST_DIR}/FF_complex/mol.qin.opt_0", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/FF_complex/mol.qout.opt_0", f"{SCR_DIR}/mol.qout.opt_0") @@ -99,11 +99,11 @@ def setUp(self): shutil.copyfile(f"{TEST_DIR}/FF_complex/mol.qin.freq_0", f"{SCR_DIR}/mol.qin.freq_0") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem", max_cores=32, @@ -154,7 +154,7 @@ def test_OptFF(self): @skip_if_no_openbabel class OptFFTestComplexLinkedChangeNsegTest(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(SCR_DIR) shutil.copyfile(f"{TEST_DIR}/FF_complex/mol.qin.opt_0", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/FF_complex/mol.qin.opt_0", f"{SCR_DIR}/mol.qin.opt_0") @@ -167,11 +167,11 @@ def setUp(self): shutil.copyfile(f"{TEST_DIR}/FF_complex/mol.qout.freq_1", f"{SCR_DIR}/mol.qout.freq_1") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem", max_cores=32, @@ -239,7 +239,7 @@ def test_OptFF(self): @skip_if_no_openbabel class OptFFTest(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(SCR_DIR) shutil.copyfile(f"{TEST_DIR}/FF_working/test.qin", f"{SCR_DIR}/test.qin") shutil.copyfile(f"{TEST_DIR}/FF_working/test.qout.opt_0", f"{SCR_DIR}/test.qout.opt_0") @@ -250,11 +250,11 @@ def setUp(self): shutil.copyfile(f"{TEST_DIR}/FF_working/test.qout.freq_1", f"{SCR_DIR}/test.qout.freq_1") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem", max_cores=32, @@ -315,22 +315,22 @@ def test_OptFF(self): == QCInput.from_file(os.path.join(SCR_DIR, "test.qin")).as_dict() ) with pytest.raises(StopIteration): - job.__next__() + next(job) @skip_if_no_openbabel class OptFFTest1(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(SCR_DIR) shutil.copyfile(f"{TEST_DIR}/2620_complete/mol.qin.orig", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/2620_complete/mol.qout.opt_0", f"{SCR_DIR}/mol.qout.opt_0") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem -slurm", max_cores=32, input_file="mol.qin", output_file="mol.qout", linked=False ) @@ -345,12 +345,12 @@ def test_OptFF(self): ).as_dict() assert next(job).as_dict() == expected_next with pytest.raises(StopIteration): - job.__next__() + next(job) @skip_if_no_openbabel class OptFFTest2(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(SCR_DIR) shutil.copyfile(f"{TEST_DIR}/disconnected_but_converged/mol.qin.orig", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/disconnected_but_converged/mol.qout.opt_0", f"{SCR_DIR}/mol.qout.opt_0") @@ -358,11 +358,11 @@ def setUp(self): shutil.copyfile(f"{TEST_DIR}/disconnected_but_converged/mol.qin.freq_0", f"{SCR_DIR}/mol.qin.freq_0") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: 
os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem -slurm", max_cores=32, @@ -395,12 +395,12 @@ def test_OptFF(self): == QCInput.from_file(os.path.join(SCR_DIR, "mol.qin")).as_dict() ) with pytest.raises(StopIteration): - job.__next__() + next(job) @skip_if_no_openbabel class OptFFTestSwitching(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(SCR_DIR) shutil.copyfile(f"{TEST_DIR}/FF_switching/mol.qin.orig", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/FF_switching/mol.qout.opt_0", f"{SCR_DIR}/mol.qout.opt_0") @@ -414,11 +414,11 @@ def setUp(self): shutil.copyfile(f"{TEST_DIR}/FF_switching/mol.qout.freq_2", f"{SCR_DIR}/mol.qout.freq_2") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem -slurm", max_cores=32, @@ -507,12 +507,12 @@ def test_OptFF(self): == QCInput.from_file(os.path.join(SCR_DIR, "mol.qin")).as_dict() ) with pytest.raises(StopIteration): - job.__next__() + next(job) @skip_if_no_openbabel class OptFFTest6004(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(SCR_DIR) shutil.copyfile(f"{TEST_DIR}/6004_frag12/mol.qin.orig", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/6004_frag12/mol.qout.opt_0", f"{SCR_DIR}/mol.qout.opt_0") @@ -526,11 +526,11 @@ def setUp(self): shutil.copyfile(f"{TEST_DIR}/6004_frag12/mol.qout.freq_2", f"{SCR_DIR}/mol.qout.freq_2") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem -slurm", max_cores=32, @@ -622,7 +622,7 @@ def test_OptFF(self): @skip_if_no_openbabel class OptFFTest5952(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(SCR_DIR) shutil.copyfile(f"{TEST_DIR}/5952_frag16/mol.qin.orig", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/5952_frag16/mol.qout.opt_0", f"{SCR_DIR}/mol.qout.opt_0") @@ -630,11 +630,11 @@ def setUp(self): shutil.copyfile(f"{TEST_DIR}/5952_frag16/mol.qin.freq_0", f"{SCR_DIR}/mol.qin.freq_0") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem -slurm", max_cores=32, @@ -667,12 +667,12 @@ def test_OptFF(self): == QCInput.from_file(os.path.join(SCR_DIR, "mol.qin")).as_dict() ) with pytest.raises(StopIteration): - job.__next__() + next(job) @skip_if_no_openbabel class OptFFTest5690(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(SCR_DIR) shutil.copyfile(f"{TEST_DIR}/5690_frag18/mol.qin.orig", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/5690_frag18/mol.qout.opt_0", f"{SCR_DIR}/mol.qout.opt_0") @@ -686,11 +686,11 @@ def setUp(self): shutil.copyfile(f"{TEST_DIR}/5690_frag18/mol.qout.freq_2", f"{SCR_DIR}/mol.qout.freq_2") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem -slurm", max_cores=32, @@ -784,7 +784,7 @@ def test_OptFF(self): @skip_if_no_openbabel class OptFFSmallNegFreqTest(TestCase): - def setUp(self): + def setUp(self) -> None: 
os.makedirs(f"{SCR_DIR}/scratch", exist_ok=True) shutil.copyfile(f"{TEST_DIR}/small_neg_freq/mol.qin.orig", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/small_neg_freq/mol.qin.opt_0", f"{SCR_DIR}/mol.qin.opt_0") @@ -796,11 +796,11 @@ def setUp(self): shutil.copyfile(f"{TEST_DIR}/small_neg_freq/mol.qout.freq_2", f"{SCR_DIR}/mol.qout.freq_2") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem -slurm", max_cores=32, @@ -915,12 +915,12 @@ def test_OptFF(self): os.path.join(SCR_DIR, "mol.qin.freq_2"), ) with pytest.raises(StopIteration): - job.__next__() + next(job) @skip_if_no_openbabel class OptFFSingleFreqFragsTest(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(f"{SCR_DIR}/scratch", exist_ok=True) shutil.copyfile(f"{TEST_DIR}/single_freq_frag/mol.qin.orig", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/single_freq_frag/mol.qin.opt_0", f"{SCR_DIR}/mol.qin.opt_0") @@ -930,11 +930,11 @@ def setUp(self): os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem -slurm", max_cores=32, @@ -971,12 +971,12 @@ def test_OptFF(self): shutil.copyfile(f"{SCR_DIR}/mol.qin", f"{SCR_DIR}/mol.qin.freq_0") with pytest.raises(StopIteration): - job.__next__() + next(job) @skip_if_no_openbabel class TSFFTest(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(SCR_DIR) shutil.copyfile(f"{TEST_DIR}/fftsopt_no_freqfirst/mol.qin.freq_0", f"{SCR_DIR}/test.qin") shutil.copyfile(f"{TEST_DIR}/fftsopt_no_freqfirst/mol.qout.ts_0", f"{SCR_DIR}/test.qout.ts_0") @@ -984,11 +984,11 @@ def setUp(self): shutil.copyfile(f"{TEST_DIR}/fftsopt_no_freqfirst/mol.qin.freq_0", f"{SCR_DIR}/test.qin.freq_0") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem", max_cores=32, @@ -1022,12 +1022,12 @@ def test_OptFF(self): == QCInput.from_file(f"{SCR_DIR}/test.qin").as_dict() ) with pytest.raises(StopIteration): - job.__next__() + next(job) @skip_if_no_openbabel class TSFFFreqfirstTest(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(f"{SCR_DIR}/scratch", exist_ok=True) shutil.copyfile(f"{TEST_DIR}/fftsopt_freqfirst/mol.qin.orig", f"{SCR_DIR}/mol.qin") shutil.copyfile(f"{TEST_DIR}/fftsopt_freqfirst/mol.qin.freq_pre", f"{SCR_DIR}/mol.qin.freq_pre") @@ -1036,11 +1036,11 @@ def setUp(self): shutil.copyfile(f"{TEST_DIR}/fftsopt_freqfirst/mol.qout.freq_0", f"{SCR_DIR}/mol.qout.freq_0") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem -slurm", max_cores=32, @@ -1094,12 +1094,12 @@ def test_OptFF(self): ) shutil.copyfile(f"{SCR_DIR}/mol.qin", f"{SCR_DIR}/mol.qin.freq_0") with pytest.raises(StopIteration): - job.__next__() + next(job) @skip_if_no_openbabel class TSFFFreqFirstMultipleCyclesTest(TestCase): - def setUp(self): + def setUp(self) -> None: os.makedirs(f"{SCR_DIR}/scratch", exist_ok=True) shutil.copyfile(f"{TEST_DIR}/fftsopt_multiple_cycles/mol.qin.orig", f"{SCR_DIR}/mol.qin") 
shutil.copyfile(f"{TEST_DIR}/fftsopt_multiple_cycles/mol.qin.freq_pre", f"{SCR_DIR}/mol.qin.freq_pre") @@ -1110,11 +1110,11 @@ def setUp(self): shutil.copyfile(f"{TEST_DIR}/fftsopt_multiple_cycles/mol.qout.freq_1", f"{SCR_DIR}/mol.qout.freq_1") os.chdir(SCR_DIR) - def tearDown(self): + def tearDown(self) -> None: os.chdir(CWD) shutil.rmtree(SCR_DIR) - def test_OptFF(self): + def test_OptFF(self) -> None: job = QCJob.opt_with_frequency_flattener( qchem_command="qchem -slurm", max_cores=32, @@ -1201,4 +1201,4 @@ def test_OptFF(self): shutil.copyfile(f"{SCR_DIR}/mol.qin", f"{SCR_DIR}/mol.qin.freq_1") with pytest.raises(StopIteration): - job.__next__() + next(job) diff --git a/tests/test_custodian.py b/tests/test_custodian.py index 9c87ac40..24307963 100644 --- a/tests/test_custodian.py +++ b/tests/test_custodian.py @@ -22,44 +22,44 @@ class ExitCodeJob(Job): - def __init__(self, exitcode=0): + def __init__(self, exitcode=0) -> None: self.exitcode = exitcode - def setup(self, directory="./"): + def setup(self, directory="./") -> None: pass def run(self, directory="./"): return subprocess.Popen(f"exit {self.exitcode}", cwd=directory, shell=True) - def postprocess(self, directory="./"): + def postprocess(self, directory="./") -> None: pass class ExampleJob(Job): - def __init__(self, jobid, params=None): + def __init__(self, jobid, params=None) -> None: if params is None: params = {"initial": 0, "total": 0} self.jobid = jobid self.params = params - def setup(self, directory="./"): + def setup(self, directory="./") -> None: self.params["initial"] = 0 self.params["total"] = 0 - def run(self, directory="./"): + def run(self, directory="./") -> None: sequence = [random.uniform(0, 1) for i in range(100)] self.params["total"] = self.params["initial"] + sum(sequence) - def postprocess(self, directory="./"): + def postprocess(self, directory="./") -> None: pass @property - def name(self): + def name(self) -> str: return f"ExampleJob{self.jobid}" class ExampleHandler(ErrorHandler): - def __init__(self, params): + def __init__(self, params) -> None: self.params = params def check(self, directory="./"): @@ -75,7 +75,7 @@ class ExampleHandler1b(ExampleHandler): This handler always can apply a correction, but will only apply it twice before raising. """ - max_num_corrections = 2 # type: ignore + max_num_corrections = 2 raise_on_max = True @@ -84,7 +84,7 @@ class ExampleHandler1c(ExampleHandler): This handler always can apply a correction, but will only apply it twice and then not anymore. """ - max_num_corrections = 2 # type: ignore + max_num_corrections = 2 raise_on_max = False @@ -93,11 +93,11 @@ class ExampleHandler2(ErrorHandler): This handler always result in an error. 
""" - def __init__(self, params): + def __init__(self, params) -> None: self.params = params self.has_error = False - def check(self, directory="./"): + def check(self, directory="./") -> bool: return True def correct(self, directory="./"): @@ -118,27 +118,27 @@ def correct(self, directory="./"): class ExampleValidator1(Validator): - def __init__(self): + def __init__(self) -> None: pass - def check(self, directory="./"): + def check(self, directory="./") -> bool: return False class ExampleValidator2(Validator): - def __init__(self): + def __init__(self) -> None: pass - def check(self, directory="./"): + def check(self, directory="./") -> bool: return True class CustodianTest(unittest.TestCase): - def setUp(self): + def setUp(self) -> None: self.cwd = os.getcwd() os.chdir(os.path.abspath(os.path.dirname(__file__))) - def test_exitcode_error(self): + def test_exitcode_error(self) -> None: c = Custodian([], [ExitCodeJob(0)]) c.run() c = Custodian([], [ExitCodeJob(1)]) @@ -150,7 +150,7 @@ def test_exitcode_error(self): c = Custodian([], [ExitCodeJob(1)], terminate_on_nonzero_returncode=False) c.run() - def test_run(self): + def test_run(self) -> None: n_jobs = 100 params = {"initial": 0, "total": 0} c = Custodian( @@ -162,7 +162,7 @@ def test_run(self): assert len(output) == n_jobs ExampleHandler(params).as_dict() - def test_run_interrupted(self): + def test_run_interrupted(self) -> None: n_jobs = 100 params = {"initial": 0, "total": 0} c = Custodian( @@ -181,7 +181,7 @@ def test_run_interrupted(self): assert c.run_interrupted() == n_jobs - total_done total_done += 1 - def test_unrecoverable(self): + def test_unrecoverable(self) -> None: n_jobs = 100 params = {"initial": 0, "total": 0} handler = ExampleHandler2(params) @@ -195,7 +195,7 @@ def test_unrecoverable(self): assert handler.has_error assert c.run_log[-1]["handler"] == handler - def test_max_errors(self): + def test_max_errors(self) -> None: n_jobs = 100 params = {"initial": 0, "total": 0} handler = ExampleHandler(params) @@ -209,7 +209,7 @@ def test_max_errors(self): c.run() assert c.run_log[-1]["max_errors"] - def test_max_errors_per_job(self): + def test_max_errors_per_job(self) -> None: n_jobs = 100 params = {"initial": 0, "total": 0} handler = ExampleHandler(params) @@ -223,7 +223,7 @@ def test_max_errors_per_job(self): c.run() assert c.run_log[-1]["max_errors_per_job"] - def test_max_errors_per_handler_raise(self): + def test_max_errors_per_handler_raise(self) -> None: n_jobs = 100 params = {"initial": 0, "total": 0} handler = ExampleHandler1b(params) @@ -240,7 +240,7 @@ def test_max_errors_per_handler_raise(self): assert c.run_log[-1]["max_errors_per_handler"] assert c.run_log[-1]["handler"] == handler - def test_max_errors_per_handler_warning(self): + def test_max_errors_per_handler_warning(self) -> None: n_jobs = 100 params = {"initial": 0, "total": 0} c = Custodian( @@ -252,7 +252,7 @@ def test_max_errors_per_handler_warning(self): c.run() assert all(len(r["corrections"]) <= 2 for r in c.run_log) - def test_validators(self): + def test_validators(self) -> None: n_jobs = 100 params = {"initial": 0, "total": 0} c = Custodian( @@ -277,7 +277,7 @@ def test_validators(self): c.run() assert c.run_log[-1]["validator"] == v - def test_from_spec(self): + def test_from_spec(self) -> None: spec = """jobs: - jb: custodian.vasp.jobs.VaspJob params: @@ -309,7 +309,7 @@ def test_from_spec(self): assert len(c.handlers) == 3 assert len(c.validators) == 1 - def tearDown(self): + def tearDown(self) -> None: for file in 
glob("custodian.*.tar.gz"): os.remove(file) try: diff --git a/tests/test_utils.py b/tests/test_utils.py index 718214d8..eafd5967 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,7 +1,7 @@ from custodian.utils import tracked_lru_cache -def test_cache_and_clear(): +def test_cache_and_clear() -> None: n_calls = 0 @tracked_lru_cache diff --git a/tests/vasp/test_handlers.py b/tests/vasp/test_handlers.py index b0702fdf..942d6733 100644 --- a/tests/vasp/test_handlers.py +++ b/tests/vasp/test_handlers.py @@ -42,7 +42,7 @@ @pytest.fixture(autouse=True) -def _clear_tracked_cache(): +def _clear_tracked_cache() -> None: """Clear the cache of the stored functions between the tests.""" from custodian.utils import tracked_lru_cache @@ -61,16 +61,16 @@ def copy_tmp_files(tmp_path: str, *file_paths: str) -> None: class VaspErrorHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files(self.tmp_path, *glob("*", root_dir=TEST_FILES)) - def test_frozen_job(self): + def test_frozen_job(self) -> None: handler = FrozenJobErrorHandler() dct = handler.correct() assert dct["errors"] == ["Frozen job"] assert Incar.from_file("INCAR")["ALGO"] == "Normal" - def test_algotet(self): + def test_algotet(self) -> None: shutil.copy("INCAR.algo_tet_only", "INCAR") handler = VaspErrorHandler("vasp.algo_tet_only") handler.check() @@ -86,7 +86,7 @@ def test_algotet(self): assert handler.error_count["algo_tet"] == 2 assert dct["actions"] == [{"action": {"_set": {"ISMEAR": 0, "SIGMA": 0.05}}, "dict": "INCAR"}] - def test_subspace(self): + def test_subspace(self) -> None: handler = VaspErrorHandler("vasp.subspace") handler.check() dct = handler.correct() @@ -99,7 +99,7 @@ def test_subspace(self): assert dct["errors"] == ["subspacematrix"] assert dct["actions"] == [{"action": {"_set": {"PREC": "Accurate"}}, "dict": "INCAR"}] - def test_check_correct(self): + def test_check_correct(self) -> None: handler = VaspErrorHandler("vasp.teterror") handler.check() dct = handler.correct() @@ -126,14 +126,14 @@ def test_check_correct(self): assert dct["errors"] == ["real_optlay"] assert dct["actions"] == [{"action": {"_set": {"LREAL": False}}, "dict": "INCAR"}] - def test_mesh_symmetry(self): + def test_mesh_symmetry(self) -> None: handler = MeshSymmetryErrorHandler("vasp.ibzkpt") handler.check() dct = handler.correct() assert dct["errors"] == ["mesh_symmetry"] assert dct["actions"] == [{"action": {"_set": {"kpoints": [[4, 4, 4]]}}, "dict": "KPOINTS"}] - def test_brions(self): + def test_brions(self) -> None: shutil.copy("INCAR.ibrion", "INCAR") handler = VaspErrorHandler("vasp.brions") handler.check() @@ -150,7 +150,7 @@ def test_brions(self): assert incar["IBRION"] == 2 assert incar["POTIM"] == pytest.approx(0.5) - def test_dentet(self): + def test_dentet(self) -> None: handler = VaspErrorHandler("vasp.dentet") handler.check() dct = handler.correct() @@ -162,7 +162,7 @@ def test_dentet(self): assert dct["errors"] == ["dentet"] assert dct["actions"] == [{"action": {"_set": {"ISMEAR": 0, "SIGMA": 0.05}}, "dict": "INCAR"}] - def test_zbrent(self): + def test_zbrent(self) -> None: handler = VaspErrorHandler("vasp.zbrent") handler.check() dct = handler.correct() @@ -221,7 +221,7 @@ def test_zbrent(self): assert incar["EDIFF"] == 1e-08 assert incar["NELMIN"] == 8 - def test_brmix(self): + def test_brmix(self) -> None: handler = VaspErrorHandler("vasp.brmix") assert handler.check() is True @@ -253,7 +253,7 @@ def test_brmix(self): dct = handler.correct() assert dct["errors"] == [] - def 
test_too_few_bands(self): + def test_too_few_bands(self) -> None: shutil.copytree(f"{TEST_FILES}/too_few_bands", self.tmp_path, dirs_exist_ok=True, symlinks=True) os.chdir(self.tmp_path) shutil.copy("INCAR", "INCAR.orig") @@ -263,7 +263,7 @@ def test_too_few_bands(self): assert dct["errors"] == ["too_few_bands"] assert dct["actions"] == [{"action": {"_set": {"NBANDS": 501}}, "dict": "INCAR"}] - def test_rot_matrix(self): + def test_rot_matrix(self) -> None: shutil.copytree(f"{TEST_FILES}/poscar_error", self.tmp_path, dirs_exist_ok=True, symlinks=True) os.chdir(self.tmp_path) shutil.copy("KPOINTS", "KPOINTS.orig") @@ -272,12 +272,12 @@ def test_rot_matrix(self): dct = handler.correct() assert dct["errors"] == ["rot_matrix"] - def test_rot_matrix_vasp6(self): + def test_rot_matrix_vasp6(self) -> None: handler = VaspErrorHandler("vasp6.sgrcon") assert handler.check() is True assert handler.correct()["errors"] == ["rot_matrix"] - def test_coef(self): + def test_coef(self) -> None: handler = VaspErrorHandler("vasp6.coef") handler.check() dct = handler.correct() @@ -288,13 +288,13 @@ def test_coef(self): dct = handler.correct() assert dct["actions"] == [{"file": "WAVECAR", "action": {"_file_delete": {"mode": "actual"}}}] - def test_as_from_dict(self): + def test_as_from_dict(self) -> None: handler = VaspErrorHandler("random_name") h2 = VaspErrorHandler.from_dict(handler.as_dict()) assert type(h2) == type(handler) assert h2.output_filename == "random_name" - def test_pssyevx_pdsyevx(self): + def test_pssyevx_pdsyevx(self) -> None: incar_orig = Incar.from_file("INCAR") # Joining tests for these three tags as they have identical handling for error_name in ("pssyevx", "pdsyevx"): @@ -305,7 +305,7 @@ def test_pssyevx_pdsyevx(self): assert incar["ALGO"] == "Normal" incar_orig.write_file("INCAR") - def test_eddrmm(self): + def test_eddrmm(self) -> None: shutil.copy("CONTCAR.eddav_eddrmm", "CONTCAR") handler = VaspErrorHandler("vasp.eddrmm") assert handler.check() is True @@ -319,14 +319,14 @@ def test_eddrmm(self): c = Structure.from_file("CONTCAR") assert p == c - def test_nicht_konv(self): + def test_nicht_konv(self) -> None: handler = VaspErrorHandler("vasp.nicht_konvergent") assert handler.check() is True assert handler.correct()["errors"] == ["nicht_konv"] incar = Incar.from_file("INCAR") assert incar["LREAL"] is False - def test_edddav(self): + def test_edddav(self) -> None: shutil.copy("CONTCAR.eddav_eddrmm", "CONTCAR") handler = VaspErrorHandler("vasp.edddav2") assert handler.check() is True @@ -345,7 +345,7 @@ def test_edddav(self): c = Structure.from_file("CONTCAR") assert p == c - def test_gradient_not_orthogonal(self): + def test_gradient_not_orthogonal(self) -> None: handler = VaspErrorHandler("vasp.gradient_not_orthogonal") assert handler.check() is True assert "grad_not_orth" in handler.correct()["errors"] @@ -388,7 +388,7 @@ def test_gradient_not_orthogonal(self): incar = Incar.from_file("INCAR") assert incar["ALGO"] == "All" - def test_rhosyg(self): + def test_rhosyg(self) -> None: handler = VaspErrorHandler("vasp.rhosyg") assert handler.check() is True assert handler.correct()["errors"] == ["rhosyg"] @@ -398,7 +398,7 @@ def test_rhosyg(self): incar = Incar.from_file("INCAR") assert incar["ISYM"] == 0 - def test_rhosyg_vasp6(self): + def test_rhosyg_vasp6(self) -> None: handler = VaspErrorHandler("vasp6.rhosyg") assert handler.check() is True assert handler.correct()["errors"] == ["rhosyg"] @@ -408,14 +408,14 @@ def test_rhosyg_vasp6(self): incar = Incar.from_file("INCAR") assert 
incar["ISYM"] == 0 - def test_hnform(self): + def test_hnform(self) -> None: handler = VaspErrorHandler("vasp.hnform") assert handler.check() is True assert handler.correct()["errors"] == ["hnform"] incar = Incar.from_file("INCAR") assert incar["ISYM"] == 0 - def test_bravais(self): + def test_bravais(self) -> None: handler = VaspErrorHandler("vasp6.bravais") assert handler.check() is True assert handler.correct()["errors"] == ["bravais"] @@ -457,21 +457,21 @@ def test_posmap_and_pricelv(self) -> None: incar_orig.write_file("INCAR") - def test_point_group(self): + def test_point_group(self) -> None: handler = VaspErrorHandler("vasp.point_group") assert handler.check() is True assert handler.correct()["errors"] == ["point_group"] incar = Incar.from_file("INCAR") assert incar["ISYM"] == 0 - def test_symprec_noise(self): + def test_symprec_noise(self) -> None: handler = VaspErrorHandler("vasp.symprec_noise") assert handler.check() is True assert handler.correct()["errors"] == ["symprec_noise"] incar = Incar.from_file("INCAR") assert incar["SYMPREC"] == 1e-06 - def test_dfpt_ncore(self): + def test_dfpt_ncore(self) -> None: handler = VaspErrorHandler("vasp.dfpt_ncore") assert handler.check() is True assert handler.correct()["errors"] == ["dfpt_ncore"] @@ -479,7 +479,7 @@ def test_dfpt_ncore(self): assert "NPAR" not in incar assert "NCORE" not in incar - def test_finite_difference_ncore(self): + def test_finite_difference_ncore(self) -> None: handler = VaspErrorHandler("vasp.fd_ncore") assert handler.check() is True assert handler.correct()["errors"] == ["dfpt_ncore"] @@ -487,7 +487,7 @@ def test_finite_difference_ncore(self): assert "NPAR" not in incar assert "NCORE" not in incar - def test_point_group_vasp6(self): + def test_point_group_vasp6(self) -> None: # the error message is formatted differently in VASP6 compared to VASP5 handler = VaspErrorHandler("vasp6.point_group") assert handler.check() is True @@ -495,7 +495,7 @@ def test_point_group_vasp6(self): incar = Incar.from_file("INCAR") assert incar["ISYM"] == 0 - def test_inv_rot_matrix_vasp6(self): + def test_inv_rot_matrix_vasp6(self) -> None: # the error message is formatted differently in VASP6 compared to VASP5 handler = VaspErrorHandler("vasp6.inv_rot_mat") assert handler.check() is True @@ -503,7 +503,7 @@ def test_inv_rot_matrix_vasp6(self): incar = Incar.from_file("INCAR") assert incar["SYMPREC"] == 1e-08 - def test_bzint_vasp6(self): + def test_bzint_vasp6(self) -> None: # the BZINT error message is formatted differently in VASP6 compared to VASP5 handler = VaspErrorHandler("vasp6.bzint") assert handler.check() is True @@ -520,7 +520,7 @@ def test_bzint_vasp6(self): assert incar["ISMEAR"] == 0 assert incar["SIGMA"] == 0.05 - def test_too_large_kspacing(self): + def test_too_large_kspacing(self) -> None: shutil.copy("INCAR.kspacing", "INCAR") vi = VaspInput.from_directory(".") handler = VaspErrorHandler("vasp.teterror") @@ -531,14 +531,14 @@ def test_too_large_kspacing(self): {"action": {"_set": {"KSPACING": vi["INCAR"].get("KSPACING") * 0.8}}, "dict": "INCAR"} ] - def test_nbands_not_sufficient(self): + def test_nbands_not_sufficient(self) -> None: handler = VaspErrorHandler("vasp.nbands_not_sufficient") assert handler.check() is True dct = handler.correct() assert dct["errors"] == ["nbands_not_sufficient"] assert dct["actions"] is None - def test_too_few_bands_round_error(self): + def test_too_few_bands_round_error(self) -> None: # originally there are NBANDS= 7 # correction should increase it 
shutil.copy("INCAR.too_few_bands_round_error", "INCAR") @@ -548,21 +548,21 @@ def test_too_few_bands_round_error(self): assert dct["errors"] == ["too_few_bands"] assert dct["actions"] == [{"dict": "INCAR", "action": {"_set": {"NBANDS": 8}}}] - def test_set_core_wf(self): + def test_set_core_wf(self) -> None: handler = VaspErrorHandler("vasp.set_core_wf") assert handler.check() is True dct = handler.correct() assert dct["errors"] == ["set_core_wf"] assert dct["actions"] is None - def test_read_error(self): + def test_read_error(self) -> None: handler = VaspErrorHandler("vasp.read_error") assert handler.check() is True dct = handler.correct() assert dct["errors"] == ["read_error"] assert dct["actions"] is None - def test_amin(self): + def test_amin(self) -> None: # Cell with at least one dimension >= 50 A, but AMIN > 0.01, and calculation not yet complete shutil.copy("INCAR.amin", "INCAR") handler = VaspErrorHandler("vasp.amin") @@ -571,7 +571,7 @@ def test_amin(self): assert dct["errors"] == ["amin"] assert dct["actions"] == [{"action": {"_set": {"AMIN": 0.01}}, "dict": "INCAR"}] - def test_eddiag(self): + def test_eddiag(self) -> None: # subspace rotation error os.remove("CONTCAR") shutil.copy("INCAR.amin", "INCAR") @@ -596,10 +596,10 @@ def test_eddiag(self): class AliasingErrorHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files(self.tmp_path, *glob("aliasing/*", root_dir=TEST_FILES)) - def test_aliasing(self): + def test_aliasing(self) -> None: handler = AliasingErrorHandler("vasp.aliasing") handler.check() dct = handler.correct() @@ -611,7 +611,7 @@ def test_aliasing(self): {"file": "WAVECAR", "action": {"_file_delete": {"mode": "actual"}}}, ] - def test_aliasing_incar(self): + def test_aliasing_incar(self) -> None: shutil.copy("INCAR", "INCAR.orig") handler = AliasingErrorHandler("vasp.aliasing_incar") handler.check() @@ -633,10 +633,10 @@ def test_aliasing_incar(self): class UnconvergedErrorHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files(self.tmp_path, *glob("unconverged/*", root_dir=TEST_FILES)) - def test_check_correct_electronic(self): + def test_check_correct_electronic(self) -> None: shutil.copy("vasprun.xml.electronic", "vasprun.xml") handler = UnconvergedErrorHandler() assert handler.check() @@ -685,23 +685,23 @@ def test_check_correct_electronic(self): assert handler.check() dct = handler.correct() assert dct["errors"] == ["Unconverged"] - assert [{"dict": "INCAR", "action": {"_set": {"ALGO": "Damped", "TIME": 0.5}}}] == dct["actions"] + assert dct["actions"] == [{"dict": "INCAR", "action": {"_set": {"ALGO": "Damped", "TIME": 0.5}}}] - def test_check_correct_electronic_repeat(self): + def test_check_correct_electronic_repeat(self) -> None: shutil.copy("vasprun.xml.electronic2", "vasprun.xml") handler = UnconvergedErrorHandler() assert handler.check() dct = handler.correct() assert dct == {"actions": [{"action": {"_set": {"ALGO": "All"}}, "dict": "INCAR"}], "errors": ["Unconverged"]} - def test_check_correct_ionic(self): + def test_check_correct_ionic(self) -> None: shutil.copy("vasprun.xml.ionic", "vasprun.xml") handler = UnconvergedErrorHandler() assert handler.check() dct = handler.correct() assert dct["errors"] == ["Unconverged"] - def test_check_correct_scan(self): + def test_check_correct_scan(self) -> None: shutil.copy("vasprun.xml.scan", "vasprun.xml") handler = UnconvergedErrorHandler() assert handler.check() @@ -709,20 +709,20 @@ def test_check_correct_scan(self): assert dct["errors"] == 
["Unconverged"] assert {"dict": "INCAR", "action": {"_set": {"ALGO": "All"}}} in dct["actions"] - def test_amin(self): + def test_amin(self) -> None: shutil.copy("vasprun.xml.electronic_amin", "vasprun.xml") handler = UnconvergedErrorHandler() assert handler.check() dct = handler.correct() - assert [{"dict": "INCAR", "action": {"_set": {"AMIN": 0.01}}}] == dct["actions"] + assert dct["actions"] == [{"dict": "INCAR", "action": {"_set": {"AMIN": 0.01}}}] - def test_as_from_dict(self): + def test_as_from_dict(self) -> None: handler = UnconvergedErrorHandler("random_name.xml") h2 = UnconvergedErrorHandler.from_dict(handler.as_dict()) assert type(h2) == UnconvergedErrorHandler assert h2.output_filename == "random_name.xml" - def test_correct_normal_with_condition(self): + def test_correct_normal_with_condition(self) -> None: shutil.copy("vasprun.xml.electronic_normal", "vasprun.xml") # Reuse an existing file handler = UnconvergedErrorHandler() assert handler.check() @@ -730,7 +730,7 @@ def test_correct_normal_with_condition(self): assert dct["errors"] == ["Unconverged"] assert dct == {"actions": [{"action": {"_set": {"ALGO": "All"}}, "dict": "INCAR"}], "errors": ["Unconverged"]} - def test_psmaxn(self): + def test_psmaxn(self) -> None: shutil.copy("vasprun.xml.electronic", "vasprun.xml") shutil.copy(f"{TEST_FILES}/large_cell_real_optlay/OUTCAR", "OUTCAR") handler = UnconvergedErrorHandler() @@ -743,7 +743,7 @@ def test_psmaxn(self): ] tracked_lru_cache.tracked_cache_clear() - def test_uncorrectable(self): + def test_uncorrectable(self) -> None: shutil.copy("vasprun.xml.unconverged_unfixable", "vasprun.xml") handler = UnconvergedErrorHandler() assert handler.check() @@ -753,10 +753,10 @@ def test_uncorrectable(self): class IncorrectSmearingHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files(self.tmp_path, "scan_metal/INCAR", "scan_metal/vasprun.xml") - def test_check_correct_scan_metal(self): + def test_check_correct_scan_metal(self) -> None: handler = IncorrectSmearingHandler() assert handler.check() dct = handler.correct() @@ -767,28 +767,28 @@ def test_check_correct_scan_metal(self): class IncorrectSmearingHandlerStaticTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files(self.tmp_path, "static_smearing/INCAR", "static_smearing/vasprun.xml") - def test_check_correct_scan_metal(self): + def test_check_correct_scan_metal(self) -> None: handler = IncorrectSmearingHandler() assert not handler.check() class IncorrectSmearingHandlerFermiTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files(self.tmp_path, "fermi_smearing/INCAR", "fermi_smearing/vasprun.xml") - def test_check_correct_scan_metal(self): + def test_check_correct_scan_metal(self) -> None: handler = IncorrectSmearingHandler() assert not handler.check() class KspacingMetalHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files(self.tmp_path, "scan_metal/INCAR", "scan_metal/vasprun.xml") - def test_check_correct_scan_metal(self): + def test_check_correct_scan_metal(self) -> None: handler = KspacingMetalHandler() assert handler.check() dct = handler.correct() @@ -796,7 +796,7 @@ def test_check_correct_scan_metal(self): assert Incar.from_file("INCAR")["KSPACING"] == 0.22 os.remove("vasprun.xml") - def test_check_with_non_kspacing_wf(self): + def test_check_with_non_kspacing_wf(self) -> None: os.chdir(TEST_FILES) shutil.copy("INCAR", f"{self.tmp_path}/INCAR") shutil.copy("vasprun.xml", f"{self.tmp_path}/vasprun.xml") @@ 
-809,12 +809,12 @@ def test_check_with_non_kspacing_wf(self): class LargeSigmaHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files( self.tmp_path, "large_sigma/INCAR", "large_sigma/vasprun.xml", "large_sigma/OUTCAR", "large_sigma/POSCAR" ) - def test_check_correct_large_sigma(self): + def test_check_correct_large_sigma(self) -> None: handler = LargeSigmaHandler() assert handler.check() dct = handler.correct() @@ -824,7 +824,7 @@ def test_check_correct_large_sigma(self): class ZpotrfErrorHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files( self.tmp_path, "zpotrf/INCAR", @@ -834,7 +834,7 @@ def setUp(self): "zpotrf/OSZICAR.one_step", ) - def test_first_step(self): + def test_first_step(self) -> None: shutil.copy("OSZICAR.empty", "OSZICAR") s1 = Structure.from_file("POSCAR") handler = VaspErrorHandler("vasp.out") @@ -848,7 +848,7 @@ def test_first_step(self): assert s2.volume == pytest.approx(s1.volume) assert s1.volume == pytest.approx(64.346221) - def test_potim_correction(self): + def test_potim_correction(self) -> None: shutil.copy("OSZICAR.one_step", "OSZICAR") s1 = Structure.from_file("POSCAR") handler = VaspErrorHandler("vasp.out") @@ -860,7 +860,7 @@ def test_potim_correction(self): assert s1.volume == pytest.approx(64.3462) assert Incar.from_file("INCAR")["POTIM"] == pytest.approx(0.25) - def test_static_run_correction(self): + def test_static_run_correction(self) -> None: shutil.copy("OSZICAR.empty", "OSZICAR") s1 = Structure.from_file("POSCAR") incar = Incar.from_file("INCAR") @@ -879,7 +879,7 @@ def test_static_run_correction(self): class ZpotrfErrorHandlerSmallTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files( self.tmp_path, "zpotrf_small/INCAR", @@ -888,7 +888,7 @@ def setUp(self): "zpotrf_small/vasp.out", ) - def test_small(self): + def test_small(self) -> None: handler = VaspErrorHandler("vasp.out") shutil.copy("OSZICAR.empty", "OSZICAR") assert handler.check() @@ -901,11 +901,11 @@ def test_small(self): class WalltimeHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: os.chdir(f"{TEST_FILES}/postprocess") os.environ.pop("CUSTODIAN_WALLTIME_START", None) - def test_walltime_start(self): + def test_walltime_start(self) -> None: # checks the walltime handlers starttime initialization handler = WalltimeHandler(wall_time=3600) new_starttime = handler.start_time @@ -914,7 +914,7 @@ def test_walltime_start(self): handler = WalltimeHandler(wall_time=3600) assert os.environ.get("CUSTODIAN_WALLTIME_START") == new_starttime.strftime("%a %b %d %H:%M:%S UTC %Y") - def test_check_and_correct(self): + def test_check_and_correct(self) -> None: # Try a 1 hr wall time with a 2 min buffer handler = WalltimeHandler(wall_time=3600, buffer_time=120) assert not handler.check() @@ -951,16 +951,16 @@ def test_check_and_correct(self): os.remove("STOPCAR") @classmethod - def tearDown(cls): + def tearDown(cls) -> None: os.environ.pop("CUSTODIAN_WALLTIME_START", None) os.chdir(CWD) class PositiveEnergyHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files(self.tmp_path, "positive_energy/INCAR", "positive_energy/POSCAR", "positive_energy/OSZICAR") - def test_check_correct(self): + def test_check_correct(self) -> None: handler = PositiveEnergyErrorHandler() assert handler.check() dct = handler.correct() @@ -974,10 +974,10 @@ def test_check_correct(self): class PotimHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: 
copy_tmp_files(self.tmp_path, "potim/INCAR", "potim/POSCAR", "potim/OSZICAR") - def test_check_correct(self): + def test_check_correct(self) -> None: incar = Incar.from_file("INCAR") original_potim = incar["POTIM"] @@ -996,10 +996,10 @@ def test_check_correct(self): class LrfCommHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files(self.tmp_path, "lrf_comm/INCAR", "lrf_comm/OUTCAR", "lrf_comm/std_err.txt") - def test_lrf_comm(self): + def test_lrf_comm(self) -> None: handler = LrfCommutatorHandler("std_err.txt") assert handler.check() is True dct = handler.correct() @@ -1009,10 +1009,10 @@ def test_lrf_comm(self): class KpointsTransHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files(self.tmp_path, "KPOINTS", "std_err.txt.kpoints_trans") - def test_kpoints_trans(self): + def test_kpoints_trans(self) -> None: handler = StdErrHandler("std_err.txt.kpoints_trans") assert handler.check() is True dct = handler.correct() @@ -1026,10 +1026,10 @@ def test_kpoints_trans(self): class OutOfMemoryHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files(self.tmp_path, "INCAR", "std_err.txt.oom") - def test_oom(self): + def test_oom(self) -> None: vi = VaspInput.from_directory(".") from custodian.vasp.interpreter import VaspModder @@ -1042,10 +1042,10 @@ def test_oom(self): class DriftErrorHandlerTest(PymatgenTest): - def setUp(self): + def setUp(self) -> None: copy_tmp_files(self.tmp_path, "INCAR", "drift/OUTCAR", "drift/CONTCAR") - def test_check(self): + def test_check(self) -> None: handler = DriftErrorHandler(max_drift=0.05, to_average=11) assert not handler.check() @@ -1066,7 +1066,7 @@ def test_check(self): handler.check() assert handler.max_drift == 0.01 - def test_correct(self): + def test_correct(self) -> None: handler = DriftErrorHandler(max_drift=0.0001, enaug_multiply=2) handler.check() handler.correct() @@ -1136,7 +1136,7 @@ def test_correct(self) -> None: incar = Incar.from_file("INCAR") assert incar["ALGO"].lower() == "all" - def test_as_from_dict(self): + def test_as_from_dict(self) -> None: handler = NonConvergingErrorHandler("OSZICAR_random") h2 = NonConvergingErrorHandler.from_dict(handler.as_dict()) assert type(h2) == type(handler) diff --git a/tests/vasp/test_io.py b/tests/vasp/test_io.py index a84fa262..17389197 100644 --- a/tests/vasp/test_io.py +++ b/tests/vasp/test_io.py @@ -6,7 +6,7 @@ @pytest.fixture(autouse=True) -def _clear_tracked_cache(): +def _clear_tracked_cache() -> None: """Clear the cache of the stored functions between the tests.""" from custodian.utils import tracked_lru_cache @@ -14,7 +14,7 @@ def _clear_tracked_cache(): class TestIO: - def test_load_outcar(self): + def test_load_outcar(self) -> None: outcar = load_outcar(f"{TEST_FILES}/large_sigma/OUTCAR") assert outcar is not None outcar2 = load_outcar(f"{TEST_FILES}/large_sigma/OUTCAR") @@ -23,7 +23,7 @@ def test_load_outcar(self): assert len(tracked_lru_cache.cached_functions) == 1 - def test_load_vasprun(self): + def test_load_vasprun(self) -> None: vr = load_vasprun(f"{TEST_FILES}/large_sigma/vasprun.xml") assert vr is not None vr2 = load_vasprun(f"{TEST_FILES}/large_sigma/vasprun.xml") diff --git a/tests/vasp/test_jobs.py b/tests/vasp/test_jobs.py index 18460adc..fefd8aa0 100644 --- a/tests/vasp/test_jobs.py +++ b/tests/vasp/test_jobs.py @@ -16,13 +16,13 @@ class TestVaspJob: - def test_as_from_dict(self): + def test_as_from_dict(self) -> None: v = VaspJob(["hello"]) v2 = VaspJob.from_dict(v.as_dict()) assert 
type(v2) == type(v) assert v2.vasp_cmd == ("hello",) - def test_setup(self): + def test_setup(self) -> None: with cd(TEST_FILES), ScratchDir(".", copy_from_current_on_enter=True): v = VaspJob(["hello"], auto_npar=True) v.setup() @@ -32,7 +32,7 @@ def test_setup(self): if count > 3: assert incar["NPAR"] > 1 - def test_setup_run_no_kpts(self): + def test_setup_run_no_kpts(self) -> None: # just make sure v.setup() and v.run() exit cleanly when no KPOINTS file is present with cd(f"{TEST_FILES}/kspacing"), ScratchDir(".", copy_from_current_on_enter=True): v = VaspJob(["hello"], auto_npar=True) @@ -44,7 +44,7 @@ def test_setup_run_no_kpts(self): # directory. v.run() - def test_postprocess(self): + def test_postprocess(self) -> None: with cd(f"{TEST_FILES}/postprocess"), ScratchDir(".", copy_from_current_on_enter=True): shutil.copy("INCAR", "INCAR.backup") @@ -69,7 +69,7 @@ def test_postprocess(self): assert incar["MAGMOM"] == pytest.approx([3.007, 1.397, -0.189, -0.189]) assert incar_prev["MAGMOM"] == pytest.approx([5, -5, 0.6, 0.6]) - def test_continue(self): + def test_continue(self) -> None: # Test the continuation functionality with cd(f"{TEST_FILES}/postprocess"): # Test default functionality @@ -94,19 +94,19 @@ def test_continue(self): assert Incar.from_file("INCAR")["ISTART"] == 1 v.postprocess() - def test_static(self): + def test_static(self) -> None: # Just a basic test of init. VaspJob.double_relaxation_run(["vasp"]) class TestVaspNEBJob: - def test_as_from_dict(self): + def test_as_from_dict(self) -> None: v = VaspNEBJob(["hello"]) v2 = VaspNEBJob.from_dict(v.as_dict()) assert type(v2) == type(v) assert v2.vasp_cmd == ("hello",) - def test_setup(self): + def test_setup(self) -> None: with cd(f"{TEST_FILES}/setup_neb"), ScratchDir(".", copy_from_current_on_enter=True): v = VaspNEBJob("hello", half_kpts=True) v.setup() @@ -121,7 +121,7 @@ def test_setup(self): assert kpt_pre.style.name == "Monkhorst" assert kpt.style.name == "Gamma" - def test_postprocess(self): + def test_postprocess(self) -> None: neb_outputs = ["INCAR", "KPOINTS", "POTCAR", "vasprun.xml"] neb_sub_outputs = [ "CHG", @@ -160,7 +160,7 @@ def test_postprocess(self): class TestGenerateVaspInputJob: - def test_run(self): + def test_run(self) -> None: with ScratchDir("."): for file in ("INCAR", "POSCAR", "POTCAR", "KPOINTS"): shutil.copy(f"{TEST_FILES}/{file}", file) diff --git a/tests/vasp/test_validators.py b/tests/vasp/test_validators.py index a9a5d7b4..23bf3f3a 100644 --- a/tests/vasp/test_validators.py +++ b/tests/vasp/test_validators.py @@ -8,7 +8,7 @@ @pytest.fixture(autouse=True) -def _clear_tracked_cache(): +def _clear_tracked_cache() -> None: """Clear the cache of the stored functions between the tests.""" from custodian.utils import tracked_lru_cache @@ -16,7 +16,7 @@ def _clear_tracked_cache(): class TestVasprunXMLValidator: - def test_check_and_correct(self): + def test_check_and_correct(self) -> None: os.chdir(f"{TEST_FILES}/bad_vasprun") handler = VasprunXMLValidator() assert handler.check() @@ -27,7 +27,7 @@ def test_check_and_correct(self): assert not handler.check() os.remove("vasprun.xml") - def test_as_dict(self): + def test_as_dict(self) -> None: handler = VasprunXMLValidator() dct = handler.as_dict() h2 = VasprunXMLValidator.from_dict(dct) @@ -35,7 +35,7 @@ def test_as_dict(self): class TestVaspFilesValidator: - def test_check_and_correct(self): + def test_check_and_correct(self) -> None: # just an example where CONTCAR is not present os.chdir(f"{TEST_FILES}/positive_energy") handler = 
VaspFilesValidator() @@ -44,7 +44,7 @@ def test_check_and_correct(self): os.chdir(f"{TEST_FILES}/postprocess") assert not handler.check() - def test_as_dict(self): + def test_as_dict(self) -> None: handler = VaspFilesValidator() dct = handler.as_dict() h2 = VaspFilesValidator.from_dict(dct) @@ -52,7 +52,7 @@ def test_as_dict(self): class TestVaspNpTMDValidator: - def test_check_and_correct(self): + def test_check_and_correct(self) -> None: # NPT-AIMD using correct VASP os.chdir(f"{TEST_FILES}/npt_common") handler = VaspNpTMDValidator() @@ -66,7 +66,7 @@ def test_check_and_correct(self): os.chdir(f"{TEST_FILES}/npt_bad_vasp") assert handler.check() - def test_as_dict(self): + def test_as_dict(self) -> None: handler = VaspNpTMDValidator() dct = handler.as_dict() h2 = VaspNpTMDValidator.from_dict(dct) @@ -74,7 +74,7 @@ def test_as_dict(self): class TestVaspAECCARValidator: - def test_check_and_correct(self): + def test_check_and_correct(self) -> None: # NPT-AIMD using correct VASP os.chdir(f"{TEST_FILES}/bad_aeccar") handler = VaspAECCARValidator()