Skip to content

Commit

Permalink
Merge pull request #20 from lsst/tickets/DM-39857
Browse files — browse the repository at this point in the history
DM-39857: Stop using pytest-flake8 and add ruff configuration
  • Loading branch information
timj authored Jul 6, 2023
2 parents 6c54cfc + 6a33532 commit edd9f4c
Show file tree
Hide file tree
Showing 8 changed files with 99 additions and 43 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/build.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ["3.10"]
python-version: ["3.10", "3.11"]

steps:
- uses: actions/checkout@v3
Expand All @@ -37,7 +37,7 @@ jobs:
# We have two cores so we can speed up the testing with xdist
- name: Install pytest packages
run: |
pip install "flake8<5" pytest pytest-flake8 pytest-xdist pytest-openfiles pytest-cov
pip install pytest pytest-xdist pytest-openfiles pytest-cov
- name: List installed packages
run: |
Expand Down
5 changes: 5 additions & 0 deletions .github/workflows/lint.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -9,3 +9,8 @@ on:
jobs:
call-workflow:
uses: lsst/rubin_workflows/.github/workflows/lint.yaml@main
ruff:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: chartboost/ruff-action@v1
7 changes: 4 additions & 3 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,8 @@ repos:
hooks:
- id: isort
name: isort (python)
- repo: https://github.com/PyCQA/flake8
rev: 6.0.0
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.0.275
hooks:
- id: flake8
- id: ruff
8 changes: 8 additions & 0 deletions doc/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,3 +11,11 @@
html_short_title = project
doxylink = {}
exclude_patterns = ["changes/*"]

# Add intersphinx entries.
intersphinx_mapping["intersphinx"] = ("https://networkx.org/documentation/stable/", None) # noqa
intersphinx_mapping["lsst"] = ("https://pipelines.lsst.io/v/daily/", None) # noqa

nitpick_ignore_regex = [
("py:obj", "htcondor\\..*"),
]
48 changes: 43 additions & 5 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,6 @@ dynamic = ["version"]
[project.optional-dependencies]
test = [
"pytest >= 3.2",
"flake8 >= 3.7.5",
"pytest-flake8 >= 1.0.4",
"pytest-openfiles >= 0.5.0"
]

Expand Down Expand Up @@ -98,8 +96,6 @@ line_length = 110
write_to = "python/lsst/ctrl/bps/htcondor/version.py"

[tool.pytest.ini_options]
addopts = "--flake8"
flake8-ignore = ["W503", "E203", "N802", "N803", "N806", "N812", "N815", "N816"]

[tool.pydocstyle]
convention = "numpy"
Expand All @@ -109,4 +105,46 @@ convention = "numpy"
# Docstring at the very first line is not required
# D200, D205 and D400 all complain if the first sentence of the docstring does
# not fit on one line.
add-ignore = ["D107", "D105", "D102", "D100", "D200", "D205", "D400"]
add-ignore = ["D107", "D105", "D102", "D100", "D200", "D205", "D400", "D104"]

[tool.ruff]
exclude = [
"__init__.py",
]
ignore = [
"N802",
"N803",
"N806",
"N812",
"N815",
"N816",
"N999",
"D107",
"D105",
"D102",
"D104",
"D100",
"D200",
"D205",
"D400",
]
line-length = 110
select = [
"E", # pycodestyle
"F", # pyflakes
"N", # pep8-naming
"W", # pycodestyle
"D", # pydocstyle
"UP", # pyupgrade
"C4",
]
target-version = "py310"
extend-select = [
"RUF100", # Warn about unused noqa
]

[tool.ruff.pycodestyle]
max-doc-length = 79

[tool.ruff.pydocstyle]
convention = "numpy"
4 changes: 2 additions & 2 deletions python/lsst/ctrl/bps/htcondor/htcondor_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -1308,7 +1308,7 @@ def _add_run_info(wms_path, job):
else:
_LOG.debug("_add_run_info: subfile = %s", subfile)
try:
with open(subfile, "r", encoding="utf-8") as fh:
with open(subfile, encoding="utf-8") as fh:
for line in fh:
if line.startswith("+bps_"):
m = re.match(r"\+(bps_[^\s]+)\s*=\s*(.+)$", line)
Expand Down Expand Up @@ -1659,7 +1659,7 @@ def _wms_id_to_cluster(wms_id):
elif id_type == WmsIdType.PATH:
try:
job_info = read_dag_info(wms_id)
except (FileNotFoundError, PermissionError, IOError):
except (FileNotFoundError, PermissionError, OSError):
pass
else:
schedd_name = next(iter(job_info))
Expand Down
62 changes: 31 additions & 31 deletions python/lsst/ctrl/bps/htcondor/lssthtc.py
Original file line number Diff line number Diff line change
Expand Up @@ -197,7 +197,7 @@ def __init__(self, valid_keys, init_data=()):
self.update(init_data)

def __getitem__(self, key):
"""Returns value for given key if exists.
"""Return value for given key if exists.
Parameters
----------
Expand All @@ -206,7 +206,7 @@ def __getitem__(self, key):
Returns
-------
value : `Any`
value : `~collections.abc.Any`
Value associated with given key.
Raises
Expand All @@ -217,7 +217,7 @@ def __getitem__(self, key):
return self.data[key]

def __delitem__(self, key):
"""Deletes value for given key if exists.
"""Delete value for given key if exists.
Parameters
----------
Expand All @@ -232,13 +232,13 @@ def __delitem__(self, key):
del self.data[key]

def __setitem__(self, key, value):
"""Stores key,value in internal dict only if key is valid
"""Store key,value in internal dict only if key is valid.
Parameters
----------
key : `str`
Identifier to associate with given value.
value : `Any`
value : `~collections.abc.Any`
Value to store.
Raises
Expand Down Expand Up @@ -353,12 +353,12 @@ def htc_escape(value):
Parameters
----------
value : `Any`
value : `~collections.abc.Any`
Value that needs to have characters escaped if string.
Returns
-------
new_value : `Any`
new_value : `~collections.abc.Any`
Given value with characters escaped appropriate for HTCondor if string.
"""
if isinstance(value, str):
Expand All @@ -374,10 +374,10 @@ def htc_write_attribs(stream, attrs):
Parameters
----------
stream : `TextIOBase`
Output text stream (typically an open file)
stream : `~io.TextIOBase`
Output text stream (typically an open file).
attrs : `dict`
HTCondor job attributes (dictionary of attribute key, value)
HTCondor job attributes (dictionary of attribute key, value).
"""
for key, value in attrs.items():
# Make sure strings are syntactically correct for HTCondor.
Expand All @@ -390,14 +390,14 @@ def htc_write_attribs(stream, attrs):


def htc_write_condor_file(filename, job_name, job, job_attrs):
"""Main function to write an HTCondor submit file.
"""Write an HTCondor submit file.
Parameters
----------
filename : `str`
Filename for the HTCondor submit file
Filename for the HTCondor submit file.
job_name : `str`
Job name to use in submit file
Job name to use in submit file.
job : `RestrictedDict`
Submit script information.
job_attrs : `dict`
Expand Down Expand Up @@ -559,7 +559,7 @@ def htc_create_submit_from_file(submit_file):
An object representing a job submit description.
"""
descriptors = {}
with open(submit_file, "r") as fh:
with open(submit_file) as fh:
for line in fh:
line = line.strip()
if not line.startswith("#") and not line == "queue":
Expand All @@ -581,7 +581,7 @@ def _htc_write_job_commands(stream, name, jobs):
Parameters
----------
stream : `TextIOBase`
stream : `~io.TextIOBase`
Writeable text stream (typically an opened file).
name : `str`
Job name.
Expand Down Expand Up @@ -717,7 +717,7 @@ def dump(self, fh):
Parameters
----------
fh : `TextIOBase`
fh : `~io.TextIOBase`
Output stream
"""
printer = pprint.PrettyPrinter(indent=4, stream=fh)
Expand Down Expand Up @@ -773,9 +773,9 @@ def add_job(self, job, parent_names=None, child_names=None):
----------
job : `HTCJob`
HTCJob to add to the HTCDag
parent_names : `Iterable` [`str`], optional
parent_names : `~collections.abc.Iterable` [`str`], optional
Names of parent jobs
child_names : `Iterable` [`str`], optional
child_names : `~collections.abc.Iterable` [`str`], optional
Names of child jobs
"""
assert isinstance(job, HTCJob)
Expand Down Expand Up @@ -873,7 +873,7 @@ def dump(self, fh):
Parameters
----------
fh : `IO` or `str`
fh : `io.IO` or `str`
Where to dump DAG info as text.
"""
for key, value in self.graph:
Expand Down Expand Up @@ -1091,7 +1091,7 @@ def summary_from_dag(dir_name):
counts = defaultdict(int)
job_name_to_pipetask = {}
try:
with open(dag, "r") as fh:
with open(dag) as fh:
for line in fh:
if line.startswith("JOB"):
m = re.match(r"JOB ([^\s]+) jobs/([^/]+)/", line)
Expand Down Expand Up @@ -1171,7 +1171,7 @@ def read_dag_status(wms_path):
try:
node_stat_file = next(Path(wms_path).glob("*.node_status"))
_LOG.debug("Reading Node Status File %s", node_stat_file)
with open(node_stat_file, "r") as infh:
with open(node_stat_file) as infh:
dag_ad = classad.parseNext(infh) # pylint: disable=E1101
except StopIteration:
pass
Expand All @@ -1180,7 +1180,7 @@ def read_dag_status(wms_path):
# Pegasus check here
try:
metrics_file = next(Path(wms_path).glob("*.dag.metrics"))
with open(metrics_file, "r") as infh:
with open(metrics_file) as infh:
metrics = json.load(infh)
dag_ad["NodesTotal"] = metrics.get("jobs", 0)
dag_ad["NodesFailed"] = metrics.get("jobs_failed", 0)
Expand All @@ -1189,7 +1189,7 @@ def read_dag_status(wms_path):
except StopIteration:
try:
metrics_file = next(Path(wms_path).glob("*.metrics"))
with open(metrics_file, "r") as infh:
with open(metrics_file) as infh:
metrics = json.load(infh)
dag_ad["NodesTotal"] = metrics["wf_metrics"]["total_jobs"]
dag_ad["pegasus_version"] = metrics.get("version", "")
Expand Down Expand Up @@ -1236,7 +1236,7 @@ def read_node_status(wms_path):
jobs = {}
fake_id = -1.0 # For nodes that do not yet have a job id, give fake one
try:
with open(node_status, "r") as fh:
with open(node_status) as fh:
ads = classad.parseAds(fh)

for jclassad in ads:
Expand Down Expand Up @@ -1291,7 +1291,7 @@ def read_dag_log(wms_path):
-------
wms_workflow_id : `str`
HTCondor job id (i.e., <ClusterId>.<ProcId>) of the DAGMan job.
dag_info : `dict` [`str`, `Any`]
dag_info : `dict` [`str`, `~collections.abc.Any`]
HTCondor job information read from the log file mapped to HTCondor
job id.
Expand Down Expand Up @@ -1396,20 +1396,20 @@ def read_dag_info(wms_path):
try:
with open(filename) as fh:
dag_info = json.load(fh)
except (IOError, PermissionError) as exc:
except (OSError, PermissionError) as exc:
_LOG.debug("Retrieving DAGMan job information failed: %s", exc)
dag_info = {}
return dag_info


def write_dag_info(filename, dag_info):
"""Writes custom job information about DAGMan job.
"""Write custom job information about DAGMan job.
Parameters
----------
filename : `str`
Name of the file where the information will be stored.
dag_info : `dict` [`str` `dict` [`str` Any]]
dag_info : `dict` [`str` `dict` [`str`, Any]]
Information about the DAGMan job.
"""
schedd_name = next(iter(dag_info))
Expand All @@ -1423,7 +1423,7 @@ def write_dag_info(filename, dag_info):
}
}
json.dump(info, fh)
except (KeyError, IOError, PermissionError) as exc:
except (KeyError, OSError, PermissionError) as exc:
_LOG.debug("Persisting DAGMan job information failed: %s", exc)


Expand Down Expand Up @@ -1498,14 +1498,14 @@ def htc_check_dagman_output(wms_path):

message = ""
try:
with open(filename, "r") as fh:
with open(filename) as fh:
last_submit_failed = ""
for line in fh:
m = re.match(r"(\d\d/\d\d/\d\d \d\d:\d\d:\d\d) Job submit try \d+/\d+ failed", line)
if m:
last_submit_failed = m.group(1)
if last_submit_failed:
message = f"Warn: Job submission issues (last: {last_submit_failed})"
except (IOError, PermissionError):
except (OSError, PermissionError):
message = f"Warn: Could not read dagman output file from {wms_path}."
return message
4 changes: 4 additions & 0 deletions tests/test_lssthtc.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,12 +30,16 @@


class SimpleTestCase(unittest.TestCase):
"""Simplest possible import test."""

def test_version(self):
self.assertIsNotNone(version)


@unittest.skipIf(htcondor is None, "Warning: Missing HTCondor API. Skipping")
class TestLsstHtc(unittest.TestCase):
"""Test basic usage."""

def testHtcEscapeInt(self):
self.assertEqual(lssthtc.htc_escape(100), 100)

Expand Down

0 comments on commit edd9f4c

Please sign in to comment.