fix: correct pre-commit lint config
Ruff actually gets applied correctly now
nfrasser committed Aug 26, 2024
1 parent 440607c commit ccb9917
Showing 4 changed files with 16 additions and 8 deletions.
12 changes: 10 additions & 2 deletions .pre-commit-config.yaml
@@ -10,13 +10,21 @@ repos:
       - id: check-yaml
       - id: check-added-large-files
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.5.6
+    rev: v0.6.2
     hooks:
       - id: ruff
+        name: ruff check
+        alias: check
         args: [--fix]
+      - id: ruff
+        name: ruff check imports
+        alias: check-imports
+        args: [--fix, --select, I, --exit-non-zero-on-fix]
       - id: ruff-format
+        name: ruff format
+        alias: format
   - repo: https://github.com/RobertCraigie/pyright-python
-    rev: v1.1.374
+    rev: v1.1.377
     hooks:
       - id: pyright
         additional_dependencies: [cython, httpretty, numpy, pytest]
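Note: the new check-imports hook runs Ruff's import-sorting rules (--select I) with --fix, and --exit-non-zero-on-fix makes the hook fail whenever it had to reorder something, so the commit stops until the fixed file is re-staged. A rough illustration (a hypothetical module, not taken from this repository) of the ordering those rules enforce:

# Before the fix, a module might interleave import groups like this:
#   import numpy as np
#   import os
#   import zipfile
#
# After `ruff check --select I --fix`: standard library first, third-party
# packages second, each group alphabetized and separated by a blank line.
import os
import zipfile

import numpy as np

print(os.name, zipfile.ZIP_STORED, np.__version__)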
4 changes: 2 additions & 2 deletions cryosparc/dataset.py
@@ -501,7 +501,7 @@ def innerjoin_many(cls, *datasets: "Dataset"):
         # [0,1,2]}), …]. This is faster than doing the innerjoin for all columns
         # and safer because we don't have to worry about not updating Python
         # string reference counts in the resulting dataset.
-        indexed_dsets = [Dataset({"uid": d["uid"], f"idx{i}": n.arange(len(d))}) for i, d in enumerate(datasets)]
+        indexed_dsets = [cls({"uid": d["uid"], f"idx{i}": n.arange(len(d))}) for i, d in enumerate(datasets)]
         indexed_dset = reduce(lambda dr, ds: cls(dr._data.innerjoin("uid", ds._data)), indexed_dsets)
         result = cls({"uid": indexed_dset["uid"]})
         result.add_fields(all_fields)
@@ -856,7 +856,7 @@ def __eq__(self, other: object):
         """
         return (
             isinstance(other, type(self))
-            and type(self) == type(other)
+            and type(self) is type(other)
             and len(self) == len(other)
             and self.descr() == other.descr()
             and all(n.array_equal(self[c1], other[c2]) for c1, c2 in zip(self, other))
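Note: the comment in the first hunk explains the approach: inner-join only small datasets holding uid plus a per-input row index, then gather the remaining columns by index afterwards. A minimal NumPy-only sketch of that idea (hypothetical helper, not the cryosparc-tools API; assumes uids are unique within each input):

import numpy as np

def indexed_innerjoin(uids_a: np.ndarray, uids_b: np.ndarray):
    """Return (idx_a, idx_b) such that uids_a[idx_a] == uids_b[idx_b]."""
    _, idx_a, idx_b = np.intersect1d(uids_a, uids_b, return_indices=True)
    return idx_a, idx_b

a_uid = np.array([11, 22, 33, 55], dtype=np.uint64)
b_uid = np.array([22, 55, 77], dtype=np.uint64)
idx_a, idx_b = indexed_innerjoin(a_uid, b_uid)

# Only the small uid/index arrays go through the join; every other column is
# gathered afterwards by plain indexing, avoiding copies of all columns.
a_col = np.array([0.1, 0.2, 0.3, 0.5])
print(a_uid[idx_a], a_col[idx_a])  # [22 55] [0.2 0.5]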
4 changes: 2 additions & 2 deletions docs/examples/connect_series_to_class3D.ipynb
@@ -79,11 +79,11 @@
 }
 ],
 "source": [
+"import zipfile\n",
+"\n",
 "unzip_path = series_path[:-4]\n",
 "print(unzip_path)\n",
 "\n",
-"import zipfile\n",
-"\n",
 "with zipfile.ZipFile(series_path, \"r\") as z:\n",
 "    z.extractall(unzip_path)"
 ]
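Note: for readability, the reordered notebook cell corresponds to this plain Python, reconstructed from the escaped JSON source above (series_path is defined in an earlier cell of the notebook; a placeholder stands in here):

import zipfile

series_path = "series.zip"  # placeholder: the notebook sets this in an earlier cell

unzip_path = series_path[:-4]  # drop the ".zip" suffix
print(unzip_path)

with zipfile.ZipFile(series_path, "r") as z:
    z.extractall(unzip_path)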
4 changes: 2 additions & 2 deletions tests/test_dataset.py
@@ -254,8 +254,8 @@ def test_pickle_unpickle():


 def test_column_aggregation(t20s_dset):
-    assert type(t20s_dset["uid"]) == Column
-    assert type(n.max(t20s_dset["uid"])) == n.uint64
+    assert type(t20s_dset["uid"]) is Column
+    assert type(n.max(t20s_dset["uid"])) is n.uint64
     assert isinstance(n.mean(t20s_dset["uid"]), n.number)
     assert not isinstance(n.mean(t20s_dset["uid"]), n.ndarray)

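Note: the switch from == to is in these assertions (and in __eq__ above) matches Ruff's E721 check, which asks for identity comparison when testing exact types. A small self-contained example of the distinction, using plain NumPy rather than the test fixtures:

import numpy as np

x = np.uint64(42)

# type(x) is T compares the type objects by identity: the exact-type check
# that E721 expects. Use isinstance() when subclasses should also match.
assert type(x) is np.uint64
assert isinstance(x, np.integer)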
