Use strict=True for all zip calls
timj committed Jul 24, 2023
1 parent 3c84fee commit 05bb4df
Showing 19 changed files with 44 additions and 37 deletions.
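Background on the change: zip(..., strict=True), available since Python 3.10, raises a ValueError when its arguments are exhausted at different lengths, whereas a bare zip silently stops at the shortest input. A minimal sketch of the difference, using illustrative names rather than anything from the Butler code:

    refs = ["ref_a", "ref_b", "ref_c"]
    infos = ["info_a", "info_b"]  # one entry short, e.g. because of an upstream bug

    # Bare zip silently drops "ref_c"; the mismatch goes unnoticed.
    assert list(zip(refs, infos)) == [("ref_a", "info_a"), ("ref_b", "info_b")]

    # strict=True surfaces the mismatch immediately.
    try:
        list(zip(refs, infos, strict=True))
    except ValueError as exc:
        print(exc)  # zip() argument 2 is shorter than argument 1

Linters can enforce the same convention; for example, the flake8-bugbear rule B905 (also implemented in ruff) flags zip() calls that omit an explicit strict= argument.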
2 changes: 1 addition & 1 deletion python/lsst/daf/butler/_quantum_backed.py
@@ -692,7 +692,7 @@ def collect_and_transfer(
"""
grouped_refs = defaultdict(list)
summary_records: dict[str, DatastoreRecordData] = {}
- for quantum, provenance_for_quantum in zip(quanta, provenance):
+ for quantum, provenance_for_quantum in zip(quanta, provenance, strict=True):
quantum_refs_by_id = {
ref.id: ref
for ref in itertools.chain.from_iterable(quantum.outputs.values())
4 changes: 3 additions & 1 deletion python/lsst/daf/butler/core/dimensions/_schema.py
@@ -248,7 +248,9 @@ def columns(self) -> Mapping[ColumnTag, str]:
element's records (`~collections.abc.Mapping`).
"""
result: dict[ColumnTag, str] = {}
- for dimension_name, field_name in zip(self.element.dimensions.names, self.dimensions.names):
+ for dimension_name, field_name in zip(
+     self.element.dimensions.names, self.dimensions.names, strict=True
+ ):
result[DimensionKeyColumnTag(dimension_name)] = field_name
for field_name in self.facts.names:
result[DimensionRecordColumnTag(self.element.name, field_name)] = field_name
4 changes: 2 additions & 2 deletions python/lsst/daf/butler/core/named.py
@@ -103,7 +103,7 @@ def byName(self) -> dict[str, V_co]:
``self``, with `str` names as keys. This is always a new object,
not a view.
"""
- return dict(zip(self.names, self.values()))
+ return dict(zip(self.names, self.values(), strict=True))

@abstractmethod
def keys(self) -> NamedValueAbstractSet[K]: # type: ignore
@@ -195,7 +195,7 @@ def names(self) -> KeysView[str]:

def byName(self) -> dict[str, V]:
"""Return a `dict` with names as keys and the ``self`` values."""
- return dict(zip(self._names.keys(), self._dict.values()))
+ return dict(zip(self._names.keys(), self._dict.values(), strict=True))

def __len__(self) -> int:
return len(self._dict)
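The byName() changes above are typical of what strict=True guards against: if the stored names and values ever fell out of sync, the old code would quietly build a truncated mapping. A hypothetical sketch with plain dicts (not the actual NamedKeyDict internals):

    names = ["bias", "dark", "flat"]
    values = [1, 2]  # out of sync with names, for illustration only

    dict(zip(names, values))  # {"bias": 1, "dark": 2} -- "flat" is silently dropped
    # dict(zip(names, values, strict=True)) would raise ValueError instead of losing "flat"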
10 changes: 5 additions & 5 deletions python/lsst/daf/butler/datastores/chainedDatastore.py
@@ -153,7 +153,7 @@ def setConfigRoot(cls, root: str, config: Config, full: Config, overwrite: bool

containerKey = cls.containerKey
for idx, (child, fullChild) in enumerate(
- zip(datastoreConfig[containerKey], fullDatastoreConfig[containerKey])
+ zip(datastoreConfig[containerKey], fullDatastoreConfig[containerKey], strict=True)
):
childConfig = DatastoreConfig(child, mergeDefaults=False)
fullChildConfig = DatastoreConfig(fullChild, mergeDefaults=False)
@@ -417,7 +417,7 @@ def put(self, inMemoryDataset: Any, ref: DatasetRef) -> None:
nsuccess = 0
npermanent = 0
nephemeral = 0
- for datastore, constraints in zip(self.datastores, self.datastoreConstraints):
+ for datastore, constraints in zip(self.datastores, self.datastoreConstraints, strict=True):
if (
constraints is not None and not constraints.isAcceptable(ref)
) or not datastore.constraints.isAcceptable(ref):
@@ -497,7 +497,7 @@ def isDatasetAcceptable(dataset: FileDataset, *, name: str, constraints: Constra
# ...and remember whether all of the failures are due to
# NotImplementedError being raised.
allFailuresAreNotImplementedError = True
- for datastore, constraints in zip(self.datastores, self.datastoreConstraints):
+ for datastore, constraints in zip(self.datastores, self.datastoreConstraints, strict=True):
okForChild: list[FileDataset]
if constraints is not None:
okForChild = [
@@ -1024,7 +1024,7 @@ def export(
# Try the next datastore.
continue

- for ref, export in zip(filtered, this_export):
+ for ref, export in zip(filtered, this_export, strict=True):
# Get the position and also delete it from the list.
exported[ref_positions.pop(ref)] = export

@@ -1097,7 +1097,7 @@ def transfer_from(
# Already transferred all datasets known to this datastore.
continue

- for datastore, constraints in zip(self.datastores, self.datastoreConstraints):
+ for datastore, constraints in zip(self.datastores, self.datastoreConstraints, strict=True):
if constraints is not None:
filtered_refs = []
for ref in these_refs:
4 changes: 2 additions & 2 deletions python/lsst/daf/butler/datastores/fileDatastore.py
@@ -367,7 +367,7 @@ def _delete_artifact(self, location: Location) -> None:

def addStoredItemInfo(self, refs: Iterable[DatasetRef], infos: Iterable[StoredFileInfo]) -> None:
# Docstring inherited from GenericBaseDatastore
- records = [info.rebase(ref).to_record() for ref, info in zip(refs, infos)]
+ records = [info.rebase(ref).to_record() for ref, info in zip(refs, infos, strict=True)]
self._table.insert(*records, transaction=self._transaction)

def getStoredItemsInfo(self, ref: DatasetIdRef) -> list[StoredFileInfo]:
@@ -1434,7 +1434,7 @@ def _process_mexists_records(
# to the remote URI.
if self.cacheManager.file_count > 0:
ref = id_to_ref[ref_id]
- for uri, storedFileInfo in zip(uris, infos):
+ for uri, storedFileInfo in zip(uris, infos, strict=True):
check_ref = ref
if not ref.datasetType.isComponent() and (component := storedFileInfo.component):
check_ref = ref.makeComponentRef(component)
2 changes: 1 addition & 1 deletion python/lsst/daf/butler/datastores/inMemoryDatastore.py
@@ -180,7 +180,7 @@ def bridge(self) -> DatastoreRegistryBridge:

def addStoredItemInfo(self, refs: Iterable[DatasetRef], infos: Iterable[StoredMemoryItemInfo]) -> None:
# Docstring inherited from GenericBaseDatastore.
- for ref, info in zip(refs, infos):
+ for ref, info in zip(refs, infos, strict=True):
self.records[ref.id] = info
self.related.setdefault(info.parentID, set()).add(ref.id)

2 changes: 1 addition & 1 deletion python/lsst/daf/butler/registries/sql.py
@@ -662,7 +662,7 @@ def _importDatasets(
"dimension row is missing."
) from err
# Check that imported dataset IDs match the input
- for imported_ref, input_ref in zip(refs, datasets):
+ for imported_ref, input_ref in zip(refs, datasets, strict=True):
if imported_ref.id != input_ref.id:
raise RegistryConsistencyError(
"Imported dataset ID differs from input dataset ID, "
@@ -613,12 +613,12 @@ def insert(
}
tagsRows = [
dict(protoTagsRow, dataset_id=row["id"], **dataId.byName())
- for dataId, row in zip(dataIdList, rows)
+ for dataId, row in zip(dataIdList, rows, strict=True)
]
# Insert those rows into the tags table.
self._db.insert(self._tags, *tagsRows)

- for dataId, row in zip(dataIdList, rows):
+ for dataId, row in zip(dataIdList, rows, strict=True):
yield DatasetRef(
datasetType=self.datasetType,
dataId=dataId,
2 changes: 1 addition & 1 deletion python/lsst/daf/butler/registry/dimensions/table.py
@@ -98,7 +98,7 @@ def __init__(
self._fetchColumns: dict[str, sqlalchemy.sql.ColumnElement] = {
dimension.name: self._table.columns[name]
for dimension, name in zip(
- self._element.dimensions, self._element.RecordClass.fields.dimensions.names
+ self._element.dimensions, self._element.RecordClass.fields.dimensions.names, strict=True
)
}
self._skypix_overlap_tables = skypix_overlap_tables
4 changes: 2 additions & 2 deletions python/lsst/daf/butler/registry/interfaces/_database.py
@@ -146,7 +146,7 @@ def addTableTuple(self, specs: tuple[ddl.TableSpec, ...]) -> tuple[sqlalchemy.sc
we cannot represent this with type annotations.
"""
return specs._make( # type: ignore
- self.addTable(name, spec) for name, spec in zip(specs._fields, specs)  # type: ignore
+ self.addTable(name, spec) for name, spec in zip(specs._fields, specs, strict=True)  # type: ignore
)

def addInitializer(self, initializer: Callable[[Database], None]) -> None:
@@ -1404,7 +1404,7 @@ def format_bad(inconsistencies: dict[str, Any]) -> str:
return None, inserted_or_updated
else:
assert result is not None
- return {k: v for k, v in zip(returning, result)}, inserted_or_updated
+ return {k: v for k, v in zip(returning, result, strict=True)}, inserted_or_updated

def insert(
self,
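In addTableTuple above, the two zipped iterables come from the same named tuple (specs._fields and specs itself), so strict=True can never fire there; it simply documents the invariant. A rough stand-in illustration of that pattern, with a made-up tuple type:

    from collections import namedtuple

    Tables = namedtuple("Tables", ["dataset", "collection"])
    specs = Tables(dataset="dataset_spec", collection="collection_spec")

    # _fields and the tuple itself always have the same length, so strict=True is trivially satisfied.
    rebuilt = Tables._make(f"{name}:{spec}" for name, spec in zip(specs._fields, specs, strict=True))
    print(rebuilt)  # Tables(dataset='dataset:dataset_spec', collection='collection:collection_spec')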
8 changes: 5 additions & 3 deletions python/lsst/daf/butler/registry/tests/_database.py
@@ -409,15 +409,15 @@ def testInsertQueryDelete(self):
r._asdict()
for r in self.query_list(db, tables.b.select().where(tables.b.columns.id > results[1]["id"]))
]
- expected = [dict(row, id=id) for row, id in zip(rows, ids)]
+ expected = [dict(row, id=id) for row, id in zip(rows, ids, strict=True)]
self.assertCountEqual(results, expected)
self.assertTrue(all(result["id"] is not None for result in results))
# Insert multiple rows into a table with an autoincrement primary key,
# then use the returned IDs to insert into a dynamic table.
rows = [{"b_id": results[0]["id"]}, {"b_id": None}]
ids = db.insert(tables.c, *rows, returnIds=True)
results = [r._asdict() for r in self.query_list(db, tables.c.select())]
- expected = [dict(row, id=id) for row, id in zip(rows, ids)]
+ expected = [dict(row, id=id) for row, id in zip(rows, ids, strict=True)]
self.assertCountEqual(results, expected)
self.assertTrue(all(result["id"] is not None for result in results))
# Add the dynamic table.
@@ -865,7 +865,9 @@ def testTimespanDatabaseRepresentation(self):
# Make another list of timespans that span the full range but don't
# overlap. This is a subset of the previous list.
bTimespans = [Timespan(begin=None, end=timestamps[0])]
- bTimespans.extend(Timespan(begin=t1, end=t2) for t1, t2 in zip(timestamps[:-1], timestamps[1:]))
+ bTimespans.extend(
+     Timespan(begin=t1, end=t2) for t1, t2 in zip(timestamps[:-1], timestamps[1:], strict=True)
+ )
bTimespans.append(Timespan(begin=timestamps[-1], end=None))
# Make a database and create a table with that database's timespan
# representation. This one will have no exclusion constraint and
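The bTimespans change above zips a list against itself shifted by one (timestamps[:-1] with timestamps[1:]), so the two inputs are equal in length by construction and strict=True acts purely as a guard. For adjacent pairs like this, Python 3.10 also offers itertools.pairwise; a small stand-alone sketch with dummy values:

    from itertools import pairwise

    timestamps = [1, 5, 9, 12]

    spans_zip = list(zip(timestamps[:-1], timestamps[1:], strict=True))
    spans_pairwise = list(pairwise(timestamps))
    assert spans_zip == spans_pairwise  # both yield [(1, 5), (5, 9), (9, 12)]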
7 changes: 5 additions & 2 deletions python/lsst/daf/butler/registry/tests/_registry.py
@@ -1845,7 +1845,10 @@ def range_set_hull(
child_regions_large = [
range_set_hull(htm6.envelope(c.getBoundingCircle()), htm6) for c in child_regions_small
]
- assert all(large.contains(small) for large, small in zip(child_regions_large, child_regions_small))
+ assert all(
+     large.contains(small)
+     for large, small in zip(child_regions_large, child_regions_small, strict=True)
+ )
parent_region_large = lsst.sphgeom.ConvexPolygon(
list(itertools.chain.from_iterable(c.getVertices() for c in child_regions_large))
)
# real tests later involve what's in the database, not just post-query
# filtering of regions.
child_difference_indices = []
- for large, small in zip(child_regions_large, child_regions_small):
+ for large, small in zip(child_regions_large, child_regions_small, strict=True):
difference = list(unpack_range_set(commonSkyPix.envelope(large) - commonSkyPix.envelope(small)))
assert difference, "if this is empty, we can't test anything useful with these regions"
assert all(
2 changes: 1 addition & 1 deletion python/lsst/daf/butler/tests/utils.py
@@ -155,7 +155,7 @@ def assertAstropyTablesEqual(
if isinstance(expectedTables, AstropyTable):
expectedTables = [expectedTables]
self.assertEqual(len(tables), len(expectedTables))
- for table, expected in zip(tables, expectedTables):
+ for table, expected in zip(tables, expectedTables, strict=True):
# Assert that we are testing what we think we are testing:
self.assertIsInstance(table, AstropyTable)
self.assertIsInstance(expected, AstropyTable)
6 changes: 3 additions & 3 deletions python/lsst/daf/butler/transfers/_yaml.py
@@ -393,7 +393,7 @@ def __init__(self, stream: IO, registry: Registry):
id=refid if not isinstance(refid, int) else _refIntId2UUID[refid],
)
for dataId, refid in zip(
- ensure_iterable(d["data_id"]), ensure_iterable(d["dataset_id"])
+ ensure_iterable(d["data_id"]), ensure_iterable(d["dataset_id"]), strict=True
)
],
formatter=doImportType(d.get("formatter")) if "formatter" in d else None,
@@ -458,12 +458,12 @@ def load(
resolvedRefs = self.registry._importDatasets(datasets)
# Populate our dictionary that maps int dataset_id values from the
# export file to the new DatasetRefs
- for fileId, ref in zip(dataset_ids, resolvedRefs):
+ for fileId, ref in zip(dataset_ids, resolvedRefs, strict=True):
self.refsByFileId[fileId] = ref
# Now iterate over the original records, and install the new
# resolved DatasetRefs to replace the unresolved ones as we
# reorganize the collection information.
- for sliceForFileDataset, fileDataset in zip(slices, records):
+ for sliceForFileDataset, fileDataset in zip(slices, records, strict=True):
fileDataset.refs = resolvedRefs[sliceForFileDataset]
if directory is not None:
fileDataset.path = ResourcePath(directory, forceDirectory=True).join(fileDataset.path)
4 changes: 2 additions & 2 deletions tests/data/registry/spatial.py
@@ -252,7 +252,7 @@ def make_plots(detector_grid: bool, patch_grid: bool):
index_labels(color="black", alpha=0.5),
)
colors = iter(["red", "blue", "cyan", "green"])
- for (visit_id, visit_data), color in zip(VISIT_DATA.items(), colors):
+ for (visit_id, visit_data), color in zip(VISIT_DATA.items(), colors, strict=True):
for detector_id, pixel_indices in visit_data["detector_regions"].items():
label = f"visit={visit_id}"
if label in labels_used:
color=color,
),
)
- for (tract_id, tract_data), color in zip(TRACT_DATA.items(), colors):
+ for (tract_id, tract_data), color in zip(TRACT_DATA.items(), colors, strict=True):
for patch_id, patch_data in tract_data["patches"].items():
label = f"tract={tract_id}"
if label in labels_used:
2 changes: 1 addition & 1 deletion tests/test_connectionString.py
@@ -54,7 +54,7 @@ def testBuilder(self):
regConfigs = [RegistryConfig(os.path.join(self.configDir, name)) for name in self.configFiles]

conStrFactory = ConnectionStringFactory()
- for regConf, fileName in zip(regConfigs, self.configFiles):
+ for regConf, fileName in zip(regConfigs, self.configFiles, strict=True):
conStr = conStrFactory.fromConfig(regConf)
with self.subTest(confFile=fileName):
self.assertEqual(
8 changes: 4 additions & 4 deletions tests/test_datastore.py
@@ -1369,7 +1369,7 @@ def testConstraints(self) -> None:
self.assertTrue(datastore.exists(ref))

# Check each datastore inside the chained datastore
- for childDatastore, expected in zip(datastore.datastores, accept):
+ for childDatastore, expected in zip(datastore.datastores, accept, strict=True):
self.assertEqual(
childDatastore.exists(ref),
expected,
self.assertTrue(datastore.exists(ref))

# Check each datastore inside the chained datastore
- for childDatastore, expected in zip(datastore.datastores, accept):
+ for childDatastore, expected in zip(datastore.datastores, accept, strict=True):
# Ephemeral datastores means InMemory at the moment
# and that does not accept ingest of files.
if childDatastore.isEphemeral:
@@ -1622,7 +1622,7 @@ def assertCache(self, cache_manager: DatastoreCacheManager) -> None:

def testNoCache(self) -> None:
cache_manager = DatastoreDisabledCacheManager("", universe=self.universe)
- for uri, ref in zip(self.files, self.refs):
+ for uri, ref in zip(self.files, self.refs, strict=True):
self.assertFalse(cache_manager.should_be_cached(ref))
self.assertIsNone(cache_manager.move_to_cache(uri, ref))
self.assertFalse(cache_manager.known_to_cache(ref))
@@ -1722,7 +1722,7 @@ def testCacheExpiryDatasetsComposite(self) -> None:

n_datasets = 3
for i in range(n_datasets):
- for component_file, component_ref in zip(self.comp_files[i], self.comp_refs[i]):
+ for component_file, component_ref in zip(self.comp_files[i], self.comp_refs[i], strict=True):
cached = cache_manager.move_to_cache(component_file, component_ref)
self.assertIsNotNone(cached)
self.assertTrue(cache_manager.known_to_cache(component_ref))
2 changes: 1 addition & 1 deletion tests/test_dimensions.py
@@ -346,7 +346,7 @@ def testSchemaGeneration(self):
self.assertIn(foreignKey.table, tableSpecs)
self.assertIn(foreignKey.table, element.graph.dimensions.names)
self.assertEqual(len(foreignKey.source), len(foreignKey.target))
- for source, target in zip(foreignKey.source, foreignKey.target):
+ for source, target in zip(foreignKey.source, foreignKey.target, strict=True):
self.assertIn(source, tableSpec.fields.names)
self.assertIn(target, tableSpecs[foreignKey.table].fields.names)
self.assertEqual(
4 changes: 2 additions & 2 deletions tests/test_logging.py
@@ -68,15 +68,15 @@ def testRecordCapture(self):

self.assertEqual(len(self.handler.records), len(expected))

- for given, record in zip(expected, self.handler.records):
+ for given, record in zip(expected, self.handler.records, strict=True):
self.assertEqual(given[0], record.levelno)
self.assertEqual(given[1], record.message)

# Check that we can serialize the records
json = self.handler.records.json()

records = ButlerLogRecords.parse_raw(json)
- for original_record, new_record in zip(self.handler.records, records):
+ for original_record, new_record in zip(self.handler.records, records, strict=True):
self.assertEqual(new_record, original_record)
self.assertEqual(str(records), str(self.handler.records))
