diff --git a/python/lsst/daf/butler/_quantum_backed.py b/python/lsst/daf/butler/_quantum_backed.py
index 6b3b083c72..6604b7084e 100644
--- a/python/lsst/daf/butler/_quantum_backed.py
+++ b/python/lsst/daf/butler/_quantum_backed.py
@@ -692,7 +692,7 @@ def collect_and_transfer(
     """
     grouped_refs = defaultdict(list)
     summary_records: dict[str, DatastoreRecordData] = {}
-    for quantum, provenance_for_quantum in zip(quanta, provenance):
+    for quantum, provenance_for_quantum in zip(quanta, provenance, strict=True):
         quantum_refs_by_id = {
             ref.id: ref
             for ref in itertools.chain.from_iterable(quantum.outputs.values())
diff --git a/python/lsst/daf/butler/core/dimensions/_schema.py b/python/lsst/daf/butler/core/dimensions/_schema.py
index 01cdeaf8f4..4509d4f205 100644
--- a/python/lsst/daf/butler/core/dimensions/_schema.py
+++ b/python/lsst/daf/butler/core/dimensions/_schema.py
@@ -248,7 +248,9 @@ def columns(self) -> Mapping[ColumnTag, str]:
             element's records (`~collections.abc.Mapping`).
         """
         result: dict[ColumnTag, str] = {}
-        for dimension_name, field_name in zip(self.element.dimensions.names, self.dimensions.names):
+        for dimension_name, field_name in zip(
+            self.element.dimensions.names, self.dimensions.names, strict=True
+        ):
             result[DimensionKeyColumnTag(dimension_name)] = field_name
         for field_name in self.facts.names:
             result[DimensionRecordColumnTag(self.element.name, field_name)] = field_name
diff --git a/python/lsst/daf/butler/core/named.py b/python/lsst/daf/butler/core/named.py
index b00f8cb594..4e187692eb 100644
--- a/python/lsst/daf/butler/core/named.py
+++ b/python/lsst/daf/butler/core/named.py
@@ -103,7 +103,7 @@ def byName(self) -> dict[str, V_co]:
         ``self``, with `str` names as keys. This is always a new object,
         not a view.
         """
-        return dict(zip(self.names, self.values()))
+        return dict(zip(self.names, self.values(), strict=True))
 
     @abstractmethod
     def keys(self) -> NamedValueAbstractSet[K]:  # type: ignore
@@ -195,7 +195,7 @@ def names(self) -> KeysView[str]:
 
     def byName(self) -> dict[str, V]:
         """Return a `dict` with names as keys and the ``self`` values."""
-        return dict(zip(self._names.keys(), self._dict.values()))
+        return dict(zip(self._names.keys(), self._dict.values(), strict=True))
 
     def __len__(self) -> int:
         return len(self._dict)
diff --git a/python/lsst/daf/butler/datastores/chainedDatastore.py b/python/lsst/daf/butler/datastores/chainedDatastore.py
index 5fb0ad2faf..0f3b01e8e8 100644
--- a/python/lsst/daf/butler/datastores/chainedDatastore.py
+++ b/python/lsst/daf/butler/datastores/chainedDatastore.py
@@ -153,7 +153,7 @@ def setConfigRoot(cls, root: str, config: Config, full: Config, overwrite: bool
         containerKey = cls.containerKey
 
         for idx, (child, fullChild) in enumerate(
-            zip(datastoreConfig[containerKey], fullDatastoreConfig[containerKey])
+            zip(datastoreConfig[containerKey], fullDatastoreConfig[containerKey], strict=True)
         ):
             childConfig = DatastoreConfig(child, mergeDefaults=False)
             fullChildConfig = DatastoreConfig(fullChild, mergeDefaults=False)
@@ -417,7 +417,7 @@ def put(self, inMemoryDataset: Any, ref: DatasetRef) -> None:
         nsuccess = 0
         npermanent = 0
         nephemeral = 0
-        for datastore, constraints in zip(self.datastores, self.datastoreConstraints):
+        for datastore, constraints in zip(self.datastores, self.datastoreConstraints, strict=True):
             if (
                 constraints is not None and not constraints.isAcceptable(ref)
             ) or not datastore.constraints.isAcceptable(ref):
@@ -497,7 +497,7 @@ def isDatasetAcceptable(dataset: FileDataset, *, name: str, constraints: Constra
         # ...and remember whether all of the failures are due to
         # NotImplementedError being raised.
         allFailuresAreNotImplementedError = True
-        for datastore, constraints in zip(self.datastores, self.datastoreConstraints):
+        for datastore, constraints in zip(self.datastores, self.datastoreConstraints, strict=True):
             okForChild: list[FileDataset]
             if constraints is not None:
                 okForChild = [
@@ -1024,7 +1024,7 @@ def export(
                 # Try the next datastore.
                 continue
 
-            for ref, export in zip(filtered, this_export):
+            for ref, export in zip(filtered, this_export, strict=True):
                 # Get the position and also delete it from the list.
                 exported[ref_positions.pop(ref)] = export
 
@@ -1097,7 +1097,7 @@ def transfer_from(
                 # Already transferred all datasets known to this datastore.
                 continue
 
-            for datastore, constraints in zip(self.datastores, self.datastoreConstraints):
+            for datastore, constraints in zip(self.datastores, self.datastoreConstraints, strict=True):
                 if constraints is not None:
                     filtered_refs = []
                     for ref in these_refs:
diff --git a/python/lsst/daf/butler/datastores/fileDatastore.py b/python/lsst/daf/butler/datastores/fileDatastore.py
index b7de3c6d14..60b544cc3c 100644
--- a/python/lsst/daf/butler/datastores/fileDatastore.py
+++ b/python/lsst/daf/butler/datastores/fileDatastore.py
@@ -367,7 +367,7 @@ def _delete_artifact(self, location: Location) -> None:
 
     def addStoredItemInfo(self, refs: Iterable[DatasetRef], infos: Iterable[StoredFileInfo]) -> None:
         # Docstring inherited from GenericBaseDatastore
-        records = [info.rebase(ref).to_record() for ref, info in zip(refs, infos)]
+        records = [info.rebase(ref).to_record() for ref, info in zip(refs, infos, strict=True)]
         self._table.insert(*records, transaction=self._transaction)
 
     def getStoredItemsInfo(self, ref: DatasetIdRef) -> list[StoredFileInfo]:
@@ -1434,7 +1434,7 @@ def _process_mexists_records(
             # to the remote URI.
             if self.cacheManager.file_count > 0:
                 ref = id_to_ref[ref_id]
-                for uri, storedFileInfo in zip(uris, infos):
+                for uri, storedFileInfo in zip(uris, infos, strict=True):
                     check_ref = ref
                     if not ref.datasetType.isComponent() and (component := storedFileInfo.component):
                         check_ref = ref.makeComponentRef(component)
diff --git a/python/lsst/daf/butler/datastores/inMemoryDatastore.py b/python/lsst/daf/butler/datastores/inMemoryDatastore.py
index ade6ad5999..a7e6609293 100644
--- a/python/lsst/daf/butler/datastores/inMemoryDatastore.py
+++ b/python/lsst/daf/butler/datastores/inMemoryDatastore.py
@@ -180,7 +180,7 @@ def bridge(self) -> DatastoreRegistryBridge:
 
     def addStoredItemInfo(self, refs: Iterable[DatasetRef], infos: Iterable[StoredMemoryItemInfo]) -> None:
         # Docstring inherited from GenericBaseDatastore.
-        for ref, info in zip(refs, infos):
+        for ref, info in zip(refs, infos, strict=True):
             self.records[ref.id] = info
             self.related.setdefault(info.parentID, set()).add(ref.id)
 
diff --git a/python/lsst/daf/butler/registries/sql.py b/python/lsst/daf/butler/registries/sql.py
index ac3d013419..81d3d164c7 100644
--- a/python/lsst/daf/butler/registries/sql.py
+++ b/python/lsst/daf/butler/registries/sql.py
@@ -662,7 +662,7 @@ def _importDatasets(
                     "dimension row is missing."
                 ) from err
         # Check that imported dataset IDs match the input
-        for imported_ref, input_ref in zip(refs, datasets):
+        for imported_ref, input_ref in zip(refs, datasets, strict=True):
             if imported_ref.id != input_ref.id:
                 raise RegistryConsistencyError(
                     "Imported dataset ID differs from input dataset ID, "
diff --git a/python/lsst/daf/butler/registry/datasets/byDimensions/_storage.py b/python/lsst/daf/butler/registry/datasets/byDimensions/_storage.py
index c801242f87..d08530d8da 100644
--- a/python/lsst/daf/butler/registry/datasets/byDimensions/_storage.py
+++ b/python/lsst/daf/butler/registry/datasets/byDimensions/_storage.py
@@ -613,12 +613,12 @@ def insert(
             }
             tagsRows = [
                 dict(protoTagsRow, dataset_id=row["id"], **dataId.byName())
-                for dataId, row in zip(dataIdList, rows)
+                for dataId, row in zip(dataIdList, rows, strict=True)
             ]
             # Insert those rows into the tags table.
             self._db.insert(self._tags, *tagsRows)
 
-        for dataId, row in zip(dataIdList, rows):
+        for dataId, row in zip(dataIdList, rows, strict=True):
             yield DatasetRef(
                 datasetType=self.datasetType,
                 dataId=dataId,
diff --git a/python/lsst/daf/butler/registry/dimensions/table.py b/python/lsst/daf/butler/registry/dimensions/table.py
index 4969eede78..d89575f687 100644
--- a/python/lsst/daf/butler/registry/dimensions/table.py
+++ b/python/lsst/daf/butler/registry/dimensions/table.py
@@ -98,7 +98,7 @@ def __init__(
         self._fetchColumns: dict[str, sqlalchemy.sql.ColumnElement] = {
             dimension.name: self._table.columns[name]
             for dimension, name in zip(
-                self._element.dimensions, self._element.RecordClass.fields.dimensions.names
+                self._element.dimensions, self._element.RecordClass.fields.dimensions.names, strict=True
             )
         }
         self._skypix_overlap_tables = skypix_overlap_tables
diff --git a/python/lsst/daf/butler/registry/interfaces/_database.py b/python/lsst/daf/butler/registry/interfaces/_database.py
index 6c57eb8d84..bc11d31625 100644
--- a/python/lsst/daf/butler/registry/interfaces/_database.py
+++ b/python/lsst/daf/butler/registry/interfaces/_database.py
@@ -146,7 +146,7 @@ def addTableTuple(self, specs: tuple[ddl.TableSpec, ...]) -> tuple[sqlalchemy.sc
             we cannot represent this with type annotations.
         """
         return specs._make(  # type: ignore
-            self.addTable(name, spec) for name, spec in zip(specs._fields, specs)  # type: ignore
+            self.addTable(name, spec) for name, spec in zip(specs._fields, specs, strict=True)  # type: ignore
         )
 
     def addInitializer(self, initializer: Callable[[Database], None]) -> None:
@@ -1404,7 +1404,7 @@ def format_bad(inconsistencies: dict[str, Any]) -> str:
             return None, inserted_or_updated
         else:
             assert result is not None
-            return {k: v for k, v in zip(returning, result)}, inserted_or_updated
+            return {k: v for k, v in zip(returning, result, strict=True)}, inserted_or_updated
 
     def insert(
         self,
diff --git a/python/lsst/daf/butler/registry/tests/_database.py b/python/lsst/daf/butler/registry/tests/_database.py
index 5d6143294d..2db9049e27 100644
--- a/python/lsst/daf/butler/registry/tests/_database.py
+++ b/python/lsst/daf/butler/registry/tests/_database.py
@@ -409,7 +409,7 @@ def testInsertQueryDelete(self):
             r._asdict()
             for r in self.query_list(db, tables.b.select().where(tables.b.columns.id > results[1]["id"]))
         ]
-        expected = [dict(row, id=id) for row, id in zip(rows, ids)]
+        expected = [dict(row, id=id) for row, id in zip(rows, ids, strict=True)]
         self.assertCountEqual(results, expected)
         self.assertTrue(all(result["id"] is not None for result in results))
         # Insert multiple rows into a table with an autoincrement primary key,
@@ -417,7 +417,7 @@ def testInsertQueryDelete(self):
         rows = [{"b_id": results[0]["id"]}, {"b_id": None}]
         ids = db.insert(tables.c, *rows, returnIds=True)
         results = [r._asdict() for r in self.query_list(db, tables.c.select())]
-        expected = [dict(row, id=id) for row, id in zip(rows, ids)]
+        expected = [dict(row, id=id) for row, id in zip(rows, ids, strict=True)]
         self.assertCountEqual(results, expected)
         self.assertTrue(all(result["id"] is not None for result in results))
         # Add the dynamic table.
@@ -865,7 +865,9 @@ def testTimespanDatabaseRepresentation(self):
         # Make another list of timespans that span the full range but don't
         # overlap. This is a subset of the previous list.
         bTimespans = [Timespan(begin=None, end=timestamps[0])]
-        bTimespans.extend(Timespan(begin=t1, end=t2) for t1, t2 in zip(timestamps[:-1], timestamps[1:]))
+        bTimespans.extend(
+            Timespan(begin=t1, end=t2) for t1, t2 in zip(timestamps[:-1], timestamps[1:], strict=True)
+        )
         bTimespans.append(Timespan(begin=timestamps[-1], end=None))
         # Make a database and create a table with that database's timespan
         # representation. This one will have no exclusion constraint and
diff --git a/python/lsst/daf/butler/registry/tests/_registry.py b/python/lsst/daf/butler/registry/tests/_registry.py
index b5dc20eaf2..af8aaaaf81 100644
--- a/python/lsst/daf/butler/registry/tests/_registry.py
+++ b/python/lsst/daf/butler/registry/tests/_registry.py
@@ -1845,7 +1845,10 @@ def range_set_hull(
         child_regions_large = [
             range_set_hull(htm6.envelope(c.getBoundingCircle()), htm6) for c in child_regions_small
         ]
-        assert all(large.contains(small) for large, small in zip(child_regions_large, child_regions_small))
+        assert all(
+            large.contains(small)
+            for large, small in zip(child_regions_large, child_regions_small, strict=True)
+        )
         parent_region_large = lsst.sphgeom.ConvexPolygon(
             list(itertools.chain.from_iterable(c.getVertices() for c in child_regions_large))
         )
@@ -1858,7 +1861,7 @@ def range_set_hull(
         # real tests later involve what's in the database, not just post-query
         # filtering of regions.
         child_difference_indices = []
-        for large, small in zip(child_regions_large, child_regions_small):
+        for large, small in zip(child_regions_large, child_regions_small, strict=True):
             difference = list(unpack_range_set(commonSkyPix.envelope(large) - commonSkyPix.envelope(small)))
             assert difference, "if this is empty, we can't test anything useful with these regions"
             assert all(
diff --git a/python/lsst/daf/butler/tests/utils.py b/python/lsst/daf/butler/tests/utils.py
index 93e76a8ba5..ff90c1ab23 100644
--- a/python/lsst/daf/butler/tests/utils.py
+++ b/python/lsst/daf/butler/tests/utils.py
@@ -155,7 +155,7 @@ def assertAstropyTablesEqual(
         if isinstance(expectedTables, AstropyTable):
             expectedTables = [expectedTables]
         self.assertEqual(len(tables), len(expectedTables))
-        for table, expected in zip(tables, expectedTables):
+        for table, expected in zip(tables, expectedTables, strict=True):
             # Assert that we are testing what we think we are testing:
             self.assertIsInstance(table, AstropyTable)
             self.assertIsInstance(expected, AstropyTable)
diff --git a/python/lsst/daf/butler/transfers/_yaml.py b/python/lsst/daf/butler/transfers/_yaml.py
index 46e1950ab1..fc3e7362ad 100644
--- a/python/lsst/daf/butler/transfers/_yaml.py
+++ b/python/lsst/daf/butler/transfers/_yaml.py
@@ -393,7 +393,7 @@ def __init__(self, stream: IO, registry: Registry):
                             id=refid if not isinstance(refid, int) else _refIntId2UUID[refid],
                         )
                         for dataId, refid in zip(
-                            ensure_iterable(d["data_id"]), ensure_iterable(d["dataset_id"])
+                            ensure_iterable(d["data_id"]), ensure_iterable(d["dataset_id"]), strict=True
                         )
                     ],
                     formatter=doImportType(d.get("formatter")) if "formatter" in d else None,
@@ -458,12 +458,12 @@ def load(
         resolvedRefs = self.registry._importDatasets(datasets)
         # Populate our dictionary that maps int dataset_id values from the
         # export file to the new DatasetRefs
-        for fileId, ref in zip(dataset_ids, resolvedRefs):
+        for fileId, ref in zip(dataset_ids, resolvedRefs, strict=True):
             self.refsByFileId[fileId] = ref
         # Now iterate over the original records, and install the new
         # resolved DatasetRefs to replace the unresolved ones as we
         # reorganize the collection information.
-        for sliceForFileDataset, fileDataset in zip(slices, records):
+        for sliceForFileDataset, fileDataset in zip(slices, records, strict=True):
             fileDataset.refs = resolvedRefs[sliceForFileDataset]
             if directory is not None:
                 fileDataset.path = ResourcePath(directory, forceDirectory=True).join(fileDataset.path)
diff --git a/tests/data/registry/spatial.py b/tests/data/registry/spatial.py
index 7bcc110140..d18ff224fb 100644
--- a/tests/data/registry/spatial.py
+++ b/tests/data/registry/spatial.py
@@ -252,7 +252,7 @@ def make_plots(detector_grid: bool, patch_grid: bool):
            index_labels(color="black", alpha=0.5),
        )
    colors = iter(["red", "blue", "cyan", "green"])
-    for (visit_id, visit_data), color in zip(VISIT_DATA.items(), colors):
+    for (visit_id, visit_data), color in zip(VISIT_DATA.items(), colors, strict=True):
        for detector_id, pixel_indices in visit_data["detector_regions"].items():
            label = f"visit={visit_id}"
            if label in labels_used:
@@ -274,7 +274,7 @@ def make_plots(detector_grid: bool, patch_grid: bool):
                    color=color,
                ),
            )
-    for (tract_id, tract_data), color in zip(TRACT_DATA.items(), colors):
+    for (tract_id, tract_data), color in zip(TRACT_DATA.items(), colors, strict=True):
        for patch_id, patch_data in tract_data["patches"].items():
            label = f"tract={tract_id}"
            if label in labels_used:
diff --git a/tests/test_connectionString.py b/tests/test_connectionString.py
index b61695123c..72aa552294 100644
--- a/tests/test_connectionString.py
+++ b/tests/test_connectionString.py
@@ -54,7 +54,7 @@ def testBuilder(self):
         regConfigs = [RegistryConfig(os.path.join(self.configDir, name)) for name in self.configFiles]
 
         conStrFactory = ConnectionStringFactory()
-        for regConf, fileName in zip(regConfigs, self.configFiles):
+        for regConf, fileName in zip(regConfigs, self.configFiles, strict=True):
             conStr = conStrFactory.fromConfig(regConf)
             with self.subTest(confFile=fileName):
                 self.assertEqual(
diff --git a/tests/test_datastore.py b/tests/test_datastore.py
index f54ded75ee..92da810ec5 100644
--- a/tests/test_datastore.py
+++ b/tests/test_datastore.py
@@ -1369,7 +1369,7 @@ def testConstraints(self) -> None:
             self.assertTrue(datastore.exists(ref))
 
             # Check each datastore inside the chained datastore
-            for childDatastore, expected in zip(datastore.datastores, accept):
+            for childDatastore, expected in zip(datastore.datastores, accept, strict=True):
                 self.assertEqual(
                     childDatastore.exists(ref),
                     expected,
@@ -1384,7 +1384,7 @@ def testConstraints(self) -> None:
             self.assertTrue(datastore.exists(ref))
 
             # Check each datastore inside the chained datastore
-            for childDatastore, expected in zip(datastore.datastores, accept):
+            for childDatastore, expected in zip(datastore.datastores, accept, strict=True):
                 # Ephemeral datastores means InMemory at the moment
                 # and that does not accept ingest of files.
                 if childDatastore.isEphemeral:
@@ -1622,7 +1622,7 @@ def assertCache(self, cache_manager: DatastoreCacheManager) -> None:
 
     def testNoCache(self) -> None:
         cache_manager = DatastoreDisabledCacheManager("", universe=self.universe)
-        for uri, ref in zip(self.files, self.refs):
+        for uri, ref in zip(self.files, self.refs, strict=True):
             self.assertFalse(cache_manager.should_be_cached(ref))
             self.assertIsNone(cache_manager.move_to_cache(uri, ref))
             self.assertFalse(cache_manager.known_to_cache(ref))
@@ -1722,7 +1722,7 @@ def testCacheExpiryDatasetsComposite(self) -> None:
 
         n_datasets = 3
         for i in range(n_datasets):
-            for component_file, component_ref in zip(self.comp_files[i], self.comp_refs[i]):
+            for component_file, component_ref in zip(self.comp_files[i], self.comp_refs[i], strict=True):
                 cached = cache_manager.move_to_cache(component_file, component_ref)
                 self.assertIsNotNone(cached)
                 self.assertTrue(cache_manager.known_to_cache(component_ref))
diff --git a/tests/test_dimensions.py b/tests/test_dimensions.py
index 21c309822b..f510c61d47 100644
--- a/tests/test_dimensions.py
+++ b/tests/test_dimensions.py
@@ -346,7 +346,7 @@ def testSchemaGeneration(self):
                 self.assertIn(foreignKey.table, tableSpecs)
                 self.assertIn(foreignKey.table, element.graph.dimensions.names)
                 self.assertEqual(len(foreignKey.source), len(foreignKey.target))
-                for source, target in zip(foreignKey.source, foreignKey.target):
+                for source, target in zip(foreignKey.source, foreignKey.target, strict=True):
                     self.assertIn(source, tableSpec.fields.names)
                     self.assertIn(target, tableSpecs[foreignKey.table].fields.names)
                     self.assertEqual(
diff --git a/tests/test_logging.py b/tests/test_logging.py
index 8c122b3f5f..f4aee1636a 100644
--- a/tests/test_logging.py
+++ b/tests/test_logging.py
@@ -68,7 +68,7 @@ def testRecordCapture(self):
 
         self.assertEqual(len(self.handler.records), len(expected))
 
-        for given, record in zip(expected, self.handler.records):
+        for given, record in zip(expected, self.handler.records, strict=True):
             self.assertEqual(given[0], record.levelno)
             self.assertEqual(given[1], record.message)
 
@@ -76,7 +76,7 @@ def testRecordCapture(self):
         json = self.handler.records.json()
         records = ButlerLogRecords.parse_raw(json)
 
-        for original_record, new_record in zip(self.handler.records, records):
+        for original_record, new_record in zip(self.handler.records, records, strict=True):
            self.assertEqual(new_record, original_record)
 
        self.assertEqual(str(records), str(self.handler.records))
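
A minimal sketch of the behavior change every hunk above applies, using hypothetical stand-in data (the names below are not identifiers from this diff): on Python 3.10+, zip(..., strict=True) raises ValueError when its arguments have different lengths, where plain zip silently truncates to the shortest input.

    refs = ["ref-a", "ref-b", "ref-c"]   # hypothetical example data
    infos = ["info-a", "info-b"]         # one element short

    # Old behavior: the length mismatch is silently ignored and "ref-c" is dropped.
    assert list(zip(refs, infos)) == [("ref-a", "info-a"), ("ref-b", "info-b")]

    # New behavior: the mismatch is reported loudly.
    try:
        list(zip(refs, infos, strict=True))
    except ValueError as exc:
        print(exc)  # zip() argument 2 is shorter than argument 1

Each changed call site pairs sequences that are expected to have the same length, so the stricter check turns silent truncation into an immediate error.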