diff --git a/python/lsst/daf/butler/_butler.py b/python/lsst/daf/butler/_butler.py index 0005af9375..b8cb73005a 100644 --- a/python/lsst/daf/butler/_butler.py +++ b/python/lsst/daf/butler/_butler.py @@ -210,7 +210,7 @@ def __init__( "Cannot pass 'config', 'searchPaths', or 'writeable' arguments with 'butler' argument." ) self.registry = butler.registry.copy(defaults) - self.datastore = butler.datastore + self._datastore = butler._datastore self.storageClasses = butler.storageClasses self._config: ButlerConfig = butler._config else: @@ -225,7 +225,7 @@ def __init__( self.registry = Registry.fromConfig( self._config, butlerRoot=butlerRoot, writeable=writeable, defaults=defaults ) - self.datastore = Datastore.fromConfig( + self._datastore = Datastore.fromConfig( self._config, self.registry.getDatastoreBridgeManager(), butlerRoot=butlerRoot ) self.storageClasses = StorageClassFactory() @@ -240,7 +240,7 @@ def __init__( # dependency-inversion trick. This is not used by regular butler, # but we do not have a way to distinguish regular butler from execution # butler. - self.datastore.set_retrieve_dataset_type_method(self._retrieve_dataset_type) + self._datastore.set_retrieve_dataset_type_method(self._retrieve_dataset_type) if "run" in self._config or "collection" in self._config: raise ValueError("Passing a run or collection via configuration is no longer supported.") @@ -524,7 +524,7 @@ def __reduce__(self) -> tuple: def __str__(self) -> str: return "Butler(collections={}, run={}, datastore='{}', registry='{}')".format( - self.collections, self.run, self.datastore, self.registry + self.collections, self.run, self._datastore, self.registry ) def isWriteable(self) -> bool: @@ -538,7 +538,7 @@ def transaction(self) -> Iterator[None]: Transactions can be nested. """ with self.registry.transaction(): - with self.datastore.transaction(): + with self._datastore.transaction(): yield def _standardizeArgs( @@ -1163,13 +1163,13 @@ def put( self.registry._importDatasets([datasetRefOrType], expand=True) # Before trying to write to the datastore check that it does not # know this dataset. This is prone to races, of course. - if self.datastore.knows(datasetRefOrType): + if self._datastore.knows(datasetRefOrType): raise ConflictingDefinitionError(f"Datastore already contains dataset: {datasetRefOrType}") # Try to write dataset to the datastore, if it fails due to a race # with another write, the content of stored data may be # unpredictable. try: - self.datastore.put(obj, datasetRefOrType) + self._datastore.put(obj, datasetRefOrType) except IntegrityError as e: raise ConflictingDefinitionError(f"Datastore already contains dataset: {e}") return datasetRefOrType @@ -1185,7 +1185,7 @@ def put( # Add Registry Dataset entry. dataId = self.registry.expandDataId(dataId, graph=datasetType.dimensions, **kwargs) (ref,) = self.registry.insertDatasets(datasetType, run=run, dataIds=[dataId]) - self.datastore.put(obj, ref) + self._datastore.put(obj, ref) return ref @@ -1223,7 +1223,7 @@ def getDirect( obj : `object` The dataset. """ - return self.datastore.get(ref, parameters=parameters, storageClass=storageClass) + return self._datastore.get(ref, parameters=parameters, storageClass=storageClass) @deprecated( reason="Butler.getDeferred() now behaves like getDirectDeferred() when given a DatasetRef. " @@ -1266,7 +1266,7 @@ def getDirectDeferred( Raised if no matching dataset exists in the `Registry`. """ # Check that dataset actually exists.
- if not self.datastore.exists(ref): + if not self._datastore.exists(ref): raise LookupError(f"Dataset reference {ref} does not exist.") return DeferredDatasetHandle(butler=self, ref=ref, parameters=parameters, storageClass=storageClass) @@ -1325,7 +1325,7 @@ def getDeferred( TypeError Raised if no collections were provided. """ - if isinstance(datasetRefOrType, DatasetRef) and not self.datastore.exists(datasetRefOrType): + if isinstance(datasetRefOrType, DatasetRef) and not self._datastore.exists(datasetRefOrType): raise LookupError(f"Dataset reference {datasetRefOrType} does not exist.") ref = self._findDatasetRef(datasetRefOrType, dataId, collections=collections, **kwargs) return DeferredDatasetHandle(butler=self, ref=ref, parameters=parameters, storageClass=storageClass) @@ -1396,7 +1396,7 @@ def get( """ log.debug("Butler get: %s, dataId=%s, parameters=%s", datasetRefOrType, dataId, parameters) ref = self._findDatasetRef(datasetRefOrType, dataId, collections=collections, **kwargs) - return self.datastore.get(ref, parameters=parameters, storageClass=storageClass) + return self._datastore.get(ref, parameters=parameters, storageClass=storageClass) def getURIs( self, @@ -1445,7 +1445,7 @@ def getURIs( ref = self._findDatasetRef( datasetRefOrType, dataId, predict=predict, run=run, collections=collections, **kwargs ) - return self.datastore.getURIs(ref, predict) + return self._datastore.getURIs(ref, predict) def getURI( self, @@ -1562,7 +1562,7 @@ def retrieveArtifacts( a hierarchical data structure in a NoSQL database may well be stored as a JSON file. """ - return self.datastore.retrieveArtifacts( + return self._datastore.retrieveArtifacts( refs, ResourcePath(destination), transfer=transfer, @@ -1645,11 +1645,11 @@ def exists( return existence existence |= DatasetExistence.RECORDED - if self.datastore.knows(ref): + if self._datastore.knows(ref): existence |= DatasetExistence.DATASTORE if full_check: - if self.datastore.exists(ref): + if self._datastore.exists(ref): existence |= DatasetExistence._ARTIFACT elif existence.value != DatasetExistence.UNRECOGNIZED.value: # Do not add this flag if we have no other idea about a dataset. @@ -1706,13 +1706,13 @@ def _exists_many( existence[ref] |= DatasetExistence.RECORDED # Ask datastore if it knows about these refs. 
- knows = self.datastore.knows_these(refs) + knows = self._datastore.knows_these(refs) for ref, known in knows.items(): if known: existence[ref] |= DatasetExistence.DATASTORE if full_check: - mexists = self.datastore.mexists(refs) + mexists = self._datastore.mexists(refs) for ref, exists in mexists.items(): if exists: existence[ref] |= DatasetExistence._ARTIFACT @@ -1776,7 +1776,7 @@ def datasetExists( ) else: ref = self._findDatasetRef(datasetRefOrType, dataId, collections=collections, **kwargs) - return self.datastore.exists(ref) + return self._datastore.exists(ref) def removeRuns(self, names: Iterable[str], unstore: bool = True) -> None: """Remove one or more `~CollectionType.RUN` collections and the @@ -1808,17 +1808,17 @@ def removeRuns(self, names: Iterable[str], unstore: bool = True) -> None: if collectionType is not CollectionType.RUN: raise TypeError(f"The collection type of '{name}' is {collectionType.name}, not RUN.") refs.extend(self.registry.queryDatasets(..., collections=name, findFirst=True)) - with self.datastore.transaction(): + with self._datastore.transaction(): with self.registry.transaction(): if unstore: - self.datastore.trash(refs) + self._datastore.trash(refs) else: - self.datastore.forget(refs) + self._datastore.forget(refs) for name in names: self.registry.removeCollection(name) if unstore: # Point of no return for removing artifacts - self.datastore.emptyTrash() + self._datastore.emptyTrash() def pruneDatasets( self, @@ -1863,10 +1863,10 @@ def pruneDatasets( # mutating the Registry (it can _look_ at Datastore-specific things, # but shouldn't change them), and hence all operations here are # Registry operations. - with self.datastore.transaction(): + with self._datastore.transaction(): with self.registry.transaction(): if unstore: - self.datastore.trash(refs) + self._datastore.trash(refs) if purge: self.registry.removeDatasets(refs) elif disassociate: @@ -1884,7 +1884,7 @@ def pruneDatasets( # in the dataset_location_trash table. if unstore: # Point of no return for removing artifacts - self.datastore.emptyTrash() + self._datastore.emptyTrash() @transactional def ingest( @@ -2056,7 +2056,9 @@ def ingest( # (_importDatasets only complains if they exist but differ) so # we have to catch IntegrityError explicitly. try: - self.datastore.ingest(*datasets, transfer=transfer, record_validation_info=record_validation_info) + self._datastore.ingest( + *datasets, transfer=transfer, record_validation_info=record_validation_info + ) except IntegrityError as e: raise ConflictingDefinitionError(f"Datastore already contains one or more datasets: {e}") @@ -2136,7 +2138,7 @@ def export( backend = BackendClass(stream, universe=self.dimensions) try: helper = RepoExportContext( - self.registry, self.datastore, backend=backend, directory=directory, transfer=transfer + self.registry, self._datastore, backend=backend, directory=directory, transfer=transfer ) yield helper except BaseException: @@ -2230,7 +2232,7 @@ def doImport(importStream: TextIO | ResourceHandleProtocol) -> None: backend.register() with self.transaction(): backend.load( - self.datastore, + self._datastore, directory=directory, transfer=transfer, skip_dimensions=skip_dimensions, @@ -2315,7 +2317,7 @@ def transfer_from( # this with no datastore records. 
artifact_existence: dict[ResourcePath, bool] = {} if skip_missing: - dataset_existence = source_butler.datastore.mexists( + dataset_existence = source_butler._datastore.mexists( source_refs, artifact_existence=artifact_existence ) source_refs = [ref for ref, exists in dataset_existence.items() if exists] @@ -2442,8 +2444,8 @@ def transfer_from( # Ask the datastore to transfer. The datastore has to check that # the source datastore is compatible with the target datastore. - accepted, rejected = self.datastore.transfer_from( - source_butler.datastore, + accepted, rejected = self._datastore.transfer_from( + source_butler._datastore, source_refs, transfer=transfer, artifact_existence=artifact_existence, @@ -2536,13 +2538,13 @@ def validateConfiguration( datastoreErrorStr = None try: - self.datastore.validateConfiguration(entities, logFailures=logFailures) + self._datastore.validateConfiguration(entities, logFailures=logFailures) except ValidationError as e: datastoreErrorStr = str(e) # Also check that the LookupKeys used by the datastores match # registry and storage class definitions - keys = self.datastore.getLookupKeys() + keys = self._datastore.getLookupKeys() failedNames = set() failedDataId = set() diff --git a/python/lsst/daf/butler/_limited_butler.py b/python/lsst/daf/butler/_limited_butler.py index 14e295d59a..c71987a3d3 100644 --- a/python/lsst/daf/butler/_limited_butler.py +++ b/python/lsst/daf/butler/_limited_butler.py @@ -161,7 +161,7 @@ def get( to use a resolved `DatasetRef`. Subclasses can support more options. """ log.debug("Butler get: %s, parameters=%s, storageClass: %s", ref, parameters, storageClass) - return self.datastore.get(ref, parameters=parameters, storageClass=storageClass) + return self._datastore.get(ref, parameters=parameters, storageClass=storageClass) @deprecated( reason="Butler.get() now behaves like Butler.getDirect() when given a DatasetRef." @@ -197,7 +197,7 @@ def getDirect( obj : `object` The dataset. """ - return self.datastore.get(ref, parameters=parameters, storageClass=storageClass) + return self._datastore.get(ref, parameters=parameters, storageClass=storageClass) @deprecated( reason="Butler.getDeferred() now behaves like getDirectDeferred() when given a DatasetRef. " @@ -289,7 +289,7 @@ def stored(self, ref: DatasetRef) -> bool: Whether the dataset artifact exists in the datastore and can be retrieved. """ - return self.datastore.exists(ref) + return self._datastore.exists(ref) def stored_many( self, @@ -309,7 +309,7 @@ def stored_many( Mapping from given dataset refs to boolean indicating artifact existence. """ - return self.datastore.mexists(refs) + return self._datastore.mexists(refs) @deprecated( reason="Butler.datasetExistsDirect() has been replaced by Butler.stored(). " @@ -410,13 +410,19 @@ def dimensions(self) -> DimensionUniverse: """ raise NotImplementedError() - datastore: Datastore - """The object that manages actual dataset storage (`Datastore`). + @property + @deprecated( + reason="The Butler.datastore property is now deprecated. Butler APIs should now exist with the " + "relevant functionality. Will be removed after v27.0.", + version="v26.0", + category=FutureWarning, + ) + def datastore(self) -> Datastore: + """The object that manages actual dataset storage. (`Datastore`)""" + return self._datastore - Direct user access to the datastore should rarely be necessary; the primary - exception is the case where a `Datastore` implementation provides extra - functionality beyond what the base class defines. 
- """ + _datastore: Datastore + """The object that manages actual dataset storage (`Datastore`).""" storageClasses: StorageClassFactory """An object that maps known storage class names to objects that fully diff --git a/python/lsst/daf/butler/_quantum_backed.py b/python/lsst/daf/butler/_quantum_backed.py index 9a6b311fd2..e0c193eb7c 100644 --- a/python/lsst/daf/butler/_quantum_backed.py +++ b/python/lsst/daf/butler/_quantum_backed.py @@ -170,12 +170,12 @@ def __init__( self._unavailable_inputs: set[DatasetId] = set() self._actual_inputs: set[DatasetId] = set() self._actual_output_refs: set[DatasetRef] = set() - self.datastore = datastore + self._datastore = datastore self.storageClasses = storageClasses self._dataset_types: Mapping[str, DatasetType] = {} if dataset_types is not None: self._dataset_types = dataset_types - self.datastore.set_retrieve_dataset_type_method(self._retrieve_dataset_type) + self._datastore.set_retrieve_dataset_type_method(self._retrieve_dataset_type) @classmethod def initialize( @@ -227,7 +227,7 @@ def initialize( predicted_outputs=predicted_outputs, dimensions=dimensions, filename=filename, - datastore_records=quantum.datastore_records, + datastore_records=quantum._datastore_records, OpaqueManagerClass=OpaqueManagerClass, BridgeManagerClass=BridgeManagerClass, search_paths=search_paths, @@ -491,7 +491,7 @@ def put(self, obj: Any, ref: DatasetRef, /) -> DatasetRef: # Docstring inherited. if ref.id not in self._predicted_outputs: raise RuntimeError("Cannot `put` dataset that was not predicted as an output.") - self.datastore.put(obj, ref) + self._datastore.put(obj, ref) self._actual_output_refs.add(ref) return ref @@ -523,7 +523,7 @@ def pruneDatasets( raise ValueError(f"Can not prune a component of a dataset (ref={ref})") if unstore: - self.datastore.trash(refs) + self._datastore.trash(refs) if purge: for ref in refs: # We only care about removing them from actual output refs, @@ -531,7 +531,7 @@ def pruneDatasets( if unstore: # Point of no return for removing artifacts - self.datastore.emptyTrash() + self._datastore.emptyTrash() def extract_provenance_data(self) -> QuantumProvenanceData: """Extract provenance information and datastore records from this @@ -582,7 +582,7 @@ def extract_provenance_data(self) -> QuantumProvenanceData: "recorded in provenance may be incomplete.", self._predicted_inputs - checked_inputs, ) - datastore_records = self.datastore.export_records(self._actual_output_refs) + datastore_records = self._datastore.export_records(self._actual_output_refs) provenance_records = { datastore_name: records.to_simple() for datastore_name, records in datastore_records.items() } @@ -716,7 +716,7 @@ def collect_and_transfer( for refs in grouped_refs.values(): butler.registry._importDatasets(refs) - butler.datastore.import_records(summary_records) + butler._datastore.import_records(summary_records) @classmethod def parse_raw(cls, *args: Any, **kwargs: Any) -> QuantumProvenanceData: diff --git a/python/lsst/daf/butler/script/queryDatasets.py b/python/lsst/daf/butler/script/queryDatasets.py index d78968aa87..0063e7adce 100644 --- a/python/lsst/daf/butler/script/queryDatasets.py +++ b/python/lsst/daf/butler/script/queryDatasets.py @@ -195,7 +195,7 @@ def getTables(self) -> list[AstropyTable]: tables[dataset_ref.datasetType.name].add(dataset_ref) else: d = list(self.datasets) - ref_uris = self.butler.datastore.getManyURIs(d, predict=True) + ref_uris = self.butler._datastore.getManyURIs(d, predict=True) for ref, uris in ref_uris.items(): if uris.primaryURI: 
tables[ref.datasetType.name].add(ref, uris.primaryURI) diff --git a/python/lsst/daf/butler/server.py b/python/lsst/daf/butler/server.py index 57e00b7576..e6996ac134 100644 --- a/python/lsst/daf/butler/server.py +++ b/python/lsst/daf/butler/server.py @@ -149,7 +149,7 @@ def get_uri(id: DatasetId, butler: Butler = Depends(butler_readonly_dependency)) if not ref: raise HTTPException(status_code=404, detail=f"Dataset with id {id} does not exist.") - uri = butler.datastore.getURI(ref) + uri = butler.getURI(ref) # In reality would have to convert this to a signed URL return str(uri) diff --git a/python/lsst/daf/butler/tests/_examplePythonTypes.py b/python/lsst/daf/butler/tests/_examplePythonTypes.py index 9a3ce9a287..c7599dac6d 100644 --- a/python/lsst/daf/butler/tests/_examplePythonTypes.py +++ b/python/lsst/daf/butler/tests/_examplePythonTypes.py @@ -141,7 +141,7 @@ class : `lsst.daf.butler.StorageClass` The newly created storage class, or the class of the same name previously found in the repository. """ - storageRegistry = butler.datastore.storageClassFactory + storageRegistry = butler._datastore.storageClassFactory storage = StorageClass(name, *args, **kwargs) try: @@ -149,7 +149,7 @@ class : `lsst.daf.butler.StorageClass` except ValueError: storage = storageRegistry.getStorageClass(name) - for registry in _getAllFormatterRegistries(butler.datastore): + for registry in _getAllFormatterRegistries(butler._datastore): registry.registerFormatter(storage, formatter) return storage diff --git a/python/lsst/daf/butler/tests/_testRepo.py b/python/lsst/daf/butler/tests/_testRepo.py index 0f69c20f84..bd38302c52 100644 --- a/python/lsst/daf/butler/tests/_testRepo.py +++ b/python/lsst/daf/butler/tests/_testRepo.py @@ -541,9 +541,9 @@ class DatastoreMock: @staticmethod def apply(butler: Butler) -> None: """Apply datastore mocks to a butler.""" - butler.datastore.export = DatastoreMock._mock_export # type: ignore - butler.datastore.get = DatastoreMock._mock_get # type: ignore - butler.datastore.ingest = MagicMock() # type: ignore + butler._datastore.export = DatastoreMock._mock_export # type: ignore + butler._datastore.get = DatastoreMock._mock_get # type: ignore + butler._datastore.ingest = MagicMock() # type: ignore @staticmethod def _mock_export( diff --git a/tests/test_butler.py b/tests/test_butler.py index 7e94633b8c..b6a5022c1f 100644 --- a/tests/test_butler.py +++ b/tests/test_butler.py @@ -312,7 +312,7 @@ def runPutGetTest(self, storageClass: StorageClass, datasetTypeName: str) -> But ) # Can the artifacts themselves be retrieved? - if not butler.datastore.isEphemeral: + if not butler._datastore.isEphemeral: root_uri = ResourcePath(self.root) for preserve_path in (True, False): @@ -341,7 +341,7 @@ def runPutGetTest(self, storageClass: StorageClass, datasetTypeName: str) -> But else: self.assertEqual(num_seps, 0) - primary_uri, secondary_uris = butler.datastore.getURIs(ref) + primary_uri, secondary_uris = butler.getURIs(ref) n_uris = len(secondary_uris) if primary_uri: n_uris += 1 @@ -591,7 +591,7 @@ def testConstructor(self) -> None: butler2 = Butler(butler=butler, collections=["other"]) self.assertEqual(butler2.collections, ("other",)) self.assertIsNone(butler2.run) - self.assertIs(butler.datastore, butler2.datastore) + self.assertIs(butler._datastore, butler2._datastore) # Test that we can use an environment variable to find this # repository. 
@@ -697,7 +697,7 @@ def testCompositePutGetVirtual(self) -> None: self.assertEqual(len(datasets), 1) uri, components = butler.getURIs(datasets[0]) - if butler.datastore.isEphemeral: + if butler._datastore.isEphemeral: # Never disassemble in-memory datastore self.assertIsInstance(uri, ResourcePath) self.assertFalse(components) @@ -715,7 +715,7 @@ def testCompositePutGetVirtual(self) -> None: dataId = {"instrument": "DummyCamComp", "visit": 424} uri, components = butler.getURIs(datasets[0].datasetType, dataId=dataId, predict=True) - if butler.datastore.isEphemeral: + if butler._datastore.isEphemeral: # Never disassembled self.assertIsInstance(uri, ResourcePath) self.assertFalse(components) @@ -949,7 +949,7 @@ def testIngest(self) -> None: # Check that the datastore recorded no file size. # Not all datastores can support this. try: - infos = butler.datastore.getStoredItemsInfo(datasets[0].refs[0]) # type: ignore[attr-defined] + infos = butler._datastore.getStoredItemsInfo(datasets[0].refs[0]) # type: ignore[attr-defined] self.assertEqual(infos[0].file_size, -1) except AttributeError: pass @@ -1161,7 +1161,7 @@ def testStringification(self) -> None: if self.registryStr is not None: self.assertIn(self.registryStr, butlerStr) - datastoreName = butler.datastore.name + datastoreName = butler._datastore.name if self.datastoreName is not None: for testStr in self.datastoreName: self.assertIn(testStr, datastoreName) @@ -1270,8 +1270,8 @@ def testPutTemplates(self) -> None: ) # Check the template based on dimensions - if hasattr(butler.datastore, "templates"): - butler.datastore.templates.validateTemplates([ref]) + if hasattr(butler._datastore, "templates"): + butler._datastore.templates.validateTemplates([ref]) # Put with extra data ID keys (physical_filter is an optional # dependency); should not change template (at least the way we're @@ -1285,8 +1285,8 @@ def testPutTemplates(self) -> None: ) # Check the template based on dimensions - if hasattr(butler.datastore, "templates"): - butler.datastore.templates.validateTemplates([ref]) + if hasattr(butler._datastore, "templates"): + butler._datastore.templates.validateTemplates([ref]) # Use a template that has a typo in dimension record metadata. # Easier to test with a butler that has a ref with records attached. @@ -1415,8 +1415,8 @@ def testRemoveRuns(self) -> None: butler.registry.getCollectionType(run1) with self.assertRaises(MissingCollectionError): butler.registry.getCollectionType(run2) - self.assertFalse(butler.datastore.exists(ref1)) - self.assertFalse(butler.datastore.exists(ref2)) + self.assertFalse(butler.stored(ref1)) + self.assertFalse(butler.stored(ref2)) # The ref we unstored should be gone according to the URI, but the # one we forgot should still be around. 
self.assertFalse(uri1.exists()) @@ -1467,8 +1467,8 @@ def testExportTransferCopy(self) -> None: datasets = list(exportButler.registry.queryDatasets(..., collections=...)) self.assertGreater(len(datasets), 0) uris = [exportButler.getURI(d) for d in datasets] - assert isinstance(exportButler.datastore, FileDatastore) - datastoreRoot = exportButler.datastore.root + assert isinstance(exportButler._datastore, FileDatastore) + datastoreRoot = exportButler._datastore.root pathsInStore = [uri.relative_to(datastoreRoot) for uri in uris] @@ -1491,7 +1491,7 @@ def testExportTransferCopy(self) -> None: def testPruneDatasets(self) -> None: storageClass = self.storageClassFactory.getStorageClass("StructuredDataNoComponents") butler = Butler(self.tmpConfigFile, writeable=True) - assert isinstance(butler.datastore, FileDatastore) + assert isinstance(butler._datastore, FileDatastore) # Load registry data with dimensions to hang datasets off of. registryDataDir = os.path.normpath(os.path.join(TESTDIR, "data", "registry")) butler.import_(filename=os.path.join(registryDataDir, "base.yaml")) @@ -1574,11 +1574,11 @@ def testPruneDatasets(self) -> None: butler.pruneDatasets([ref1], purge=False, unstore=True, disassociate=False) # File has been removed. - uri2 = butler.datastore.getURI(ref2) + uri2 = butler.getURI(ref2) uri2.remove() # Datastore has lost track. - butler.datastore.forget([ref3]) + butler._datastore.forget([ref3]) # First test with a standard butler. exists_many = butler._exists_many([ref0, ref1, ref2, ref3], full_check=True) @@ -1599,7 +1599,7 @@ def testPruneDatasets(self) -> None: self.assertEqual(butler.exists(ref, full_check=False), exists) # Test again with a trusting butler. - butler.datastore.trustGetRequest = True + butler._datastore.trustGetRequest = True exists_many = butler._exists_many([ref0, ref1, ref2, ref3], full_check=True) self.assertEqual(exists_many[ref0], DatasetExistence.UNRECOGNIZED) self.assertEqual(exists_many[ref1], DatasetExistence.RECORDED) @@ -1622,7 +1622,7 @@ def testPruneDatasets(self) -> None: self.assertEqual(exists, exists_many[ref2]) # Remove everything and start from scratch. - butler.datastore.trustGetRequest = False + butler._datastore.trustGetRequest = False butler.pruneDatasets(refs, purge=True, unstore=True) for ref in refs: butler.put(metric, ref) @@ -1631,32 +1631,32 @@ def testPruneDatasets(self) -> None: # datastore in an odd state. Do them at the end. # Check that in normal mode, deleting the record will lead to # trash not touching the file. - uri1 = butler.datastore.getURI(ref1) - butler.datastore.bridge.moveToTrash([ref1], transaction=None) # Update the dataset_location table - butler.datastore.forget([ref1]) - butler.datastore.trash(ref1) - butler.datastore.emptyTrash() + uri1 = butler.getURI(ref1) + butler._datastore.bridge.moveToTrash([ref1], transaction=None) # Update the dataset_location table + butler._datastore.forget([ref1]) + butler._datastore.trash(ref1) + butler._datastore.emptyTrash() self.assertTrue(uri1.exists()) uri1.remove() # Clean it up. # Simulate execution butler setup by deleting the datastore # record but keeping the file around and trusting. - butler.datastore.trustGetRequest = True - uri2 = butler.datastore.getURI(ref2) - uri3 = butler.datastore.getURI(ref3) + butler._datastore.trustGetRequest = True + uri2 = butler.getURI(ref2) + uri3 = butler.getURI(ref3) self.assertTrue(uri2.exists()) self.assertTrue(uri3.exists()) # Remove the datastore record. 
- butler.datastore.bridge.moveToTrash([ref2], transaction=None) # Update the dataset_location table - butler.datastore.forget([ref2]) + butler._datastore.bridge.moveToTrash([ref2], transaction=None) # Update the dataset_location table + butler._datastore.forget([ref2]) self.assertTrue(uri2.exists()) - butler.datastore.trash([ref2, ref3]) + butler._datastore.trash([ref2, ref3]) # Immediate removal for ref2 file self.assertFalse(uri2.exists()) # But ref3 has to wait for the empty. self.assertTrue(uri3.exists()) - butler.datastore.emptyTrash() + butler._datastore.emptyTrash() self.assertFalse(uri3.exists()) # Clear out the datasets from registry. @@ -2065,7 +2065,7 @@ def testTransferMissing(self) -> None: self.create_butlers() # Configure the source butler to allow trust. - self._enable_trust(self.source_butler.datastore) + self._enable_trust(self.source_butler._datastore) self.assertButlerTransfers(purge=True) @@ -2077,7 +2077,7 @@ def testTransferMissingDisassembly(self) -> None: self.create_butlers() # Configure the source butler to allow trust. - self._enable_trust(self.source_butler.datastore) + self._enable_trust(self.source_butler._datastore) # Test disassembly. self.assertButlerTransfers(purge=True, storageClassName="StructuredComposite") @@ -2204,10 +2204,10 @@ def assertButlerTransfers(self, purge: bool = False, storageClassName: str = "St # file out. # Access the individual datastores. datastores = [] - if hasattr(butler.datastore, "datastores"): - datastores.extend(butler.datastore.datastores) + if hasattr(butler._datastore, "datastores"): + datastores.extend(butler._datastore.datastores) else: - datastores.append(butler.datastore) + datastores.append(butler._datastore) if not deleted: # For a chained datastore we need to remove @@ -2295,16 +2295,16 @@ def assertButlerTransfers(self, purge: bool = False, storageClassName: str = "St # Also do an explicit low-level transfer to trigger some # edge cases. with self.assertLogs(level=logging.DEBUG) as log_cm: - self.target_butler.datastore.transfer_from(self.source_butler.datastore, source_refs) + self.target_butler._datastore.transfer_from(self.source_butler._datastore, source_refs) log_output = ";".join(log_cm.output) self.assertIn("no file artifacts exist", log_output) with self.assertRaises((TypeError, AttributeError)): - self.target_butler.datastore.transfer_from(self.source_butler, source_refs) # type: ignore + self.target_butler._datastore.transfer_from(self.source_butler, source_refs) # type: ignore with self.assertRaises(ValueError): - self.target_butler.datastore.transfer_from( - self.source_butler.datastore, source_refs, transfer="split" + self.target_butler._datastore.transfer_from( + self.source_butler._datastore, source_refs, transfer="split" ) # Now try to get the same refs from the new butler. 
diff --git a/tests/test_cliCmdQueryDatasets.py b/tests/test_cliCmdQueryDatasets.py index 948a6c63de..4d1a311c0d 100644 --- a/tests/test_cliCmdQueryDatasets.py +++ b/tests/test_cliCmdQueryDatasets.py @@ -160,7 +160,7 @@ def testChained(self): self.assertAstropyTablesEqual( tables, - expectedFilesystemDatastoreTables(testRepo.butler.datastore.datastores[1].root), + expectedFilesystemDatastoreTables(testRepo.butler._datastore.datastores[1].root), filterColumns=True, ) @@ -171,7 +171,7 @@ def testShowURI(self): tables = self._queryDatasets(repo=self.repoDir, show_uri=True) self.assertAstropyTablesEqual( - tables, expectedFilesystemDatastoreTables(testRepo.butler.datastore.root), filterColumns=True + tables, expectedFilesystemDatastoreTables(testRepo.butler._datastore.root), filterColumns=True ) def testNoShowURI(self): @@ -271,7 +271,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "424", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "foo/test_metric_comp.data/test_metric_comp_v00000424_fDummyCamComp_data.yaml" ), ), @@ -282,7 +282,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "423", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "ingest/run/test_metric_comp.data/" "test_metric_comp_v00000423_fDummyCamComp_data.yaml" ), @@ -294,7 +294,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "424", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "ingest/run/test_metric_comp.data/" "test_metric_comp_v00000424_fDummyCamComp_data.yaml" ), @@ -313,7 +313,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "424", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "foo/test_metric_comp.output/" "test_metric_comp_v00000424_fDummyCamComp_output.yaml" ), @@ -325,7 +325,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "423", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "ingest/run/test_metric_comp.output/" "test_metric_comp_v00000423_fDummyCamComp_output.yaml" ), @@ -337,7 +337,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "424", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "ingest/run/test_metric_comp.output/" "test_metric_comp_v00000424_fDummyCamComp_output.yaml" ), @@ -356,7 +356,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "424", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "foo/test_metric_comp.summary/" "test_metric_comp_v00000424_fDummyCamComp_summary.yaml" ), @@ -368,7 +368,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "423", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "ingest/run/test_metric_comp.summary/" "test_metric_comp_v00000423_fDummyCamComp_summary.yaml" ), @@ -380,7 +380,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "424", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "ingest/run/test_metric_comp.summary/" "test_metric_comp_v00000424_fDummyCamComp_summary.yaml" ), @@ -410,7 +410,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "424", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "foo/test_metric_comp.data/test_metric_comp_v00000424_fDummyCamComp_data.yaml" ), ), @@ -421,7 +421,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "423", - 
testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "ingest/run/test_metric_comp.data/" "test_metric_comp_v00000423_fDummyCamComp_data.yaml" ), @@ -440,7 +440,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "424", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "foo/test_metric_comp.output/" "test_metric_comp_v00000424_fDummyCamComp_output.yaml" ), @@ -452,7 +452,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "423", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "ingest/run/test_metric_comp.output/" "test_metric_comp_v00000423_fDummyCamComp_output.yaml" ), @@ -471,7 +471,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "424", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "foo/test_metric_comp.summary/" "test_metric_comp_v00000424_fDummyCamComp_summary.yaml" ), @@ -483,7 +483,7 @@ def testFindFirstAndCollections(self): "DummyCamComp", "d-r", "423", - testRepo.butler.datastore.root.join( + testRepo.butler._datastore.root.join( "ingest/run/test_metric_comp.summary/" "test_metric_comp_v00000423_fDummyCamComp_summary.yaml" ), diff --git a/tests/test_quantumBackedButler.py b/tests/test_quantumBackedButler.py index f2382d50b9..18c6c898e7 100644 --- a/tests/test_quantumBackedButler.py +++ b/tests/test_quantumBackedButler.py @@ -104,13 +104,13 @@ def tearDown(self) -> None: def make_quantum(self, step: int = 1) -> Quantum: """Make a Quantum which includes datastore records.""" if step == 1: - datastore_records = self.butler.datastore.export_records(self.all_input_refs) + datastore_records = self.butler._datastore.export_records(self.all_input_refs) predictedInputs = {self.datasetTypeInput: self.input_refs} outputs = {self.datasetTypeOutput: self.output_refs} initInputs = {self.datasetTypeInit: self.init_inputs_refs[0]} elif step == 2: # The result should be empty, this is just to test that it works. - datastore_records = self.butler.datastore.export_records(self.output_refs) + datastore_records = self.butler._datastore.export_records(self.output_refs) predictedInputs = {self.datasetTypeInput: self.output_refs} outputs = {self.datasetTypeOutput2: self.output_refs2} initInputs = {} @@ -155,7 +155,7 @@ def test_initialize_repo_index(self) -> None: def test_from_predicted(self) -> None: """Test for from_predicted factory method""" - datastore_records = self.butler.datastore.export_records(self.all_input_refs) + datastore_records = self.butler._datastore.export_records(self.all_input_refs) qbb = QuantumBackedButler.from_predicted( config=self.config, predicted_inputs=[ref.id for ref in self.all_input_refs],
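Note for downstream callers: with Butler.datastore becoming private, the public Butler APIs exercised in this diff cover the common direct-datastore uses. A minimal migration sketch, assuming a hypothetical repository path "repo", a hypothetical collection "some/run", and an already-resolved DatasetRef named ref:

from lsst.daf.butler import Butler

butler = Butler("repo", collections=["some/run"])  # hypothetical repo and collection

# Before: butler.datastore.exists(ref) / butler.datastore.mexists(refs)
is_stored = butler.stored(ref)
stored_map = butler.stored_many([ref])

# Before: butler.datastore.getURI(ref) / butler.datastore.getURIs(ref)
uri = butler.getURI(ref)
primary, components = butler.getURIs(ref)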
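Code that still reads butler.datastore keeps working until the attribute is removed after v27.0, but the new property in _limited_butler.py emits the declared deprecation warning. A small sketch, assuming butler is any constructed Butler, of how a caller or test could confirm the FutureWarning:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    _ = butler.datastore  # deprecated accessor; forwards to the private _datastore
assert any(issubclass(w.category, FutureWarning) for w in caught)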