diff --git a/abis_mapping/base/mapper.py b/abis_mapping/base/mapper.py index 194b8527..1001aeee 100644 --- a/abis_mapping/base/mapper.py +++ b/abis_mapping/base/mapper.py @@ -55,25 +55,133 @@ def apply_validation( frictionless.Report: Validation report for the data. """ - @abc.abstractmethod def apply_mapping( self, + *, data: base_types.ReadableType, - dataset_iri: Optional[rdflib.URIRef] = None, - base_iri: Optional[rdflib.Namespace] = None, + chunk_size: int | None, + dataset_iri: rdflib.URIRef | None = None, + base_iri: rdflib.Namespace | None = None, **kwargs: Any, ) -> Iterator[rdflib.Graph]: """Applies Mapping from Raw Data to ABIS conformant RDF. Args: - data (ReadableType): Readable raw data. - dataset_iri (Optional[rdflib.URIRef]): Optional dataset IRI. - base_iri (Optional[rdflib.Namespace]): Optional mapping base IRI. - **kwargs (Any): Additional keyword arguments. + data: Readable raw data. + chunk_size: Size of chunks to split raw data into. None to disable chunking. + dataset_iri: Optional dataset IRI. + base_iri: Optional mapping base IRI. + **kwargs: Additional keyword arguments. Yields: rdflib.Graph: ABIS Conformant RDF Sub-Graph from Raw Data Chunk. 
""" + # Check chunk size + if chunk_size is not None and chunk_size <= 0: + raise ValueError("chunk_size must be greater than zero") + + # Construct Schema and extra fields schema + schema = self.extra_fields_schema( + data=data, + full_schema=True, + ) + extra_schema = self.extra_fields_schema( + data=data, + full_schema=False, + ) + + # Construct Resource + resource = frictionless.Resource( + source=data, + format="csv", # TODO -> Hardcoded to csv for now + schema=schema, + encoding="utf-8", + ) + + # Initialise Graph + graph = utils.rdf.create_graph() + graph_has_rows: bool = False + + # Check if Dataset IRI Supplied + if not dataset_iri: + # If not supplied, create example "default" Dataset IRI + dataset_iri = utils.rdf.uri(f"dataset/{self.DATASET_DEFAULT_NAME}", base_iri) + + # Add the default dataset + self.add_default_dataset( + uri=dataset_iri, + base_iri=base_iri, + graph=graph, + ) + + # Add per-chunk mapping for first chunk + self.apply_mapping_chunk(dataset=dataset_iri, graph=graph) + + # Open the Resource to allow row streaming + with resource.open() as r: + # Loop through rows + for row_num, row in enumerate(r.row_stream, start=1): + # Map row + self.apply_mapping_row( + row=row, + dataset=dataset_iri, + graph=graph, + extra_schema=extra_schema, + base_iri=base_iri, + **kwargs, + ) + graph_has_rows = True + + # yield chunk if required + if chunk_size is not None and row_num % chunk_size == 0: + yield graph + # Initialise New Graph for next chunk + graph = utils.rdf.create_graph() + graph_has_rows = False + self.apply_mapping_chunk(dataset=dataset_iri, graph=graph) + + # yield final chunk, or whole graph if not chunking. + if graph_has_rows or chunk_size is None: + yield graph + + def apply_mapping_chunk( + self, + *, + dataset: rdflib.URIRef, + graph: rdflib.Graph, + ) -> None: + """Applies mapping for RDF that should be present in every chunk. + + This method can be extended by subclasses, remember to call super()! 
+ + Args: + dataset: The Dataset URI + graph: The graph for the chunk to add the mapping to. + """ + # This should be in every chunk, so the type of the dataset can be resolved. + graph.add((dataset, a, utils.namespaces.TERN.Dataset)) + + @abc.abstractmethod + def apply_mapping_row( + self, + *, + row: frictionless.Row, + dataset: rdflib.URIRef, + graph: rdflib.Graph, + extra_schema: frictionless.Schema, + base_iri: rdflib.Namespace | None, + **kwargs: Any, + ) -> None: + """Applies Mapping for a Row in the template by mutating the passed Graph. + + Args: + row: Row from the template to be processed. + dataset: Dataset URI. + graph: Graph to map row into. + extra_schema: Template schema including any extra fields. + base_iri: Optional base IRI namespace to use for mapping. + kwargs: Additional keyword arguments. + """ def add_default_dataset( self, diff --git a/abis_mapping/templates/incidental_occurrence_data_v3/mapping.py b/abis_mapping/templates/incidental_occurrence_data_v3/mapping.py index 2976da95..988ca330 100644 --- a/abis_mapping/templates/incidental_occurrence_data_v3/mapping.py +++ b/abis_mapping/templates/incidental_occurrence_data_v3/mapping.py @@ -15,7 +15,7 @@ from abis_mapping import vocabs # Typing -from typing import Iterator, Optional, Any +from typing import Any # Constants and Shortcuts @@ -141,104 +141,16 @@ def apply_validation( # Return Validation Report return report - def apply_mapping( - self, - data: base.types.ReadableType, - dataset_iri: Optional[rdflib.URIRef] = None, - base_iri: Optional[rdflib.Namespace] = None, - **kwargs: Any, - ) -> Iterator[rdflib.Graph]: - """Applies Mapping for the `incidental_occurrence_data.csv` Template - - Args: - data (base.types.ReadableType): Valid raw data to be mapped. - dataset_iri (Optional[rdflib.URIRef]): Optional dataset IRI. - base_iri (Optional[rdflib.Namespace]): Optional mapping base IRI. 
- - Keyword Args: - chunk_size (Optional[int]): How many rows of the original data to - ingest before yielding a graph. `None` will ingest all rows. - - Yields: - rdflib.Graph: ABIS Conformant RDF Sub-Graph from Raw Data Chunk. - """ - # Extract keyword arguments - chunk_size = kwargs.get("chunk_size") - if not isinstance(chunk_size, int) or chunk_size < 1: - chunk_size = None - - # Construct Schema - schema = self.extra_fields_schema( - data=data, - full_schema=True, - ) - extra_schema = self.extra_fields_schema( - data=data, - full_schema=False, - ) - - # Construct Resource - resource = frictionless.Resource( - source=data, - format="csv", # TODO -> Hardcoded to csv for now - schema=schema, - encoding="utf-8", - ) - - # Initialise Graph - graph = utils.rdf.create_graph() - - # Check if Dataset IRI Supplied - if dataset_iri: - # If supplied, add just the dataset type. - graph.add((dataset_iri, a, utils.namespaces.TERN.Dataset)) - else: - # If not supplied, create example "default" Dataset IRI - dataset_iri = utils.rdf.uri(f"dataset/{self.DATASET_DEFAULT_NAME}", base_iri) - - # Add Example Default Dataset if not Supplied - self.add_default_dataset( - uri=dataset_iri, - base_iri=base_iri, - graph=graph, - ) - - # Open the Resource to allow row streaming - with resource.open() as r: - # Loop through Rows - for row in r.row_stream: - # Map Row - self.apply_mapping_row( - row=row, - dataset=dataset_iri, - graph=graph, - extra_schema=extra_schema, - base_iri=base_iri, - ) - - # Check Whether to Yield a Chunk - # The row_number needs to be reduced by one as the numbering of rows - # in a Resource includes the header - if chunk_size is not None and (row.row_number - 1) % chunk_size == 0: - # Yield Chunk - yield graph - - # Initialise New Graph - graph = utils.rdf.create_graph() - # Every chunk should have this node - graph.add((dataset_iri, a, utils.namespaces.TERN.Dataset)) - - # Yield - yield graph - def apply_mapping_row( self, + *, row: frictionless.Row, dataset: 
rdflib.URIRef, graph: rdflib.Graph, extra_schema: frictionless.Schema, - base_iri: Optional[rdflib.Namespace] = None, - ) -> rdflib.Graph: + base_iri: rdflib.Namespace | None, + **kwargs: Any, + ) -> None: """Applies Mapping for a Row in the `incidental_occurrence_data.csv` Template Args: @@ -1166,9 +1078,6 @@ def apply_mapping_row( extra_schema=extra_schema, ) - # Return - return graph - def add_provider_identified( self, uri: rdflib.URIRef, diff --git a/abis_mapping/templates/survey_metadata_v2/mapping.py b/abis_mapping/templates/survey_metadata_v2/mapping.py index c0ad79a4..9d897f7a 100644 --- a/abis_mapping/templates/survey_metadata_v2/mapping.py +++ b/abis_mapping/templates/survey_metadata_v2/mapping.py @@ -15,7 +15,7 @@ from abis_mapping import utils # Typing -from typing import Optional, Iterator, Any +from typing import Any # Constants / shortcuts @@ -106,82 +106,15 @@ def apply_validation(self, data: base.types.ReadableType, **kwargs: Any) -> fric # Return validation report return report - def apply_mapping( - self, - data: base.types.ReadableType, - dataset_iri: Optional[rdflib.URIRef] = None, - base_iri: Optional[rdflib.Namespace] = None, - **kwargs: Any, - ) -> Iterator[rdflib.Graph]: - """Applies mapping for the `survey_metadata.csv` template. - - Args: - data (base.types.ReadableType): Valid raw data to be mapped. - dataset_iri (Optional[rdflib.URIRef]): Optional dataset IRI. - base_iri (Optional[rdflib.Namespace]): Optional mapping base IRI. - **kwargs (Any): Additional keyword arguments. - - Yields: - rdflib.Graph: ABIS conformant RDF sub-graph from raw data chunk. 
- """ - # Construct Schema - schema = self.extra_fields_schema( - data=data, - full_schema=True, - ) - extra_schema = self.extra_fields_schema( - data=data, - full_schema=False, - ) - - # Construct Resource - resource = frictionless.Resource( - source=data, - format="csv", # TODO -> Hardcoded to csv for now - schema=schema, - encoding="utf-8", - ) - - # Initialise Graph - graph = utils.rdf.create_graph() - - # Check if Dataset IRI Supplied - if dataset_iri: - # If supplied, add just the dataset type. - graph.add((dataset_iri, a, utils.namespaces.TERN.Dataset)) - else: - # If not supplied, create example "default" Dataset IRI - dataset_iri = utils.rdf.uri(f"dataset/{self.DATASET_DEFAULT_NAME}", base_iri) - - # Add the default dataset - self.add_default_dataset( - uri=dataset_iri, - base_iri=base_iri, - graph=graph, - ) - - # Open the Resource to allow row streaming - with resource.open() as r: - # Loop through rows - for row in r.row_stream: - # Map row - self.apply_mapping_row( - row=row, - dataset=dataset_iri, - graph=graph, - extra_schema=extra_schema, - base_iri=base_iri, - ) - - yield graph - def apply_mapping_row( self, + *, row: frictionless.Row, dataset: rdflib.URIRef, graph: rdflib.Graph, extra_schema: frictionless.Schema, - base_iri: Optional[rdflib.Namespace] = None, + base_iri: rdflib.Namespace | None, + **kwargs: Any, ) -> None: """Applies mapping for a row in the `survey_metadata.csv` template. 
diff --git a/abis_mapping/templates/survey_occurrence_data_v2/mapping.py b/abis_mapping/templates/survey_occurrence_data_v2/mapping.py index a58f7a73..b7115075 100644 --- a/abis_mapping/templates/survey_occurrence_data_v2/mapping.py +++ b/abis_mapping/templates/survey_occurrence_data_v2/mapping.py @@ -13,7 +13,7 @@ from abis_mapping import vocabs # Typing -from typing import Iterator, Optional, Any +from typing import Any # Constants and Shortcuts @@ -257,115 +257,16 @@ def extract_site_visit_id_keys( # Construct dictionary and return return {row["siteVisitID"]: True for row in r.row_stream if row["siteVisitID"]} - def apply_mapping( - self, - data: base.types.ReadableType, - dataset_iri: Optional[rdflib.URIRef] = None, - base_iri: Optional[rdflib.Namespace] = None, - **kwargs: Any, - ) -> Iterator[rdflib.Graph]: - """Applies Mapping for the `survey_occurrence_data.csv` Template - - Args: - data (base.types.ReadableType): Valid raw data to be mapped. - dataset_iri (Optional[rdflib.URIRef]): Optional dataset IRI. - base_iri (Optional[rdflib.Namespace]): Optional mapping base IRI. - - Keyword Args: - chunk_size (Optional[int]): How many rows of the original data to - ingest before yielding a graph. `None` will ingest all rows. - site_id_geometry_map (dict[str, str]): Default values of geometry wkt - to use for a given site id. - site_visit_id_temporal_map (dict[str, str]): Default values of - temporal entity rdf, as turtle, to use for a given site visit id. - - Yields: - rdflib.Graph: ABIS Conformant RDF Sub-Graph from Raw Data Chunk. 
- """ - # Extract keyword arguments - chunk_size = kwargs.get("chunk_size") - if not isinstance(chunk_size, int): - chunk_size = None - - site_id_geometry_map = kwargs.get("site_id_geometry_map") - site_visit_id_temporal_map = kwargs.get("site_visit_id_temporal_map") - - # Construct Schema - schema = self.extra_fields_schema( - data=data, - full_schema=True, - ) - extra_schema = self.extra_fields_schema( - data=data, - full_schema=False, - ) - - # Construct Resource - resource = frictionless.Resource( - source=data, - format="csv", # TODO -> Hardcoded to csv for now - schema=schema, - encoding="utf-8", - ) - - # Initialise Graph - graph = utils.rdf.create_graph() - - # Check if Dataset IRI Supplied - if dataset_iri: - # If supplied, add just the dataset type. - graph.add((dataset_iri, a, utils.namespaces.TERN.Dataset)) - else: - # If not supplied, create example "default" Dataset IRI - dataset_iri = utils.rdf.uri(f"dataset/{self.DATASET_DEFAULT_NAME}", base_iri) - - # Add Example Default Dataset if not Supplied - self.add_default_dataset( - uri=dataset_iri, - base_iri=base_iri, - graph=graph, - ) - - # Open the Resource to allow row streaming - with resource.open() as r: - # Loop through Rows - for row in r.row_stream: - # Map Row - self.apply_mapping_row( - row=row, - dataset=dataset_iri, - graph=graph, - extra_schema=extra_schema, - base_iri=base_iri, - site_id_geometry_map=site_id_geometry_map, - site_visit_id_temporal_map=site_visit_id_temporal_map, - ) - - # Check Whether to Yield a Chunk - # The row_number needs to be reduced by one as the numbering of rows - # in a Resource includes the header. 
- if chunk_size is not None and (row.row_number - 1) % chunk_size == 0: - # Yield Chunk - yield graph - - # Initialise New Graph - graph = utils.rdf.create_graph() - # Every chunk should have this node - graph.add((dataset_iri, a, utils.namespaces.TERN.Dataset)) - - # Yield - yield graph - def apply_mapping_row( self, + *, row: frictionless.Row, dataset: rdflib.URIRef, graph: rdflib.Graph, extra_schema: frictionless.Schema, - base_iri: Optional[rdflib.Namespace] = None, - site_id_geometry_map: dict[str, str] | None = None, - site_visit_id_temporal_map: dict[str, str] | None = None, - ) -> rdflib.Graph: + base_iri: rdflib.Namespace | None, + **kwargs: Any, + ) -> None: """Applies Mapping for a Row in the `survey_occurrence_data.csv` Template Args: @@ -383,6 +284,9 @@ def apply_mapping_row( Returns: rdflib.Graph: Graph with row mapped into it. """ + site_id_geometry_map = kwargs.get("site_id_geometry_map") + site_visit_id_temporal_map = kwargs.get("site_visit_id_temporal_map") + # Get values from row provider_record_id: str = row["providerRecordID"] provider_record_id_source: str = row["providerRecordIDSource"] @@ -1367,9 +1271,6 @@ def apply_mapping_row( extra_schema=extra_schema, ) - # Return - return graph - def add_provider_identified( self, uri: rdflib.URIRef, diff --git a/abis_mapping/templates/survey_site_data_v2/mapping.py b/abis_mapping/templates/survey_site_data_v2/mapping.py index 74fda456..42e5f7c1 100644 --- a/abis_mapping/templates/survey_site_data_v2/mapping.py +++ b/abis_mapping/templates/survey_site_data_v2/mapping.py @@ -19,7 +19,7 @@ from abis_mapping import vocabs # Typing -from typing import Any, Optional, Iterator, Literal +from typing import Any, Literal # Constants and shortcuts @@ -218,85 +218,15 @@ def extract_geometry_defaults( return result - def apply_mapping( - self, - data: base.types.ReadableType, - dataset_iri: Optional[rdflib.URIRef] = None, - base_iri: Optional[rdflib.Namespace] = None, - **kwargs: Any, - ) -> 
Iterator[rdflib.Graph]: - """Applies Mapping for the `survey_site_data.csv` Template. - - Args: - data (base.types.ReadableType): Valid raw data to be mapped. - dataset_iri (Optional[rdflib.URIRef]): Optional dataset IRI. - base_iri (Optional[rdflib.Namespace]): Optional mapping base IRI. - - Keyword Args: - chunk_size (Optional[int]): How many rows of the original data to - ingest before yielding a graph. `None` will ingest all rows. - - Yields: - rdflib.Graph: ABIS Conformant RDF Sub-Graph from Raw Data Chunk. - """ - # Construct Schema - schema = self.extra_fields_schema( - data=data, - full_schema=True, - ) - extra_schema = self.extra_fields_schema( - data=data, - full_schema=False, - ) - - # Construct Resource - resource = frictionless.Resource( - source=data, - format="csv", # TODO -> Hardcoded to csv for now - schema=schema, - encoding="utf-8", - ) - - # Initialise Graph - graph = utils.rdf.create_graph() - - # Check if Dataset IRI Supplied - if dataset_iri: - # If supplied, add just the dataset type. - graph.add((dataset_iri, a, utils.namespaces.TERN.Dataset)) - else: - # If not supplied, create example "default" Dataset IRI - dataset_iri = utils.rdf.uri(f"dataset/{self.DATASET_DEFAULT_NAME}", base_iri) - - # Add the default dataset - self.add_default_dataset( - uri=dataset_iri, - base_iri=base_iri, - graph=graph, - ) - - # Open the Resource to allow row streaming - with resource.open() as r: - # Loop through rows - for row in r.row_stream: - # Map row - self.apply_mapping_row( - row=row, - dataset=dataset_iri, - graph=graph, - extra_schema=extra_schema, - base_iri=base_iri, - ) - - yield graph - def apply_mapping_row( self, + *, row: frictionless.Row, dataset: rdflib.URIRef, graph: rdflib.Graph, extra_schema: frictionless.Schema, - base_iri: Optional[rdflib.Namespace], + base_iri: rdflib.Namespace | None, + **kwargs: Any, ) -> None: """Applies mapping for a row in the `survey_site_data.csv` template. 
diff --git a/abis_mapping/templates/survey_site_visit_data_v2/mapping.py b/abis_mapping/templates/survey_site_visit_data_v2/mapping.py index c4534a45..f27f2af6 100644 --- a/abis_mapping/templates/survey_site_visit_data_v2/mapping.py +++ b/abis_mapping/templates/survey_site_visit_data_v2/mapping.py @@ -14,7 +14,7 @@ from abis_mapping import utils # Typing -from typing import Any, Iterator +from typing import Any # Constants / shortcuts @@ -203,105 +203,15 @@ def add_temporal_coverage_bnode( graph.add((end, a, rdflib.TIME.Instant)) graph.add((end, end_date.rdf_in_xsd, end_date.to_rdf_literal())) - def apply_mapping( - self, - data: base.types.ReadableType, - dataset_iri: rdflib.URIRef | None = None, - base_iri: rdflib.Namespace | None = None, - **kwargs: Any, - ) -> Iterator[rdflib.Graph]: - """Applies Mapping from Raw Data to ABIS conformant RDF. - - Args: - data (ReadableType): Readable raw data. - dataset_iri (Optional[rdflib.URIRef]): Optional dataset IRI. - base_iri (Optional[rdflib.Namespace]): Optional mapping base IRI. - **kwargs (Any): Additional keyword arguments. - - Keyword Args: - chunk_size (Optional[int]): How many rows of the original data to - ingest before yielding a graph. `None` will ingest all rows. - - Yields: - rdflib.Graph: ABIS Conformant RDF Sub-Graph from Raw Data Chunk. 
- """ - # Extract keyword arguments - chunk_size: int | None = kwargs.get("chunk_size") - if not isinstance(chunk_size, int) or chunk_size <= 0: - chunk_size = None - - # Construct Schema - schema = self.extra_fields_schema( - data=data, - full_schema=True, - ) - extra_schema = self.extra_fields_schema( - data=data, - full_schema=False, - ) - - # Construct Resource - resource = frictionless.Resource( - source=data, - format="csv", # TODO -> Hardcoded to csv for now - schema=schema, - encoding="utf-8", - ) - - # Initialise Graph - graph = utils.rdf.create_graph() - graph_has_data: bool = False - - # Check if Dataset IRI Supplied - if dataset_iri: - # If supplied, add just the dataset type. - graph.add((dataset_iri, a, utils.namespaces.TERN.Dataset)) - else: - # If not supplied, create example "default" Dataset IRI - dataset_iri = utils.rdf.uri(f"dataset/{self.DATASET_DEFAULT_NAME}", base_iri) - - # Add the default dataset - self.add_default_dataset( - uri=dataset_iri, - base_iri=base_iri, - graph=graph, - ) - graph_has_data = True - - # Open the Resource to allow row streaming - with resource.open() as r: - # Loop through rows - for i, row in enumerate(r.row_stream, start=1): - # Map row - self.apply_mapping_row( - row=row, - dataset=dataset_iri, - graph=graph, - extra_schema=extra_schema, - base_iri=base_iri, - ) - graph_has_data = True - - # yield chunk if required - if chunk_size is not None and i % chunk_size == 0: - yield graph - # Initialise New Graph for next chunk - graph = utils.rdf.create_graph() - graph_has_data = False - # Every chunk should have this node - graph.add((dataset_iri, a, utils.namespaces.TERN.Dataset)) - - # yield final chunk, or whole graph if not chunking. 
- if chunk_size is None or graph_has_data: - yield graph - def apply_mapping_row( self, + *, row: frictionless.Row, dataset: rdflib.URIRef, graph: rdflib.Graph, extra_schema: frictionless.Schema, base_iri: rdflib.Namespace | None, + **kwargs: Any, ) -> None: """Applies mapping for a row in the Survey Site Visit Data template. diff --git a/scripts/generate_example_ttl_files.py b/scripts/generate_example_ttl_files.py index 74dcfa82..5bf91072 100644 --- a/scripts/generate_example_ttl_files.py +++ b/scripts/generate_example_ttl_files.py @@ -80,7 +80,7 @@ def main() -> None: raise RuntimeError(f"Mapper not found for {template_id}") # Map data data = input_csv_file_path.read_bytes() - graphs = list(mapper().apply_mapping(data)) + graphs = list(mapper().apply_mapping(data=data, chunk_size=None)) if len(graphs) != 1: raise RuntimeError("apply_mapping did not produce exactly 1 graph") # Write to output file diff --git a/tests/base/test_mapper.py b/tests/base/test_mapper.py index 72340c69..543ea1b9 100644 --- a/tests/base/test_mapper.py +++ b/tests/base/test_mapper.py @@ -18,7 +18,7 @@ from abis_mapping import utils # Typing -from typing import Any, Optional, Iterator +from typing import Any from abis_mapping.base import types as base_types @@ -35,14 +35,17 @@ def close(self) -> None: class StubMapper(base.mapper.ABISMapper): - def apply_mapping( + def apply_mapping_row( self, - data: base_types.ReadableType, - dataset_iri: Optional[rdflib.URIRef] = None, - base_iri: Optional[rdflib.Namespace] = None, + *, + row: frictionless.Row, + dataset: rdflib.URIRef, + graph: rdflib.Graph, + extra_schema: frictionless.Schema, + base_iri: rdflib.Namespace | None, **kwargs: Any, - ) -> Iterator[rdflib.Graph]: - yield from [] + ) -> None: + pass def apply_validation( # type: ignore[empty-body] self, data: base_types.ReadableType, **kwargs: Any diff --git a/tests/templates/test_mapping.py b/tests/templates/test_mapping.py index faa2139a..bab054b8 100644 --- 
a/tests/templates/test_mapping.py +++ b/tests/templates/test_mapping.py @@ -34,7 +34,7 @@ def test_apply_mapping(template_id: str, test_params: conftest.MappingParameters assert mapper # Map - graphs = list(mapper().apply_mapping(data)) + graphs = list(mapper().apply_mapping(data=data, chunk_size=None)) # Assert assert len(graphs) == 1 @@ -81,7 +81,7 @@ def test_against_shacl(template_id: str, test_params: conftest.MappingParameters assert mapper # Map - graphs = list(mapper().apply_mapping(data)) + graphs = list(mapper().apply_mapping(data=data, chunk_size=None)) # Assert assert len(graphs) == 1 @@ -120,7 +120,7 @@ def test_apply_mapping_chunking(template_id: str, test_params: conftest.Chunking assert mapper # Map - graphs = list(mapper().apply_mapping(data, chunk_size=test_params.chunk_size)) + graphs = list(mapper().apply_mapping(data=data, chunk_size=test_params.chunk_size)) # Assert assert len(graphs) == test_params.yield_count diff --git a/tests/templates/test_survey_occurrence_data_v2.py b/tests/templates/test_survey_occurrence_data_v2.py index dbb46614..48a9033f 100644 --- a/tests/templates/test_survey_occurrence_data_v2.py +++ b/tests/templates/test_survey_occurrence_data_v2.py @@ -284,7 +284,7 @@ def test_apply_mapping(self, mapper: Mapper) -> None: ).read_text() # Resulting graph doesn't match expected when no lat/long provided - graphs = list(mapper.apply_mapping(csv_data)) + graphs = list(mapper.apply_mapping(data=csv_data, chunk_size=None)) assert len(graphs) == 1 assert not conftest.compare_graphs(graphs[0], expected) @@ -301,6 +301,7 @@ def test_apply_mapping(self, mapper: Mapper) -> None: graphs = list( mapper.apply_mapping( data=csv_data, + chunk_size=None, site_id_geometry_map=default_map, ) ) @@ -422,6 +423,7 @@ def test_apply_mapping(self, mapper: Mapper) -> None: # Invoke graphs = mapper.apply_mapping( data=df.to_csv(index=False).encode("utf-8"), + chunk_size=None, site_visit_id_temporal_map=site_visit_id_temporal_map, ) res_g = next(graphs)