From ece6d830a6c671224b61c3a44baf9253e376c592 Mon Sep 17 00:00:00 2001 From: Andrew Sikowitz Date: Tue, 3 Oct 2023 00:12:24 -0400 Subject: [PATCH 1/2] test(ingest/unity): Add Unity Catalog memory performance testing --- .../ingestion/source/unity/proxy_types.py | 1 - .../tests/performance/bigquery/__init__.py | 0 .../bigquery_events.py} | 0 .../{ => bigquery}/test_bigquery_usage.py | 4 +- .../tests/performance/data_generation.py | 53 +++++- .../tests/performance/data_model.py | 31 ++- .../tests/performance/databricks/__init__.py | 0 .../performance/databricks/test_unity.py | 87 +++++++++ .../databricks/unity_proxy_mock.py | 180 ++++++++++++++++++ .../tests/unit/test_bigquery_usage.py | 7 +- 10 files changed, 347 insertions(+), 16 deletions(-) create mode 100644 metadata-ingestion/tests/performance/bigquery/__init__.py rename metadata-ingestion/tests/performance/{bigquery.py => bigquery/bigquery_events.py} (100%) rename metadata-ingestion/tests/performance/{ => bigquery}/test_bigquery_usage.py (96%) create mode 100644 metadata-ingestion/tests/performance/databricks/__init__.py create mode 100644 metadata-ingestion/tests/performance/databricks/test_unity.py create mode 100644 metadata-ingestion/tests/performance/databricks/unity_proxy_mock.py diff --git a/metadata-ingestion/src/datahub/ingestion/source/unity/proxy_types.py b/metadata-ingestion/src/datahub/ingestion/source/unity/proxy_types.py index 2b943d8c98e7d..b7a175456d000 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/unity/proxy_types.py +++ b/metadata-ingestion/src/datahub/ingestion/source/unity/proxy_types.py @@ -154,7 +154,6 @@ class Table(CommonProperty): columns: List[Column] storage_location: Optional[str] data_source_format: Optional[DataSourceFormat] - comment: Optional[str] table_type: TableType owner: Optional[str] generation: Optional[int] diff --git a/metadata-ingestion/tests/performance/bigquery/__init__.py b/metadata-ingestion/tests/performance/bigquery/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/metadata-ingestion/tests/performance/bigquery.py b/metadata-ingestion/tests/performance/bigquery/bigquery_events.py similarity index 100% rename from metadata-ingestion/tests/performance/bigquery.py rename to metadata-ingestion/tests/performance/bigquery/bigquery_events.py diff --git a/metadata-ingestion/tests/performance/test_bigquery_usage.py b/metadata-ingestion/tests/performance/bigquery/test_bigquery_usage.py similarity index 96% rename from metadata-ingestion/tests/performance/test_bigquery_usage.py rename to metadata-ingestion/tests/performance/bigquery/test_bigquery_usage.py index 7e05ef070b45d..d9b684dd31158 100644 --- a/metadata-ingestion/tests/performance/test_bigquery_usage.py +++ b/metadata-ingestion/tests/performance/bigquery/test_bigquery_usage.py @@ -16,7 +16,7 @@ from datahub.ingestion.source.bigquery_v2.bigquery_report import BigQueryV2Report from datahub.ingestion.source.bigquery_v2.usage import BigQueryUsageExtractor from datahub.utilities.perf_timer import PerfTimer -from tests.performance.bigquery import generate_events, ref_from_table +from tests.performance.bigquery.bigquery_events import generate_events, ref_from_table from tests.performance.data_generation import ( NormalDistribution, generate_data, @@ -33,7 +33,7 @@ def run_test(): num_views=2000, time_range=timedelta(days=7), ) - all_tables = seed_metadata.tables + seed_metadata.views + all_tables = seed_metadata.all_tables config = BigQueryV2Config( start_time=seed_metadata.start_time, diff --git 
a/metadata-ingestion/tests/performance/data_generation.py b/metadata-ingestion/tests/performance/data_generation.py index c530848f27f5c..67b156896909a 100644 --- a/metadata-ingestion/tests/performance/data_generation.py +++ b/metadata-ingestion/tests/performance/data_generation.py @@ -11,11 +11,14 @@ import uuid from dataclasses import dataclass from datetime import datetime, timedelta, timezone -from typing import Iterable, List, TypeVar +from typing import Iterable, List, TypeVar, Union, cast from faker import Faker from tests.performance.data_model import ( + Column, + ColumnMapping, + ColumnType, Container, FieldAccess, Query, @@ -52,15 +55,21 @@ def sample_with_floor(self, floor: int = 1) -> int: @dataclass class SeedMetadata: - containers: List[Container] + # Each list is a layer of containers, e.g. [[databases], [schemas]] + containers: List[List[Container]] + tables: List[Table] views: List[View] start_time: datetime end_time: datetime + @property + def all_tables(self) -> List[Table]: + return self.tables + cast(List[Table], self.views) + def generate_data( - num_containers: int, + num_containers: Union[List[int], int], num_tables: int, num_views: int, columns_per_table: NormalDistribution = NormalDistribution(5, 2), @@ -68,32 +77,52 @@ def generate_data( view_definition_length: NormalDistribution = NormalDistribution(150, 50), time_range: timedelta = timedelta(days=14), ) -> SeedMetadata: - containers = [Container(f"container-{i}") for i in range(num_containers)] + # Assemble containers + if isinstance(num_containers, int): + num_containers = [num_containers] + + containers: List[List[Container]] = [] + for i, num_in_layer in enumerate(num_containers): + layer = [ + Container( + f"{i}-container-{j}", + parent=random.choice(containers[-1]) if containers else None, + ) + for j in range(num_in_layer) + ] + containers.append(layer) + + # Assemble tables tables = [ Table( f"table-{i}", - container=random.choice(containers), + container=random.choice(containers[-1]), columns=[ f"column-{j}-{uuid.uuid4()}" for j in range(columns_per_table.sample_with_floor()) ], + column_mapping=None, ) for i in range(num_tables) ] views = [ View( f"view-{i}", - container=random.choice(containers), + container=random.choice(containers[-1]), columns=[ f"column-{j}-{uuid.uuid4()}" for j in range(columns_per_table.sample_with_floor()) ], + column_mapping=None, definition=f"{uuid.uuid4()}-{'*' * view_definition_length.sample_with_floor(10)}", parents=random.sample(tables, parents_per_view.sample_with_floor()), ) for i in range(num_views) ] + for table in tables + views: + _generate_column_mapping(table) + now = datetime.now(tz=timezone.utc) return SeedMetadata( containers=containers, @@ -162,6 +191,18 @@ def generate_queries( ) +def _generate_column_mapping(table: Table) -> ColumnMapping: + d = {} + for column in table.columns: + d[column] = Column( + name=column, + type=random.choice(list(ColumnType)), + nullable=random.random() < 0.1, # Fixed 10% chance for now + ) + table.column_mapping = d + return d + + def _sample_list(lst: List[T], dist: NormalDistribution, floor: int = 1) -> List[T]: return random.sample(lst, min(dist.sample_with_floor(floor), len(lst))) diff --git a/metadata-ingestion/tests/performance/data_model.py b/metadata-ingestion/tests/performance/data_model.py index c593e69ceb9a7..9425fa827070e 100644 --- a/metadata-ingestion/tests/performance/data_model.py +++ b/metadata-ingestion/tests/performance/data_model.py @@ -1,10 +1,10 @@ from dataclasses import dataclass from datetime import 
datetime -from typing import List, Optional +from enum import Enum +from typing import Dict, List, Optional from typing_extensions import Literal -Column = str StatementType = Literal[ # SELECT + values from OperationTypeClass "SELECT", "INSERT", @@ -21,13 +21,36 @@ @dataclass class Container: name: str + parent: Optional["Container"] = None + + +class ColumnType(str, Enum): + # Can add types that take parameters in the future + + INTEGER = "INTEGER" + FLOAT = "FLOAT" # Double precision (64 bit) + STRING = "STRING" + BOOLEAN = "BOOLEAN" + DATETIME = "DATETIME" + + +@dataclass +class Column: + name: str + type: ColumnType + nullable: bool + + +ColumnRef = str +ColumnMapping = Dict[ColumnRef, Column] @dataclass class Table: name: str container: Container - columns: List[Column] + columns: List[ColumnRef] + column_mapping: Optional[ColumnMapping] def is_view(self) -> bool: return False @@ -44,7 +67,7 @@ def is_view(self) -> bool: @dataclass class FieldAccess: - column: Column + column: ColumnRef table: Table diff --git a/metadata-ingestion/tests/performance/databricks/__init__.py b/metadata-ingestion/tests/performance/databricks/__init__.py new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/metadata-ingestion/tests/performance/databricks/test_unity.py b/metadata-ingestion/tests/performance/databricks/test_unity.py new file mode 100644 index 0000000000000..f4e514248d396 --- /dev/null +++ b/metadata-ingestion/tests/performance/databricks/test_unity.py @@ -0,0 +1,87 @@ +import logging +import os +from typing import Iterable, Tuple +from unittest.mock import patch + +import humanfriendly +import psutil +from performance.databricks.unity_proxy_mock import UnityCatalogApiProxyMock + +from datahub.ingestion.api.common import PipelineContext +from datahub.ingestion.api.workunit import MetadataWorkUnit +from datahub.ingestion.source.unity.config import UnityCatalogSourceConfig +from datahub.ingestion.source.unity.source import UnityCatalogSource +from datahub.utilities.perf_timer import PerfTimer +from tests.performance.data_generation import ( + NormalDistribution, + generate_data, + generate_queries, +) + + +def run_test(): + seed_metadata = generate_data( + num_containers=[1, 100, 5000], + num_tables=50000, + num_views=10000, + columns_per_table=NormalDistribution(100, 50), + parents_per_view=NormalDistribution(5, 5), + view_definition_length=NormalDistribution(1000, 300), + ) + queries = generate_queries( + seed_metadata, + num_selects=100000, + num_operations=100000, + num_unique_queries=10000, + num_users=1000, + ) + proxy_mock = UnityCatalogApiProxyMock( + seed_metadata, queries=queries, num_service_principals=10000 + ) + print("Data generated") + + config = UnityCatalogSourceConfig( + token="", workspace_url="http://localhost:1234", include_usage_statistics=False + ) + ctx = PipelineContext(run_id="test") + with patch( + "datahub.ingestion.source.unity.source.UnityCatalogApiProxy", + lambda *args, **kwargs: proxy_mock, + ): + source: UnityCatalogSource = UnityCatalogSource(ctx, config) + + pre_mem_usage = psutil.Process(os.getpid()).memory_info().rss + print(f"Test data size: {humanfriendly.format_size(pre_mem_usage)}") + + with PerfTimer() as timer: + workunits = source.get_workunits() + num_workunits, peak_memory_usage = workunit_sink(workunits) + print(f"Workunits Generated: {num_workunits}") + print(f"Seconds Elapsed: {timer.elapsed_seconds():.2f} seconds") + + print( + f"Peak Memory Used: {humanfriendly.format_size(peak_memory_usage - pre_mem_usage)}" + ) + 
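+    # source.report.aspects tallies emitted aspects per entity type; a quick
+    # sanity check that the mocked proxy produced the expected metadata volume.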
print(source.report.aspects) + + +def workunit_sink(workunits: Iterable[MetadataWorkUnit]) -> Tuple[int, int]: + peak_memory_usage = psutil.Process(os.getpid()).memory_info().rss + i: int = 0 + for i, wu in enumerate(workunits): + if i % 10_000 == 0: + peak_memory_usage = max( + peak_memory_usage, psutil.Process(os.getpid()).memory_info().rss + ) + peak_memory_usage = max( + peak_memory_usage, psutil.Process(os.getpid()).memory_info().rss + ) + + return i, peak_memory_usage + + +if __name__ == "__main__": + root_logger = logging.getLogger() + root_logger.setLevel(logging.INFO) + root_logger.addHandler(logging.StreamHandler()) + run_test() diff --git a/metadata-ingestion/tests/performance/databricks/unity_proxy_mock.py b/metadata-ingestion/tests/performance/databricks/unity_proxy_mock.py new file mode 100644 index 0000000000000..31acef1398903 --- /dev/null +++ b/metadata-ingestion/tests/performance/databricks/unity_proxy_mock.py @@ -0,0 +1,180 @@ +import uuid +from collections import defaultdict +from datetime import datetime, timezone +from typing import Iterable + +from databricks.sdk.service.catalog import ColumnTypeName +from databricks.sdk.service.sql import QueryStatementType +from performance import data_model +from performance.data_generation import SeedMetadata +from performance.data_model import ColumnType, StatementType + +from datahub.ingestion.source.unity.proxy_types import ( + Catalog, + CatalogType, + Column, + Metastore, + Query, + Schema, + ServicePrincipal, + Table, + TableType, +) + + +class UnityCatalogApiProxyMock: + """Mimics UnityCatalogApiProxy for performance testing.""" + + def __init__( + self, + seed_metadata: SeedMetadata, + queries: Iterable[data_model.Query] = (), + num_service_principals: int = 0, + ) -> None: + self.seed_metadata = seed_metadata + self.queries = queries + self.num_service_principals = num_service_principals + self.warehouse_id = "invalid-warehouse-id" + self._schema_to_table = defaultdict(list) # Cache for performance + for table in seed_metadata.all_tables: + self._schema_to_table[table.container.name].append(table) + + def check_basic_connectivity(self) -> bool: + return True + + def assigned_metastore(self) -> Metastore: + container = self.seed_metadata.containers[0][0] + return Metastore( + id=container.name, + name=container.name, + global_metastore_id=container.name, + metastore_id=container.name, + comment=None, + owner=None, + cloud=None, + region=None, + ) + + def catalogs(self, metastore: Metastore) -> Iterable[Catalog]: + for container in self.seed_metadata.containers[1]: + if metastore.name != container.parent.name: + continue + + yield Catalog( + id=f"{metastore.id}.{container.name}", + name=container.name, + metastore=metastore, + comment=None, + owner=None, + type=CatalogType.MANAGED_CATALOG, + ) + + def schemas(self, catalog: Catalog) -> Iterable[Schema]: + for container in self.seed_metadata.containers[2]: + # Assumes all catalog names are unique + if catalog.name != container.parent.name: + continue + + yield Schema( + id=f"{catalog.id}.{container.name}", + name=container.name, + catalog=catalog, + comment=None, + owner=None, + ) + + def tables(self, schema: Schema) -> Iterable[Table]: + for table in self._schema_to_table[schema.name]: + columns = [] + for i, col_name in enumerate(table.columns): + column = table.column_mapping[col_name] + columns.append( + Column( + id=column.name, + name=column.name, + type_name=self._convert_column_type(column.type), + type_text=column.type.value, + nullable=column.nullable, + 
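+                        # position is the zero-based column index; the mock stubs
+                        # type_precision/type_scale to 0 since its ColumnTypes take no parameters.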
position=i, + comment=None, + type_precision=0, + type_scale=0, + ) + ) + + yield Table( + id=f"{schema.id}.{table.name}", + name=table.name, + schema=schema, + table_type=TableType.VIEW if table.is_view() else TableType.MANAGED, + columns=columns, + created_at=datetime.now(tz=timezone.utc), + comment=None, + owner=None, + storage_location=None, + data_source_format=None, + generation=None, + created_by="", + updated_at=None, + updated_by=None, + table_id="", + view_definition=table.definition + if isinstance(table, data_model.View) + else None, + properties={}, + ) + + def service_principals(self) -> Iterable[ServicePrincipal]: + for i in range(self.num_service_principals): + yield ServicePrincipal( + id=str(i), + application_id=str(uuid.uuid4()), + display_name=f"user-{i}", + active=True, + ) + + def query_history( + self, + start_time: datetime, + end_time: datetime, + ) -> Iterable[Query]: + for i, query in enumerate(self.queries): + yield Query( + query_id=str(i), + query_text=query.text, + statement_type=self._convert_statement_type(query.type), + start_time=query.timestamp, + end_time=query.timestamp, + user_id=hash(query.actor), + user_name=query.actor, + executed_as_user_id=hash(query.actor), + executed_as_user_name=None, + ) + + def table_lineage(self, table: Table) -> None: + pass + + def get_column_lineage(self, table: Table) -> None: + pass + + @staticmethod + def _convert_column_type(t: ColumnType) -> ColumnTypeName: + if t == ColumnType.INTEGER: + return ColumnTypeName.INT + elif t == ColumnType.FLOAT: + return ColumnTypeName.DOUBLE + elif t == ColumnType.STRING: + return ColumnTypeName.STRING + elif t == ColumnType.BOOLEAN: + return ColumnTypeName.BOOLEAN + elif t == ColumnType.DATETIME: + return ColumnTypeName.TIMESTAMP + else: + raise ValueError(f"Unknown column type: {t}") + + @staticmethod + def _convert_statement_type(t: StatementType) -> QueryStatementType: + if t == "CUSTOM" or t == "UNKNOWN": + return QueryStatementType.OTHER + else: + return QueryStatementType[t] diff --git a/metadata-ingestion/tests/unit/test_bigquery_usage.py b/metadata-ingestion/tests/unit/test_bigquery_usage.py index e06c6fb3fe7e5..1eb5d8b00e27c 100644 --- a/metadata-ingestion/tests/unit/test_bigquery_usage.py +++ b/metadata-ingestion/tests/unit/test_bigquery_usage.py @@ -35,7 +35,7 @@ TimeWindowSizeClass, ) from datahub.testing.compare_metadata_json import diff_metadata_json -from tests.performance.bigquery import generate_events, ref_from_table +from tests.performance.bigquery.bigquery_events import generate_events, ref_from_table from tests.performance.data_generation import generate_data, generate_queries from tests.performance.data_model import Container, FieldAccess, Query, Table, View @@ -45,14 +45,15 @@ ACTOR_2, ACTOR_2_URN = "b@acryl.io", "urn:li:corpuser:b" DATABASE_1 = Container("database_1") DATABASE_2 = Container("database_2") -TABLE_1 = Table("table_1", DATABASE_1, ["id", "name", "age"]) -TABLE_2 = Table("table_2", DATABASE_1, ["id", "table_1_id", "value"]) +TABLE_1 = Table("table_1", DATABASE_1, ["id", "name", "age"], None) +TABLE_2 = Table("table_2", DATABASE_1, ["id", "table_1_id", "value"], None) VIEW_1 = View( name="view_1", container=DATABASE_1, columns=["id", "name", "total"], definition="VIEW DEFINITION 1", parents=[TABLE_1, TABLE_2], + column_mapping=None, ) ALL_TABLES = [TABLE_1, TABLE_2, VIEW_1] From 5c3f0c74b1615e27c2a2870c566fe9af239532a1 Mon Sep 17 00:00:00 2001 From: Andrew Sikowitz Date: Wed, 4 Oct 2023 03:00:43 -0400 Subject: [PATCH 2/2] pr feedback --- 
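Notes (placed after the "---" separator so git am keeps them out of the
commit message): this round extracts the workunit_sink memory probe, which
was duplicated across both perf tests, into tests/performance/helpers.py;
roots the mock's imports at tests.performance; and guards the Optional
column_mapping before indexing into it. A minimal sketch of the shared call
pattern, assuming a source constructed as in either perf test:

    from tests.performance.helpers import workunit_sink

    num_workunits, peak_memory_usage = workunit_sink(source.get_workunits())
    print(f"Workunits Generated: {num_workunits}")
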
.../bigquery/test_bigquery_usage.py | 18 +------- .../performance/databricks/test_unity.py | 20 +-------- .../databricks/unity_proxy_mock.py | 45 ++++++++++--------- .../tests/performance/helpers.py | 21 +++++++++ 4 files changed, 48 insertions(+), 56 deletions(-) create mode 100644 metadata-ingestion/tests/performance/helpers.py diff --git a/metadata-ingestion/tests/performance/bigquery/test_bigquery_usage.py b/metadata-ingestion/tests/performance/bigquery/test_bigquery_usage.py index d9b684dd31158..bbc3378450bff 100644 --- a/metadata-ingestion/tests/performance/bigquery/test_bigquery_usage.py +++ b/metadata-ingestion/tests/performance/bigquery/test_bigquery_usage.py @@ -2,13 +2,11 @@ import os import random from datetime import timedelta -from typing import Iterable, Tuple import humanfriendly import psutil from datahub.emitter.mce_builder import make_dataset_urn -from datahub.ingestion.api.workunit import MetadataWorkUnit from datahub.ingestion.source.bigquery_v2.bigquery_config import ( BigQueryUsageConfig, BigQueryV2Config, @@ -22,6 +20,7 @@ generate_data, generate_queries, ) +from tests.performance.helpers import workunit_sink def run_test(): @@ -88,21 +87,6 @@ def run_test(): print(f"Hash collisions: {report.num_usage_query_hash_collisions}") -def workunit_sink(workunits: Iterable[MetadataWorkUnit]) -> Tuple[int, int]: - peak_memory_usage = psutil.Process(os.getpid()).memory_info().rss - i: int = 0 - for i, wu in enumerate(workunits): - if i % 10_000 == 0: - peak_memory_usage = max( - peak_memory_usage, psutil.Process(os.getpid()).memory_info().rss - ) - peak_memory_usage = max( - peak_memory_usage, psutil.Process(os.getpid()).memory_info().rss - ) - - return i, peak_memory_usage - - if __name__ == "__main__": root_logger = logging.getLogger() root_logger.setLevel(logging.INFO) diff --git a/metadata-ingestion/tests/performance/databricks/test_unity.py b/metadata-ingestion/tests/performance/databricks/test_unity.py index f4e514248d396..cc9558f0692ed 100644 --- a/metadata-ingestion/tests/performance/databricks/test_unity.py +++ b/metadata-ingestion/tests/performance/databricks/test_unity.py @@ -1,14 +1,11 @@ import logging import os -from typing import Iterable, Tuple from unittest.mock import patch import humanfriendly import psutil -from performance.databricks.unity_proxy_mock import UnityCatalogApiProxyMock from datahub.ingestion.api.common import PipelineContext -from datahub.ingestion.api.workunit import MetadataWorkUnit from datahub.ingestion.source.unity.config import UnityCatalogSourceConfig from datahub.ingestion.source.unity.source import UnityCatalogSource from datahub.utilities.perf_timer import PerfTimer @@ -17,6 +14,8 @@ generate_data, generate_queries, ) +from tests.performance.databricks.unity_proxy_mock import UnityCatalogApiProxyMock +from tests.performance.helpers import workunit_sink def run_test(): @@ -65,21 +64,6 @@ def run_test(): print(source.report.aspects) -def workunit_sink(workunits: Iterable[MetadataWorkUnit]) -> Tuple[int, int]: - peak_memory_usage = psutil.Process(os.getpid()).memory_info().rss - i: int = 0 - for i, wu in enumerate(workunits): - if i % 10_000 == 0: - peak_memory_usage = max( - peak_memory_usage, psutil.Process(os.getpid()).memory_info().rss - ) - peak_memory_usage = max( - peak_memory_usage, psutil.Process(os.getpid()).memory_info().rss - ) - - return i, peak_memory_usage - - if __name__ == "__main__": root_logger = logging.getLogger() root_logger.setLevel(logging.INFO) diff --git 
a/metadata-ingestion/tests/performance/databricks/unity_proxy_mock.py b/metadata-ingestion/tests/performance/databricks/unity_proxy_mock.py index 31acef1398903..593163e12bf0a 100644 --- a/metadata-ingestion/tests/performance/databricks/unity_proxy_mock.py +++ b/metadata-ingestion/tests/performance/databricks/unity_proxy_mock.py @@ -1,13 +1,10 @@ import uuid from collections import defaultdict from datetime import datetime, timezone -from typing import Iterable +from typing import Dict, Iterable, List from databricks.sdk.service.catalog import ColumnTypeName from databricks.sdk.service.sql import QueryStatementType -from performance import data_model -from performance.data_generation import SeedMetadata -from performance.data_model import ColumnType, StatementType from datahub.ingestion.source.unity.proxy_types import ( Catalog, @@ -20,6 +17,9 @@ Table, TableType, ) +from tests.performance import data_model +from tests.performance.data_generation import SeedMetadata +from tests.performance.data_model import ColumnType, StatementType class UnityCatalogApiProxyMock: @@ -35,7 +35,9 @@ def __init__( self.queries = queries self.num_service_principals = num_service_principals self.warehouse_id = "invalid-warehouse-id" - self._schema_to_table = defaultdict(list) # Cache for performance + + # Cache for performance + self._schema_to_table: Dict[str, List[data_model.Table]] = defaultdict(list) for table in seed_metadata.all_tables: self._schema_to_table[table.container.name].append(table) @@ -57,7 +59,7 @@ def assigned_metastore(self) -> Metastore: def catalogs(self, metastore: Metastore) -> Iterable[Catalog]: for container in self.seed_metadata.containers[1]: - if metastore.name != container.parent.name: + if not container.parent or metastore.name != container.parent.name: continue yield Catalog( @@ -72,7 +74,7 @@ def catalogs(self, metastore: Metastore) -> Iterable[Catalog]: def schemas(self, catalog: Catalog) -> Iterable[Schema]: for container in self.seed_metadata.containers[2]: # Assumes all catalog names are unique - if catalog.name != container.parent.name: + if not container.parent or catalog.name != container.parent.name: continue yield Schema( @@ -86,21 +88,22 @@ def schemas(self, catalog: Catalog) -> Iterable[Schema]: def tables(self, schema: Schema) -> Iterable[Table]: for table in self._schema_to_table[schema.name]: columns = [] - for i, col_name in enumerate(table.columns): - column = table.column_mapping[col_name] - columns.append( - Column( - id=column.name, - name=column.name, - type_name=self._convert_column_type(column.type), - type_text=column.type.value, - nullable=column.nullable, - position=i, - comment=None, - type_precision=0, - type_scale=0, + if table.column_mapping: + for i, col_name in enumerate(table.columns): + column = table.column_mapping[col_name] + columns.append( + Column( + id=column.name, + name=column.name, + type_name=self._convert_column_type(column.type), + type_text=column.type.value, + nullable=column.nullable, + position=i, + comment=None, + type_precision=0, + type_scale=0, + ) ) - ) yield Table( id=f"{schema.id}.{table.name}", diff --git a/metadata-ingestion/tests/performance/helpers.py b/metadata-ingestion/tests/performance/helpers.py new file mode 100644 index 0000000000000..eb98e53670c96 --- /dev/null +++ b/metadata-ingestion/tests/performance/helpers.py @@ -0,0 +1,21 @@ +import os +from typing import Iterable, Tuple + +import psutil + +from datahub.ingestion.api.workunit import MetadataWorkUnit + + +def workunit_sink(workunits: 
Iterable[MetadataWorkUnit]) -> Tuple[int, int]: + peak_memory_usage = psutil.Process(os.getpid()).memory_info().rss + i: int = 0 + for i, wu in enumerate(workunits): + if i % 10_000 == 0: + peak_memory_usage = max( + peak_memory_usage, psutil.Process(os.getpid()).memory_info().rss + ) + peak_memory_usage = max( + peak_memory_usage, psutil.Process(os.getpid()).memory_info().rss + ) + + return i, peak_memory_usage
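
Usage note, not part of either patch: a minimal sketch, assuming the
tests.performance package layout above, of driving the layered
num_containers API that patch 1 adds to data_generation.py. The three
layers model metastore -> catalogs -> schemas, matching what
UnityCatalogApiProxyMock reads out of seed_metadata.containers:

    from tests.performance.data_generation import (
        NormalDistribution,
        generate_data,
    )

    seed = generate_data(
        num_containers=[1, 10, 100],  # one metastore, 10 catalogs, 100 schemas
        num_tables=1_000,
        num_views=100,
        columns_per_table=NormalDistribution(5, 2),
    )
    assert len(seed.containers) == 3  # one list per container layer
    print(len(seed.all_tables))  # tables + views combined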