From 57f855ecd11632e884b12fda0fc57e2694ee26a5 Mon Sep 17 00:00:00 2001
From: Mayuri Nehate <33225191+mayurinehate@users.noreply.github.com>
Date: Tue, 10 Oct 2023 12:18:21 +0530
Subject: [PATCH 01/14] feat(ingest): refactor + simplify incremental lineage
 helper (#8976)

---
 .../api/incremental_lineage_helper.py         | 139 ++++++++++++++++++
 .../datahub/ingestion/api/source_helpers.py   | 138 +----------------
 .../source/snowflake/snowflake_v2.py          |   4 +-
 .../test_incremental_lineage_helper.py        |   6 +-
 4 files changed, 142 insertions(+), 145 deletions(-)
 create mode 100644 metadata-ingestion/src/datahub/ingestion/api/incremental_lineage_helper.py

diff --git a/metadata-ingestion/src/datahub/ingestion/api/incremental_lineage_helper.py b/metadata-ingestion/src/datahub/ingestion/api/incremental_lineage_helper.py
new file mode 100644
index 0000000000000..9478c5cf7efa2
--- /dev/null
+++ b/metadata-ingestion/src/datahub/ingestion/api/incremental_lineage_helper.py
@@ -0,0 +1,139 @@
+import copy
+from typing import Dict, Iterable, Optional
+
+from datahub.emitter.mce_builder import datahub_guid, set_aspect
+from datahub.emitter.mcp import MetadataChangeProposalWrapper
+from datahub.ingestion.api.workunit import MetadataWorkUnit
+from datahub.ingestion.graph.client import DataHubGraph
+from datahub.metadata.schema_classes import (
+    FineGrainedLineageClass,
+    MetadataChangeEventClass,
+    SystemMetadataClass,
+    UpstreamClass,
+    UpstreamLineageClass,
+)
+from datahub.specific.dataset import DatasetPatchBuilder
+
+
+def _convert_upstream_lineage_to_patch(
+    urn: str,
+    aspect: UpstreamLineageClass,
+    system_metadata: Optional[SystemMetadataClass],
+) -> MetadataWorkUnit:
+    patch_builder = DatasetPatchBuilder(urn, system_metadata)
+    for upstream in aspect.upstreams:
+        patch_builder.add_upstream_lineage(upstream)
+    mcp = next(iter(patch_builder.build()))
+    return MetadataWorkUnit(id=f"{urn}-upstreamLineage", mcp_raw=mcp)
+
+
+def get_fine_grained_lineage_key(fine_upstream: FineGrainedLineageClass) -> str:
+    return datahub_guid(
+        {
+            "upstreams": sorted(fine_upstream.upstreams or []),
+            "downstreams": sorted(fine_upstream.downstreams or []),
+            "transformOperation": fine_upstream.transformOperation,
+        }
+    )
+
+
+def _merge_upstream_lineage(
+    new_aspect: UpstreamLineageClass, gms_aspect: UpstreamLineageClass
+) -> UpstreamLineageClass:
+    merged_aspect = copy.deepcopy(gms_aspect)
+
+    upstreams_map: Dict[str, UpstreamClass] = {
+        upstream.dataset: upstream for upstream in merged_aspect.upstreams
+    }
+
+    upstreams_updated = False
+    fine_upstreams_updated = False
+
+    for table_upstream in new_aspect.upstreams:
+        if table_upstream.dataset not in upstreams_map or (
+            table_upstream.auditStamp.time
+            > upstreams_map[table_upstream.dataset].auditStamp.time
+        ):
+            upstreams_map[table_upstream.dataset] = table_upstream
+            upstreams_updated = True
+
+    if upstreams_updated:
+        merged_aspect.upstreams = list(upstreams_map.values())
+
+    if new_aspect.fineGrainedLineages and merged_aspect.fineGrainedLineages:
+        fine_upstreams_map: Dict[str, FineGrainedLineageClass] = {
+            get_fine_grained_lineage_key(fine_upstream): fine_upstream
+            for fine_upstream in merged_aspect.fineGrainedLineages
+        }
+        for column_upstream in new_aspect.fineGrainedLineages:
+            column_upstream_key = get_fine_grained_lineage_key(column_upstream)
+
+            if column_upstream_key not in fine_upstreams_map or (
+                column_upstream.confidenceScore
+                > fine_upstreams_map[column_upstream_key].confidenceScore
+            ):
+                fine_upstreams_map[column_upstream_key] = column_upstream
+                fine_upstreams_updated = True
+
+        if fine_upstreams_updated:
+            merged_aspect.fineGrainedLineages = list(fine_upstreams_map.values())
+    else:
+        merged_aspect.fineGrainedLineages = (
+            new_aspect.fineGrainedLineages or gms_aspect.fineGrainedLineages
+        )
+
+    return merged_aspect
+
+
+def _lineage_wu_via_read_modify_write(
+    graph: Optional[DataHubGraph],
+    urn: str,
+    aspect: UpstreamLineageClass,
+    system_metadata: Optional[SystemMetadataClass],
+) -> MetadataWorkUnit:
+    if graph is None:
+        raise ValueError(
+            "Failed to handle incremental lineage, DataHubGraph is missing. "
+            "Use `datahub-rest` sink OR provide `datahub-api` config in recipe. "
+        )
+    gms_aspect = graph.get_aspect(urn, UpstreamLineageClass)
+    if gms_aspect:
+        new_aspect = _merge_upstream_lineage(aspect, gms_aspect)
+    else:
+        new_aspect = aspect
+
+    return MetadataChangeProposalWrapper(
+        entityUrn=urn, aspect=new_aspect, systemMetadata=system_metadata
+    ).as_workunit()
+
+
+def auto_incremental_lineage(
+    graph: Optional[DataHubGraph],
+    incremental_lineage: bool,
+    stream: Iterable[MetadataWorkUnit],
+) -> Iterable[MetadataWorkUnit]:
+    if not incremental_lineage:
+        yield from stream
+        return  # early exit
+
+    for wu in stream:
+        lineage_aspect: Optional[UpstreamLineageClass] = wu.get_aspect_of_type(
+            UpstreamLineageClass
+        )
+        urn = wu.get_urn()
+
+        if lineage_aspect:
+            if isinstance(wu.metadata, MetadataChangeEventClass):
+                set_aspect(
+                    wu.metadata, None, UpstreamLineageClass
+                )  # we'll emit upstreamLineage separately below
+                if len(wu.metadata.proposedSnapshot.aspects) > 0:
+                    yield wu
+
+            yield _lineage_wu_via_read_modify_write(
+                graph, urn, lineage_aspect, wu.metadata.systemMetadata
+            ) if lineage_aspect.fineGrainedLineages else _convert_upstream_lineage_to_patch(
+                urn, lineage_aspect, wu.metadata.systemMetadata
+            )
+        else:
+            yield wu
diff --git a/metadata-ingestion/src/datahub/ingestion/api/source_helpers.py b/metadata-ingestion/src/datahub/ingestion/api/source_helpers.py
index 42f970e97c95f..7fc15cf829678 100644
--- a/metadata-ingestion/src/datahub/ingestion/api/source_helpers.py
+++ b/metadata-ingestion/src/datahub/ingestion/api/source_helpers.py
@@ -1,4 +1,3 @@
-import copy
 import logging
 from datetime import datetime, timezone
 from typing import (
@@ -16,14 +15,9 @@
 )
 
 from datahub.configuration.time_window_config import BaseTimeWindowConfig
-from datahub.emitter.mce_builder import (
-    datahub_guid,
-    make_dataplatform_instance_urn,
-    set_aspect,
-)
+from datahub.emitter.mce_builder import make_dataplatform_instance_urn
 from datahub.emitter.mcp import MetadataChangeProposalWrapper
 from datahub.ingestion.api.workunit import MetadataWorkUnit
-from datahub.ingestion.graph.client import DataHubGraph
 from datahub.metadata.schema_classes import (
     BrowsePathEntryClass,
     BrowsePathsClass,
@@ -31,17 +25,12 @@
     ChangeTypeClass,
     ContainerClass,
     DatasetUsageStatisticsClass,
-    FineGrainedLineageClass,
     MetadataChangeEventClass,
     MetadataChangeProposalClass,
     StatusClass,
-    SystemMetadataClass,
     TagKeyClass,
     TimeWindowSizeClass,
-    UpstreamClass,
-    UpstreamLineageClass,
 )
-from datahub.specific.dataset import DatasetPatchBuilder
 from datahub.telemetry import telemetry
 from datahub.utilities.urns.dataset_urn import DatasetUrn
 from datahub.utilities.urns.tag_urn import TagUrn
@@ -377,128 +366,3 @@ def _prepend_platform_instance(
         return [BrowsePathEntryClass(id=urn, urn=urn)] + entries
 
     return entries
-
-
-def auto_incremental_lineage(
-    graph: Optional[DataHubGraph],
-    incremental_lineage: bool,
-    include_column_level_lineage: bool,
-
stream: Iterable[MetadataWorkUnit], -) -> Iterable[MetadataWorkUnit]: - if not incremental_lineage: - yield from stream - return # early exit - - for wu in stream: - lineage_aspect: Optional[UpstreamLineageClass] = wu.get_aspect_of_type( - UpstreamLineageClass - ) - urn = wu.get_urn() - - if lineage_aspect: - if isinstance(wu.metadata, MetadataChangeEventClass): - set_aspect( - wu.metadata, None, UpstreamLineageClass - ) # we'll emit upstreamLineage separately below - if len(wu.metadata.proposedSnapshot.aspects) > 0: - yield wu - - yield _lineage_wu_via_read_modify_write( - graph, urn, lineage_aspect, wu.metadata.systemMetadata - ) if include_column_level_lineage else _convert_upstream_lineage_to_patch( - urn, lineage_aspect, wu.metadata.systemMetadata - ) - else: - yield wu - - -def _convert_upstream_lineage_to_patch( - urn: str, - aspect: UpstreamLineageClass, - system_metadata: Optional[SystemMetadataClass], -) -> MetadataWorkUnit: - patch_builder = DatasetPatchBuilder(urn, system_metadata) - for upstream in aspect.upstreams: - patch_builder.add_upstream_lineage(upstream) - mcp = next(iter(patch_builder.build())) - return MetadataWorkUnit(id=f"{urn}-upstreamLineage", mcp_raw=mcp) - - -def _lineage_wu_via_read_modify_write( - graph: Optional[DataHubGraph], - urn: str, - aspect: UpstreamLineageClass, - system_metadata: Optional[SystemMetadataClass], -) -> MetadataWorkUnit: - if graph is None: - raise ValueError( - "Failed to handle incremental lineage, DataHubGraph is missing. " - "Use `datahub-rest` sink OR provide `datahub-api` config in recipe. " - ) - gms_aspect = graph.get_aspect(urn, UpstreamLineageClass) - if gms_aspect: - new_aspect = _merge_upstream_lineage(aspect, gms_aspect) - else: - new_aspect = aspect - - return MetadataChangeProposalWrapper( - entityUrn=urn, aspect=new_aspect, systemMetadata=system_metadata - ).as_workunit() - - -def _merge_upstream_lineage( - new_aspect: UpstreamLineageClass, gms_aspect: UpstreamLineageClass -) -> UpstreamLineageClass: - merged_aspect = copy.deepcopy(gms_aspect) - - upstreams_map: Dict[str, UpstreamClass] = { - upstream.dataset: upstream for upstream in merged_aspect.upstreams - } - - upstreams_updated = False - fine_upstreams_updated = False - - for table_upstream in new_aspect.upstreams: - if table_upstream.dataset not in upstreams_map or ( - table_upstream.auditStamp.time - > upstreams_map[table_upstream.dataset].auditStamp.time - ): - upstreams_map[table_upstream.dataset] = table_upstream - upstreams_updated = True - - if upstreams_updated: - merged_aspect.upstreams = list(upstreams_map.values()) - - if new_aspect.fineGrainedLineages and merged_aspect.fineGrainedLineages: - fine_upstreams_map: Dict[str, FineGrainedLineageClass] = { - get_fine_grained_lineage_key(fine_upstream): fine_upstream - for fine_upstream in merged_aspect.fineGrainedLineages - } - for column_upstream in new_aspect.fineGrainedLineages: - column_upstream_key = get_fine_grained_lineage_key(column_upstream) - - if column_upstream_key not in fine_upstreams_map or ( - column_upstream.confidenceScore - > fine_upstreams_map[column_upstream_key].confidenceScore - ): - fine_upstreams_map[column_upstream_key] = column_upstream - fine_upstreams_updated = True - - if fine_upstreams_updated: - merged_aspect.fineGrainedLineages = list(fine_upstreams_map.values()) - else: - merged_aspect.fineGrainedLineages = ( - new_aspect.fineGrainedLineages or gms_aspect.fineGrainedLineages - ) - - return merged_aspect - - -def get_fine_grained_lineage_key(fine_upstream: 
FineGrainedLineageClass) -> str: - return datahub_guid( - { - "upstreams": sorted(fine_upstream.upstreams or []), - "downstreams": sorted(fine_upstream.downstreams or []), - "transformOperation": fine_upstream.transformOperation, - } - ) diff --git a/metadata-ingestion/src/datahub/ingestion/source/snowflake/snowflake_v2.py b/metadata-ingestion/src/datahub/ingestion/source/snowflake/snowflake_v2.py index e0848b5f9ab34..a5c07d9a3870c 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/snowflake/snowflake_v2.py +++ b/metadata-ingestion/src/datahub/ingestion/source/snowflake/snowflake_v2.py @@ -27,6 +27,7 @@ platform_name, support_status, ) +from datahub.ingestion.api.incremental_lineage_helper import auto_incremental_lineage from datahub.ingestion.api.source import ( CapabilityReport, MetadataWorkUnitProcessor, @@ -36,7 +37,6 @@ TestableSource, TestConnectionReport, ) -from datahub.ingestion.api.source_helpers import auto_incremental_lineage from datahub.ingestion.api.workunit import MetadataWorkUnit from datahub.ingestion.glossary.classification_mixin import ClassificationHandler from datahub.ingestion.source.common.subtypes import ( @@ -517,8 +517,6 @@ def get_workunit_processors(self) -> List[Optional[MetadataWorkUnitProcessor]]: auto_incremental_lineage, self.ctx.graph, self.config.incremental_lineage, - self.config.include_column_lineage - or self.config.include_view_column_lineage, ), StaleEntityRemovalHandler.create( self, self.config, self.ctx diff --git a/metadata-ingestion/tests/unit/api/source_helpers/test_incremental_lineage_helper.py b/metadata-ingestion/tests/unit/api/source_helpers/test_incremental_lineage_helper.py index 4078bda26c743..54a22d860285c 100644 --- a/metadata-ingestion/tests/unit/api/source_helpers/test_incremental_lineage_helper.py +++ b/metadata-ingestion/tests/unit/api/source_helpers/test_incremental_lineage_helper.py @@ -6,7 +6,7 @@ import datahub.metadata.schema_classes as models from datahub.emitter.mce_builder import make_dataset_urn, make_schema_field_urn from datahub.emitter.mcp import MetadataChangeProposalWrapper -from datahub.ingestion.api.source_helpers import auto_incremental_lineage +from datahub.ingestion.api.incremental_lineage_helper import auto_incremental_lineage from datahub.ingestion.api.workunit import MetadataWorkUnit from datahub.ingestion.sink.file import write_metadata_file from tests.test_helpers import mce_helpers @@ -88,7 +88,6 @@ def test_incremental_table_lineage(tmp_path, pytestconfig): processed_wus = auto_incremental_lineage( graph=None, incremental_lineage=True, - include_column_level_lineage=False, stream=[ MetadataChangeProposalWrapper( entityUrn=urn, aspect=aspect, systemMetadata=system_metadata @@ -146,7 +145,6 @@ def test_incremental_column_level_lineage( processed_wus = auto_incremental_lineage( graph=mock_graph, incremental_lineage=True, - include_column_level_lineage=True, stream=[ MetadataChangeProposalWrapper( entityUrn=dataset_urn, @@ -184,7 +182,6 @@ def test_incremental_column_lineage_less_upstreams_in_gms_aspect( processed_wus = auto_incremental_lineage( graph=mock_graph, incremental_lineage=True, - include_column_level_lineage=True, stream=[ MetadataChangeProposalWrapper( entityUrn=urn, aspect=aspect, systemMetadata=system_metadata @@ -227,7 +224,6 @@ def test_incremental_column_lineage_more_upstreams_in_gms_aspect( processed_wus = auto_incremental_lineage( graph=mock_graph, incremental_lineage=True, - include_column_level_lineage=True, stream=[ MetadataChangeProposalWrapper( entityUrn=urn, aspect=aspect, 
systemMetadata=system_metadata From bb39d5418fcbf8bebbae1b510c63a1170865a072 Mon Sep 17 00:00:00 2001 From: Aseem Bansal Date: Tue, 10 Oct 2023 16:08:34 +0530 Subject: [PATCH 02/14] fix(lint): run black, isort (#8978) --- .../tests/assertions/assertions_test.py | 33 ++-- smoke-test/tests/browse/browse_test.py | 51 +++++-- smoke-test/tests/cli/datahub-cli.py | 76 +++++++--- smoke-test/tests/cli/datahub_graph_test.py | 12 +- .../cli/delete_cmd/test_timeseries_delete.py | 12 +- .../ingest_cmd/test_timeseries_rollback.py | 6 +- .../cli/user_groups_cmd/test_group_cmd.py | 3 +- smoke-test/tests/conftest.py | 4 +- smoke-test/tests/consistency_utils.py | 16 +- .../tests/containers/containers_test.py | 4 +- smoke-test/tests/cypress/integration_test.py | 23 ++- .../tests/dataproduct/test_dataproduct.py | 4 +- smoke-test/tests/delete/delete_test.py | 18 +-- .../tests/deprecation/deprecation_test.py | 9 +- smoke-test/tests/domains/domains_test.py | 15 +- .../managed_ingestion_test.py | 3 +- smoke-test/tests/patch/common_patch_tests.py | 52 ++----- .../tests/patch/test_datajob_patches.py | 23 +-- .../tests/patch/test_dataset_patches.py | 18 ++- smoke-test/tests/policies/test_policies.py | 10 +- .../tests/setup/lineage/helper_classes.py | 5 +- .../setup/lineage/ingest_data_job_change.py | 42 ++---- .../lineage/ingest_dataset_join_change.py | 36 ++--- .../lineage/ingest_input_datasets_change.py | 42 ++---- .../setup/lineage/ingest_time_lineage.py | 18 ++- smoke-test/tests/setup/lineage/utils.py | 85 +++++------ .../tags-and-terms/tags_and_terms_test.py | 4 +- smoke-test/tests/telemetry/telemetry_test.py | 4 +- smoke-test/tests/test_result_msg.py | 23 ++- smoke-test/tests/test_stateful_ingestion.py | 14 +- smoke-test/tests/tests/tests_test.py | 7 +- smoke-test/tests/timeline/timeline_test.py | 67 +++++---- .../tokens/revokable_access_token_test.py | 12 +- smoke-test/tests/utils.py | 17 +-- smoke-test/tests/views/views_test.py | 142 +++++++++--------- 35 files changed, 457 insertions(+), 453 deletions(-) diff --git a/smoke-test/tests/assertions/assertions_test.py b/smoke-test/tests/assertions/assertions_test.py index 4aa64c512f684..48f3564e6cd97 100644 --- a/smoke-test/tests/assertions/assertions_test.py +++ b/smoke-test/tests/assertions/assertions_test.py @@ -2,28 +2,29 @@ import urllib import pytest -import requests_wrapper as requests import tenacity from datahub.emitter.mce_builder import make_dataset_urn, make_schema_field_urn from datahub.emitter.mcp import MetadataChangeProposalWrapper from datahub.ingestion.api.common import PipelineContext, RecordEnvelope from datahub.ingestion.api.sink import NoopWriteCallback from datahub.ingestion.sink.file import FileSink, FileSinkConfig -from datahub.metadata.com.linkedin.pegasus2avro.assertion import AssertionStdAggregation -from datahub.metadata.schema_classes import ( - AssertionInfoClass, - AssertionResultClass, - AssertionResultTypeClass, - AssertionRunEventClass, - AssertionRunStatusClass, - AssertionStdOperatorClass, - AssertionTypeClass, - DatasetAssertionInfoClass, - DatasetAssertionScopeClass, - PartitionSpecClass, - PartitionTypeClass, -) -from tests.utils import delete_urns_from_file, get_gms_url, ingest_file_via_rest, wait_for_healthcheck_util, get_sleep_info +from datahub.metadata.com.linkedin.pegasus2avro.assertion import \ + AssertionStdAggregation +from datahub.metadata.schema_classes import (AssertionInfoClass, + AssertionResultClass, + AssertionResultTypeClass, + AssertionRunEventClass, + AssertionRunStatusClass, + 
AssertionStdOperatorClass, + AssertionTypeClass, + DatasetAssertionInfoClass, + DatasetAssertionScopeClass, + PartitionSpecClass, + PartitionTypeClass) + +import requests_wrapper as requests +from tests.utils import (delete_urns_from_file, get_gms_url, get_sleep_info, + ingest_file_via_rest, wait_for_healthcheck_util) restli_default_headers = { "X-RestLi-Protocol-Version": "2.0.0", diff --git a/smoke-test/tests/browse/browse_test.py b/smoke-test/tests/browse/browse_test.py index b9d2143d13ec7..550f0062d5a39 100644 --- a/smoke-test/tests/browse/browse_test.py +++ b/smoke-test/tests/browse/browse_test.py @@ -1,9 +1,10 @@ import time import pytest -import requests_wrapper as requests -from tests.utils import delete_urns_from_file, get_frontend_url, ingest_file_via_rest +import requests_wrapper as requests +from tests.utils import (delete_urns_from_file, get_frontend_url, + ingest_file_via_rest) TEST_DATASET_1_URN = "urn:li:dataset:(urn:li:dataPlatform:kafka,test-browse-1,PROD)" TEST_DATASET_2_URN = "urn:li:dataset:(urn:li:dataPlatform:kafka,test-browse-2,PROD)" @@ -51,7 +52,9 @@ def test_get_browse_paths(frontend_session, ingest_cleanup_data): # /prod -- There should be one entity get_browse_paths_json = { "query": get_browse_paths_query, - "variables": {"input": { "type": "DATASET", "path": ["prod"], "start": 0, "count": 100 } }, + "variables": { + "input": {"type": "DATASET", "path": ["prod"], "start": 0, "count": 100} + }, } response = frontend_session.post( @@ -67,12 +70,19 @@ def test_get_browse_paths(frontend_session, ingest_cleanup_data): browse = res_data["data"]["browse"] print(browse) - assert browse["entities"] == [{ "urn": TEST_DATASET_3_URN }] + assert browse["entities"] == [{"urn": TEST_DATASET_3_URN}] # /prod/kafka1 get_browse_paths_json = { "query": get_browse_paths_query, - "variables": {"input": { "type": "DATASET", "path": ["prod", "kafka1"], "start": 0, "count": 10 } }, + "variables": { + "input": { + "type": "DATASET", + "path": ["prod", "kafka1"], + "start": 0, + "count": 10, + } + }, } response = frontend_session.post( @@ -88,16 +98,27 @@ def test_get_browse_paths(frontend_session, ingest_cleanup_data): browse = res_data["data"]["browse"] assert browse == { - "total": 3, - "entities": [{ "urn": TEST_DATASET_1_URN }, { "urn": TEST_DATASET_2_URN }, { "urn": TEST_DATASET_3_URN }], - "groups": [], - "metadata": { "path": ["prod", "kafka1"], "totalNumEntities": 0 } + "total": 3, + "entities": [ + {"urn": TEST_DATASET_1_URN}, + {"urn": TEST_DATASET_2_URN}, + {"urn": TEST_DATASET_3_URN}, + ], + "groups": [], + "metadata": {"path": ["prod", "kafka1"], "totalNumEntities": 0}, } # /prod/kafka2 get_browse_paths_json = { "query": get_browse_paths_query, - "variables": {"input": { "type": "DATASET", "path": ["prod", "kafka2"], "start": 0, "count": 10 } }, + "variables": { + "input": { + "type": "DATASET", + "path": ["prod", "kafka2"], + "start": 0, + "count": 10, + } + }, } response = frontend_session.post( @@ -113,10 +134,8 @@ def test_get_browse_paths(frontend_session, ingest_cleanup_data): browse = res_data["data"]["browse"] assert browse == { - "total": 2, - "entities": [{ "urn": TEST_DATASET_1_URN }, { "urn": TEST_DATASET_2_URN }], - "groups": [], - "metadata": { "path": ["prod", "kafka2"], "totalNumEntities": 0 } + "total": 2, + "entities": [{"urn": TEST_DATASET_1_URN}, {"urn": TEST_DATASET_2_URN}], + "groups": [], + "metadata": {"path": ["prod", "kafka2"], "totalNumEntities": 0}, } - - diff --git a/smoke-test/tests/cli/datahub-cli.py b/smoke-test/tests/cli/datahub-cli.py 
index 1d0080bdd9d48..c3db6028efceb 100644 --- a/smoke-test/tests/cli/datahub-cli.py +++ b/smoke-test/tests/cli/datahub-cli.py @@ -1,8 +1,11 @@ import json -import pytest from time import sleep -from datahub.cli.cli_utils import guess_entity_type, post_entity, get_aspects_for_entity + +import pytest +from datahub.cli.cli_utils import (get_aspects_for_entity, guess_entity_type, + post_entity) from datahub.cli.ingest_cli import get_session_and_host, rollback + from tests.utils import ingest_file_via_rest, wait_for_writes_to_sync ingested_dataset_run_id = "" @@ -24,24 +27,46 @@ def test_setup(): session, gms_host = get_session_and_host() - assert "browsePaths" not in get_aspects_for_entity(entity_urn=dataset_urn, aspects=["browsePaths"], typed=False) - assert "editableDatasetProperties" not in get_aspects_for_entity(entity_urn=dataset_urn, aspects=["editableDatasetProperties"], typed=False) + assert "browsePaths" not in get_aspects_for_entity( + entity_urn=dataset_urn, aspects=["browsePaths"], typed=False + ) + assert "editableDatasetProperties" not in get_aspects_for_entity( + entity_urn=dataset_urn, aspects=["editableDatasetProperties"], typed=False + ) - ingested_dataset_run_id = ingest_file_via_rest("tests/cli/cli_test_data.json").config.run_id + ingested_dataset_run_id = ingest_file_via_rest( + "tests/cli/cli_test_data.json" + ).config.run_id print("Setup ingestion id: " + ingested_dataset_run_id) - assert "browsePaths" in get_aspects_for_entity(entity_urn=dataset_urn, aspects=["browsePaths"], typed=False) + assert "browsePaths" in get_aspects_for_entity( + entity_urn=dataset_urn, aspects=["browsePaths"], typed=False + ) yield # Clean up rollback_url = f"{gms_host}/runs?action=rollback" - session.post(rollback_url, data=json.dumps({"runId": ingested_editable_run_id, "dryRun": False, "hardDelete": True})) - session.post(rollback_url, data=json.dumps({"runId": ingested_dataset_run_id, "dryRun": False, "hardDelete": True})) + session.post( + rollback_url, + data=json.dumps( + {"runId": ingested_editable_run_id, "dryRun": False, "hardDelete": True} + ), + ) + session.post( + rollback_url, + data=json.dumps( + {"runId": ingested_dataset_run_id, "dryRun": False, "hardDelete": True} + ), + ) - assert "browsePaths" not in get_aspects_for_entity(entity_urn=dataset_urn, aspects=["browsePaths"], typed=False) - assert "editableDatasetProperties" not in get_aspects_for_entity(entity_urn=dataset_urn, aspects=["editableDatasetProperties"], typed=False) + assert "browsePaths" not in get_aspects_for_entity( + entity_urn=dataset_urn, aspects=["browsePaths"], typed=False + ) + assert "editableDatasetProperties" not in get_aspects_for_entity( + entity_urn=dataset_urn, aspects=["editableDatasetProperties"], typed=False + ) @pytest.mark.dependency() @@ -49,9 +74,7 @@ def test_rollback_editable(): global ingested_dataset_run_id global ingested_editable_run_id platform = "urn:li:dataPlatform:kafka" - dataset_name = ( - "test-rollback" - ) + dataset_name = "test-rollback" env = "PROD" dataset_urn = f"urn:li:dataset:({platform},{dataset_name},{env})" @@ -59,23 +82,38 @@ def test_rollback_editable(): print("Ingested dataset id:", ingested_dataset_run_id) # Assert that second data ingestion worked - assert "browsePaths" in get_aspects_for_entity(entity_urn=dataset_urn, aspects=["browsePaths"], typed=False) + assert "browsePaths" in get_aspects_for_entity( + entity_urn=dataset_urn, aspects=["browsePaths"], typed=False + ) # Make editable change - ingested_editable_run_id = 
ingest_file_via_rest("tests/cli/cli_editable_test_data.json").config.run_id + ingested_editable_run_id = ingest_file_via_rest( + "tests/cli/cli_editable_test_data.json" + ).config.run_id print("ingested editable id:", ingested_editable_run_id) # Assert that second data ingestion worked - assert "editableDatasetProperties" in get_aspects_for_entity(entity_urn=dataset_urn, aspects=["editableDatasetProperties"], typed=False) + assert "editableDatasetProperties" in get_aspects_for_entity( + entity_urn=dataset_urn, aspects=["editableDatasetProperties"], typed=False + ) # rollback ingestion 1 rollback_url = f"{gms_host}/runs?action=rollback" - session.post(rollback_url, data=json.dumps({"runId": ingested_dataset_run_id, "dryRun": False, "hardDelete": False})) + session.post( + rollback_url, + data=json.dumps( + {"runId": ingested_dataset_run_id, "dryRun": False, "hardDelete": False} + ), + ) # Allow async MCP processor to handle ingestions & rollbacks wait_for_writes_to_sync() # EditableDatasetProperties should still be part of the entity that was soft deleted. - assert "editableDatasetProperties" in get_aspects_for_entity(entity_urn=dataset_urn, aspects=["editableDatasetProperties"], typed=False) + assert "editableDatasetProperties" in get_aspects_for_entity( + entity_urn=dataset_urn, aspects=["editableDatasetProperties"], typed=False + ) # But first ingestion aspects should not be present - assert "browsePaths" not in get_aspects_for_entity(entity_urn=dataset_urn, typed=False) + assert "browsePaths" not in get_aspects_for_entity( + entity_urn=dataset_urn, typed=False + ) diff --git a/smoke-test/tests/cli/datahub_graph_test.py b/smoke-test/tests/cli/datahub_graph_test.py index 16925d26f6983..17c8924fb0998 100644 --- a/smoke-test/tests/cli/datahub_graph_test.py +++ b/smoke-test/tests/cli/datahub_graph_test.py @@ -1,13 +1,11 @@ import pytest import tenacity from datahub.ingestion.graph.client import DatahubClientConfig, DataHubGraph -from datahub.metadata.schema_classes import KafkaSchemaClass, SchemaMetadataClass -from tests.utils import ( - delete_urns_from_file, - get_gms_url, - get_sleep_info, - ingest_file_via_rest, -) +from datahub.metadata.schema_classes import (KafkaSchemaClass, + SchemaMetadataClass) + +from tests.utils import (delete_urns_from_file, get_gms_url, get_sleep_info, + ingest_file_via_rest) sleep_sec, sleep_times = get_sleep_info() diff --git a/smoke-test/tests/cli/delete_cmd/test_timeseries_delete.py b/smoke-test/tests/cli/delete_cmd/test_timeseries_delete.py index 4288a61b7a0c1..106da7cd8d71e 100644 --- a/smoke-test/tests/cli/delete_cmd/test_timeseries_delete.py +++ b/smoke-test/tests/cli/delete_cmd/test_timeseries_delete.py @@ -1,21 +1,22 @@ import json import logging +import sys import tempfile import time -import sys from json import JSONDecodeError from typing import Any, Dict, List, Optional -from click.testing import CliRunner, Result - import datahub.emitter.mce_builder as builder +from click.testing import CliRunner, Result from datahub.emitter.serialization_helper import pre_json_transform from datahub.entrypoints import datahub from datahub.metadata.schema_classes import DatasetProfileClass + +import requests_wrapper as requests from tests.aspect_generators.timeseries.dataset_profile_gen import \ gen_dataset_profiles -from tests.utils import get_strftime_from_timestamp_millis, wait_for_writes_to_sync -import requests_wrapper as requests +from tests.utils import (get_strftime_from_timestamp_millis, + wait_for_writes_to_sync) logger = logging.getLogger(__name__) @@ 
-33,6 +34,7 @@ def sync_elastic() -> None: wait_for_writes_to_sync() + def datahub_put_profile(dataset_profile: DatasetProfileClass) -> None: with tempfile.NamedTemporaryFile("w+t", suffix=".json") as aspect_file: aspect_text: str = json.dumps(pre_json_transform(dataset_profile.to_obj())) diff --git a/smoke-test/tests/cli/ingest_cmd/test_timeseries_rollback.py b/smoke-test/tests/cli/ingest_cmd/test_timeseries_rollback.py index 61e7a5a65b494..e962b1a5cafd6 100644 --- a/smoke-test/tests/cli/ingest_cmd/test_timeseries_rollback.py +++ b/smoke-test/tests/cli/ingest_cmd/test_timeseries_rollback.py @@ -2,14 +2,14 @@ import time from typing import Any, Dict, List, Optional -from click.testing import CliRunner, Result - import datahub.emitter.mce_builder as builder +from click.testing import CliRunner, Result from datahub.emitter.serialization_helper import post_json_transform from datahub.entrypoints import datahub from datahub.metadata.schema_classes import DatasetProfileClass -from tests.utils import ingest_file_via_rest, wait_for_writes_to_sync + import requests_wrapper as requests +from tests.utils import ingest_file_via_rest, wait_for_writes_to_sync runner = CliRunner(mix_stderr=False) diff --git a/smoke-test/tests/cli/user_groups_cmd/test_group_cmd.py b/smoke-test/tests/cli/user_groups_cmd/test_group_cmd.py index 405e061c016f9..7b986d3be0444 100644 --- a/smoke-test/tests/cli/user_groups_cmd/test_group_cmd.py +++ b/smoke-test/tests/cli/user_groups_cmd/test_group_cmd.py @@ -1,6 +1,7 @@ import json import sys import tempfile +import time from typing import Any, Dict, Iterable, List import yaml @@ -8,7 +9,7 @@ from datahub.api.entities.corpgroup.corpgroup import CorpGroup from datahub.entrypoints import datahub from datahub.ingestion.graph.client import DataHubGraph, get_default_graph -import time + import requests_wrapper as requests from tests.utils import wait_for_writes_to_sync diff --git a/smoke-test/tests/conftest.py b/smoke-test/tests/conftest.py index eed7a983197ef..57b92a2db1c19 100644 --- a/smoke-test/tests/conftest.py +++ b/smoke-test/tests/conftest.py @@ -2,8 +2,8 @@ import pytest -from tests.utils import wait_for_healthcheck_util, get_frontend_session from tests.test_result_msg import send_message +from tests.utils import get_frontend_session, wait_for_healthcheck_util # Disable telemetry os.environ["DATAHUB_TELEMETRY_ENABLED"] = "false" @@ -28,5 +28,5 @@ def test_healthchecks(wait_for_healthchecks): def pytest_sessionfinish(session, exitstatus): - """ whole test run finishes. 
""" + """whole test run finishes.""" send_message(exitstatus) diff --git a/smoke-test/tests/consistency_utils.py b/smoke-test/tests/consistency_utils.py index 15993733c592b..607835bf3649c 100644 --- a/smoke-test/tests/consistency_utils.py +++ b/smoke-test/tests/consistency_utils.py @@ -1,10 +1,16 @@ -import time +import logging import os import subprocess +import time _ELASTIC_BUFFER_WRITES_TIME_IN_SEC: int = 1 USE_STATIC_SLEEP: bool = bool(os.getenv("USE_STATIC_SLEEP", False)) -ELASTICSEARCH_REFRESH_INTERVAL_SECONDS: int = int(os.getenv("ELASTICSEARCH_REFRESH_INTERVAL_SECONDS", 5)) +ELASTICSEARCH_REFRESH_INTERVAL_SECONDS: int = int( + os.getenv("ELASTICSEARCH_REFRESH_INTERVAL_SECONDS", 5) +) + +logger = logging.getLogger(__name__) + def wait_for_writes_to_sync(max_timeout_in_sec: int = 120) -> None: if USE_STATIC_SLEEP: @@ -30,7 +36,9 @@ def wait_for_writes_to_sync(max_timeout_in_sec: int = 120) -> None: lag_zero = True if not lag_zero: - logger.warning(f"Exiting early from waiting for elastic to catch up due to a timeout. Current lag is {lag_values}") + logger.warning( + f"Exiting early from waiting for elastic to catch up due to a timeout. Current lag is {lag_values}" + ) else: # we want to sleep for an additional period of time for Elastic writes buffer to clear - time.sleep(_ELASTIC_BUFFER_WRITES_TIME_IN_SEC) \ No newline at end of file + time.sleep(_ELASTIC_BUFFER_WRITES_TIME_IN_SEC) diff --git a/smoke-test/tests/containers/containers_test.py b/smoke-test/tests/containers/containers_test.py index 575e3def6cf23..05a45239dabf8 100644 --- a/smoke-test/tests/containers/containers_test.py +++ b/smoke-test/tests/containers/containers_test.py @@ -1,5 +1,7 @@ import pytest -from tests.utils import delete_urns_from_file, get_frontend_url, ingest_file_via_rest + +from tests.utils import (delete_urns_from_file, get_frontend_url, + ingest_file_via_rest) @pytest.fixture(scope="module", autouse=False) diff --git a/smoke-test/tests/cypress/integration_test.py b/smoke-test/tests/cypress/integration_test.py index b3bacf39ac7ae..4ad2bc53fa87d 100644 --- a/smoke-test/tests/cypress/integration_test.py +++ b/smoke-test/tests/cypress/integration_test.py @@ -1,18 +1,16 @@ -from typing import Set, List - import datetime -import pytest -import subprocess import os +import subprocess +from typing import List, Set + +import pytest + +from tests.setup.lineage.ingest_time_lineage import (get_time_lineage_urns, + ingest_time_lineage) +from tests.utils import (create_datahub_step_state_aspects, delete_urns, + delete_urns_from_file, get_admin_username, + ingest_file_via_rest) -from tests.utils import ( - create_datahub_step_state_aspects, - get_admin_username, - ingest_file_via_rest, - delete_urns_from_file, - delete_urns, -) -from tests.setup.lineage.ingest_time_lineage import ingest_time_lineage, get_time_lineage_urns CYPRESS_TEST_DATA_DIR = "tests/cypress" TEST_DATA_FILENAME = "data.json" @@ -145,7 +143,6 @@ def ingest_cleanup_data(): delete_urns_from_file(f"{CYPRESS_TEST_DATA_DIR}/{TEST_ONBOARDING_DATA_FILENAME}") delete_urns(get_time_lineage_urns()) - print_now() print("deleting onboarding data file") if os.path.exists(f"{CYPRESS_TEST_DATA_DIR}/{TEST_ONBOARDING_DATA_FILENAME}"): diff --git a/smoke-test/tests/dataproduct/test_dataproduct.py b/smoke-test/tests/dataproduct/test_dataproduct.py index db198098f21fa..baef1cb1cb3ba 100644 --- a/smoke-test/tests/dataproduct/test_dataproduct.py +++ b/smoke-test/tests/dataproduct/test_dataproduct.py @@ -1,4 +1,6 @@ +import logging import os +import subprocess import 
tempfile import time from random import randint @@ -17,8 +19,6 @@ DomainPropertiesClass, DomainsClass) from datahub.utilities.urns.urn import Urn -import subprocess -import logging logger = logging.getLogger(__name__) diff --git a/smoke-test/tests/delete/delete_test.py b/smoke-test/tests/delete/delete_test.py index 68e001f983fbf..d920faaf3a89a 100644 --- a/smoke-test/tests/delete/delete_test.py +++ b/smoke-test/tests/delete/delete_test.py @@ -1,16 +1,14 @@ -import os import json -import pytest +import os from time import sleep + +import pytest from datahub.cli.cli_utils import get_aspects_for_entity from datahub.cli.ingest_cli import get_session_and_host -from tests.utils import ( - ingest_file_via_rest, - wait_for_healthcheck_util, - delete_urns_from_file, - wait_for_writes_to_sync, - get_datahub_graph, -) + +from tests.utils import (delete_urns_from_file, get_datahub_graph, + ingest_file_via_rest, wait_for_healthcheck_util, + wait_for_writes_to_sync) # Disable telemetry os.environ["DATAHUB_TELEMETRY_ENABLED"] = "false" @@ -102,7 +100,7 @@ def test_delete_reference(test_setup, depends=["test_healthchecks"]): graph.delete_references_to_urn(tag_urn, dry_run=False) wait_for_writes_to_sync() - + # Validate that references no longer exist references_count, related_aspects = graph.delete_references_to_urn( tag_urn, dry_run=True diff --git a/smoke-test/tests/deprecation/deprecation_test.py b/smoke-test/tests/deprecation/deprecation_test.py index 1149a970aa8e5..a8969804d03d7 100644 --- a/smoke-test/tests/deprecation/deprecation_test.py +++ b/smoke-test/tests/deprecation/deprecation_test.py @@ -1,10 +1,7 @@ import pytest -from tests.utils import ( - delete_urns_from_file, - get_frontend_url, - ingest_file_via_rest, - get_root_urn, -) + +from tests.utils import (delete_urns_from_file, get_frontend_url, get_root_urn, + ingest_file_via_rest) @pytest.fixture(scope="module", autouse=True) diff --git a/smoke-test/tests/domains/domains_test.py b/smoke-test/tests/domains/domains_test.py index 7ffe1682cafd8..fa8c918e3cbe1 100644 --- a/smoke-test/tests/domains/domains_test.py +++ b/smoke-test/tests/domains/domains_test.py @@ -1,12 +1,8 @@ import pytest import tenacity -from tests.utils import ( - delete_urns_from_file, - get_frontend_url, - get_gms_url, - ingest_file_via_rest, - get_sleep_info, -) + +from tests.utils import (delete_urns_from_file, get_frontend_url, get_gms_url, + get_sleep_info, ingest_file_via_rest) sleep_sec, sleep_times = get_sleep_info() @@ -240,4 +236,7 @@ def test_set_unset_domain(frontend_session, ingest_cleanup_data): assert res_data assert res_data["data"]["dataset"]["domain"]["domain"]["urn"] == domain_urn - assert res_data["data"]["dataset"]["domain"]["domain"]["properties"]["name"] == "Engineering" + assert ( + res_data["data"]["dataset"]["domain"]["domain"]["properties"]["name"] + == "Engineering" + ) diff --git a/smoke-test/tests/managed-ingestion/managed_ingestion_test.py b/smoke-test/tests/managed-ingestion/managed_ingestion_test.py index 1238a1dd5730a..b5e408731334e 100644 --- a/smoke-test/tests/managed-ingestion/managed_ingestion_test.py +++ b/smoke-test/tests/managed-ingestion/managed_ingestion_test.py @@ -3,7 +3,8 @@ import pytest import tenacity -from tests.utils import get_frontend_url, get_sleep_info, wait_for_healthcheck_util +from tests.utils import (get_frontend_url, get_sleep_info, + wait_for_healthcheck_util) sleep_sec, sleep_times = get_sleep_info() diff --git a/smoke-test/tests/patch/common_patch_tests.py b/smoke-test/tests/patch/common_patch_tests.py index 
574e4fd4e4c88..f1d6abf5da794 100644 --- a/smoke-test/tests/patch/common_patch_tests.py +++ b/smoke-test/tests/patch/common_patch_tests.py @@ -2,25 +2,17 @@ import uuid from typing import Dict, Optional, Type -from datahub.emitter.mce_builder import ( - make_tag_urn, - make_term_urn, - make_user_urn, -) +from datahub.emitter.mce_builder import (make_tag_urn, make_term_urn, + make_user_urn) from datahub.emitter.mcp import MetadataChangeProposalWrapper from datahub.emitter.mcp_patch_builder import MetadataPatchProposal from datahub.ingestion.graph.client import DataHubGraph, DataHubGraphConfig -from datahub.metadata.schema_classes import ( - AuditStampClass, - GlobalTagsClass, - GlossaryTermAssociationClass, - GlossaryTermsClass, - OwnerClass, - OwnershipClass, - OwnershipTypeClass, - TagAssociationClass, - _Aspect, -) +from datahub.metadata.schema_classes import (AuditStampClass, GlobalTagsClass, + GlossaryTermAssociationClass, + GlossaryTermsClass, OwnerClass, + OwnershipClass, + OwnershipTypeClass, + TagAssociationClass, _Aspect) def helper_test_entity_terms_patch( @@ -34,18 +26,14 @@ def get_terms(graph, entity_urn): term_urn = make_term_urn(term=f"testTerm-{uuid.uuid4()}") - term_association = GlossaryTermAssociationClass( - urn=term_urn, context="test" - ) + term_association = GlossaryTermAssociationClass(urn=term_urn, context="test") global_terms = GlossaryTermsClass( terms=[term_association], auditStamp=AuditStampClass( time=int(time.time() * 1000.0), actor=make_user_urn("tester") ), ) - mcpw = MetadataChangeProposalWrapper( - entityUrn=test_entity_urn, aspect=global_terms - ) + mcpw = MetadataChangeProposalWrapper(entityUrn=test_entity_urn, aspect=global_terms) with DataHubGraph(DataHubGraphConfig()) as graph: graph.emit_mcp(mcpw) @@ -88,9 +76,7 @@ def helper_test_dataset_tags_patch( tag_association = TagAssociationClass(tag=tag_urn, context="test") global_tags = GlobalTagsClass(tags=[tag_association]) - mcpw = MetadataChangeProposalWrapper( - entityUrn=test_entity_urn, aspect=global_tags - ) + mcpw = MetadataChangeProposalWrapper(entityUrn=test_entity_urn, aspect=global_tags) with DataHubGraph(DataHubGraphConfig()) as graph: graph.emit_mcp(mcpw) @@ -153,15 +139,11 @@ def helper_test_ownership_patch( assert owner.owners[0].owner == make_user_urn("jdoe") for patch_mcp in ( - patch_builder_class(test_entity_urn) - .add_owner(owner_to_add) - .build() + patch_builder_class(test_entity_urn).add_owner(owner_to_add).build() ): graph.emit_mcp(patch_mcp) - owner = graph.get_aspect( - entity_urn=test_entity_urn, aspect_type=OwnershipClass - ) + owner = graph.get_aspect(entity_urn=test_entity_urn, aspect_type=OwnershipClass) assert len(owner.owners) == 2 for patch_mcp in ( @@ -171,9 +153,7 @@ def helper_test_ownership_patch( ): graph.emit_mcp(patch_mcp) - owner = graph.get_aspect( - entity_urn=test_entity_urn, aspect_type=OwnershipClass - ) + owner = graph.get_aspect(entity_urn=test_entity_urn, aspect_type=OwnershipClass) assert len(owner.owners) == 1 assert owner.owners[0].owner == make_user_urn("jdoe") @@ -199,9 +179,7 @@ def get_custom_properties( orig_aspect = base_aspect assert hasattr(orig_aspect, "customProperties") orig_aspect.customProperties = base_property_map - mcpw = MetadataChangeProposalWrapper( - entityUrn=test_entity_urn, aspect=orig_aspect - ) + mcpw = MetadataChangeProposalWrapper(entityUrn=test_entity_urn, aspect=orig_aspect) with DataHubGraph(DataHubGraphConfig()) as graph: graph.emit(mcpw) diff --git a/smoke-test/tests/patch/test_datajob_patches.py 
b/smoke-test/tests/patch/test_datajob_patches.py index 407410ee89914..342d5d683228a 100644 --- a/smoke-test/tests/patch/test_datajob_patches.py +++ b/smoke-test/tests/patch/test_datajob_patches.py @@ -3,19 +3,14 @@ from datahub.emitter.mce_builder import make_data_job_urn, make_dataset_urn from datahub.emitter.mcp import MetadataChangeProposalWrapper from datahub.ingestion.graph.client import DataHubGraph, DataHubGraphConfig -from datahub.metadata.schema_classes import ( - DataJobInfoClass, - DataJobInputOutputClass, - EdgeClass, -) +from datahub.metadata.schema_classes import (DataJobInfoClass, + DataJobInputOutputClass, + EdgeClass) from datahub.specific.datajob import DataJobPatchBuilder from tests.patch.common_patch_tests import ( - helper_test_custom_properties_patch, - helper_test_dataset_tags_patch, - helper_test_entity_terms_patch, - helper_test_ownership_patch, -) + helper_test_custom_properties_patch, helper_test_dataset_tags_patch, + helper_test_entity_terms_patch, helper_test_ownership_patch) def _make_test_datajob_urn( @@ -37,16 +32,12 @@ def test_datajob_ownership_patch(wait_for_healthchecks): # Tags def test_datajob_tags_patch(wait_for_healthchecks): - helper_test_dataset_tags_patch( - _make_test_datajob_urn(), DataJobPatchBuilder - ) + helper_test_dataset_tags_patch(_make_test_datajob_urn(), DataJobPatchBuilder) # Terms def test_dataset_terms_patch(wait_for_healthchecks): - helper_test_entity_terms_patch( - _make_test_datajob_urn(), DataJobPatchBuilder - ) + helper_test_entity_terms_patch(_make_test_datajob_urn(), DataJobPatchBuilder) # Custom Properties diff --git a/smoke-test/tests/patch/test_dataset_patches.py b/smoke-test/tests/patch/test_dataset_patches.py index 239aab64675d8..6704d19760fb9 100644 --- a/smoke-test/tests/patch/test_dataset_patches.py +++ b/smoke-test/tests/patch/test_dataset_patches.py @@ -20,7 +20,10 @@ UpstreamClass, UpstreamLineageClass) from datahub.specific.dataset import DatasetPatchBuilder -from tests.patch.common_patch_tests import helper_test_entity_terms_patch, helper_test_dataset_tags_patch, helper_test_ownership_patch, helper_test_custom_properties_patch + +from tests.patch.common_patch_tests import ( + helper_test_custom_properties_patch, helper_test_dataset_tags_patch, + helper_test_entity_terms_patch, helper_test_ownership_patch) # Common Aspect Patch Tests @@ -31,6 +34,7 @@ def test_dataset_ownership_patch(wait_for_healthchecks): ) helper_test_ownership_patch(dataset_urn, DatasetPatchBuilder) + # Tags def test_dataset_tags_patch(wait_for_healthchecks): dataset_urn = make_dataset_urn( @@ -38,6 +42,7 @@ def test_dataset_tags_patch(wait_for_healthchecks): ) helper_test_dataset_tags_patch(dataset_urn, DatasetPatchBuilder) + # Terms def test_dataset_terms_patch(wait_for_healthchecks): dataset_urn = make_dataset_urn( @@ -284,8 +289,15 @@ def test_custom_properties_patch(wait_for_healthchecks): dataset_urn = make_dataset_urn( platform="hive", name=f"SampleHiveDataset-{uuid.uuid4()}", env="PROD" ) - orig_dataset_properties = DatasetPropertiesClass(name="test_name", description="test_description") - helper_test_custom_properties_patch(test_entity_urn=dataset_urn, patch_builder_class=DatasetPatchBuilder, custom_properties_aspect_class=DatasetPropertiesClass, base_aspect=orig_dataset_properties) + orig_dataset_properties = DatasetPropertiesClass( + name="test_name", description="test_description" + ) + helper_test_custom_properties_patch( + test_entity_urn=dataset_urn, + patch_builder_class=DatasetPatchBuilder, + 
custom_properties_aspect_class=DatasetPropertiesClass, + base_aspect=orig_dataset_properties, + ) with DataHubGraph(DataHubGraphConfig()) as graph: # Patch custom properties along with name diff --git a/smoke-test/tests/policies/test_policies.py b/smoke-test/tests/policies/test_policies.py index b7091541894dd..67142181d2b96 100644 --- a/smoke-test/tests/policies/test_policies.py +++ b/smoke-test/tests/policies/test_policies.py @@ -1,12 +1,8 @@ import pytest import tenacity -from tests.utils import ( - get_frontend_url, - wait_for_healthcheck_util, - get_frontend_session, - get_sleep_info, - get_root_urn, -) + +from tests.utils import (get_frontend_session, get_frontend_url, get_root_urn, + get_sleep_info, wait_for_healthcheck_util) TEST_POLICY_NAME = "Updated Platform Policy" diff --git a/smoke-test/tests/setup/lineage/helper_classes.py b/smoke-test/tests/setup/lineage/helper_classes.py index 53f77b08d15ed..d550f3093be85 100644 --- a/smoke-test/tests/setup/lineage/helper_classes.py +++ b/smoke-test/tests/setup/lineage/helper_classes.py @@ -1,10 +1,7 @@ from dataclasses import dataclass from typing import Any, Dict, List, Optional -from datahub.metadata.schema_classes import ( - EdgeClass, - SchemaFieldDataTypeClass, -) +from datahub.metadata.schema_classes import EdgeClass, SchemaFieldDataTypeClass @dataclass diff --git a/smoke-test/tests/setup/lineage/ingest_data_job_change.py b/smoke-test/tests/setup/lineage/ingest_data_job_change.py index 8e3e9c5352922..588a1625419bc 100644 --- a/smoke-test/tests/setup/lineage/ingest_data_job_change.py +++ b/smoke-test/tests/setup/lineage/ingest_data_job_change.py @@ -1,36 +1,20 @@ from typing import List -from datahub.emitter.mce_builder import ( - make_dataset_urn, - make_data_flow_urn, - make_data_job_urn_with_flow, -) +from datahub.emitter.mce_builder import (make_data_flow_urn, + make_data_job_urn_with_flow, + make_dataset_urn) from datahub.emitter.rest_emitter import DatahubRestEmitter -from datahub.metadata.schema_classes import ( - DateTypeClass, - NumberTypeClass, - SchemaFieldDataTypeClass, - StringTypeClass, -) +from datahub.metadata.schema_classes import (DateTypeClass, NumberTypeClass, + SchemaFieldDataTypeClass, + StringTypeClass) -from tests.setup.lineage.constants import ( - AIRFLOW_DATA_PLATFORM, - SNOWFLAKE_DATA_PLATFORM, - TIMESTAMP_MILLIS_EIGHT_DAYS_AGO, - TIMESTAMP_MILLIS_ONE_DAY_AGO, -) -from tests.setup.lineage.helper_classes import ( - Field, - Dataset, - Task, - Pipeline, -) -from tests.setup.lineage.utils import ( - create_edge, - create_node, - create_nodes_and_edges, - emit_mcps, -) +from tests.setup.lineage.constants import (AIRFLOW_DATA_PLATFORM, + SNOWFLAKE_DATA_PLATFORM, + TIMESTAMP_MILLIS_EIGHT_DAYS_AGO, + TIMESTAMP_MILLIS_ONE_DAY_AGO) +from tests.setup.lineage.helper_classes import Dataset, Field, Pipeline, Task +from tests.setup.lineage.utils import (create_edge, create_node, + create_nodes_and_edges, emit_mcps) # Constants for Case 2 DAILY_TEMPERATURE_DATASET_ID = "climate.daily_temperature" diff --git a/smoke-test/tests/setup/lineage/ingest_dataset_join_change.py b/smoke-test/tests/setup/lineage/ingest_dataset_join_change.py index 35a8e6d5cf02e..bb9f51b6b5e9b 100644 --- a/smoke-test/tests/setup/lineage/ingest_dataset_join_change.py +++ b/smoke-test/tests/setup/lineage/ingest_dataset_join_change.py @@ -1,32 +1,18 @@ from typing import List -from datahub.emitter.mce_builder import ( - make_dataset_urn, -) +from datahub.emitter.mce_builder import make_dataset_urn from datahub.emitter.rest_emitter import 
DatahubRestEmitter -from datahub.metadata.schema_classes import ( - NumberTypeClass, - SchemaFieldDataTypeClass, - StringTypeClass, - UpstreamClass, -) +from datahub.metadata.schema_classes import (NumberTypeClass, + SchemaFieldDataTypeClass, + StringTypeClass, UpstreamClass) -from tests.setup.lineage.constants import ( - DATASET_ENTITY_TYPE, - SNOWFLAKE_DATA_PLATFORM, - TIMESTAMP_MILLIS_EIGHT_DAYS_AGO, - TIMESTAMP_MILLIS_ONE_DAY_AGO, -) -from tests.setup.lineage.helper_classes import ( - Field, - Dataset, -) -from tests.setup.lineage.utils import ( - create_node, - create_upstream_edge, - create_upstream_mcp, - emit_mcps, -) +from tests.setup.lineage.constants import (DATASET_ENTITY_TYPE, + SNOWFLAKE_DATA_PLATFORM, + TIMESTAMP_MILLIS_EIGHT_DAYS_AGO, + TIMESTAMP_MILLIS_ONE_DAY_AGO) +from tests.setup.lineage.helper_classes import Dataset, Field +from tests.setup.lineage.utils import (create_node, create_upstream_edge, + create_upstream_mcp, emit_mcps) # Constants for Case 3 GDP_DATASET_ID = "economic_data.gdp" diff --git a/smoke-test/tests/setup/lineage/ingest_input_datasets_change.py b/smoke-test/tests/setup/lineage/ingest_input_datasets_change.py index f4fb795147478..6079d7a3d2b63 100644 --- a/smoke-test/tests/setup/lineage/ingest_input_datasets_change.py +++ b/smoke-test/tests/setup/lineage/ingest_input_datasets_change.py @@ -1,36 +1,20 @@ from typing import List -from datahub.emitter.mce_builder import ( - make_dataset_urn, - make_data_flow_urn, - make_data_job_urn_with_flow, -) +from datahub.emitter.mce_builder import (make_data_flow_urn, + make_data_job_urn_with_flow, + make_dataset_urn) from datahub.emitter.rest_emitter import DatahubRestEmitter -from datahub.metadata.schema_classes import ( - NumberTypeClass, - SchemaFieldDataTypeClass, - StringTypeClass, -) - -from tests.setup.lineage.constants import ( - AIRFLOW_DATA_PLATFORM, - BQ_DATA_PLATFORM, - TIMESTAMP_MILLIS_EIGHT_DAYS_AGO, - TIMESTAMP_MILLIS_ONE_DAY_AGO, -) -from tests.setup.lineage.helper_classes import ( - Field, - Dataset, - Task, - Pipeline, -) -from tests.setup.lineage.utils import ( - create_edge, - create_node, - create_nodes_and_edges, - emit_mcps, -) +from datahub.metadata.schema_classes import (NumberTypeClass, + SchemaFieldDataTypeClass, + StringTypeClass) +from tests.setup.lineage.constants import (AIRFLOW_DATA_PLATFORM, + BQ_DATA_PLATFORM, + TIMESTAMP_MILLIS_EIGHT_DAYS_AGO, + TIMESTAMP_MILLIS_ONE_DAY_AGO) +from tests.setup.lineage.helper_classes import Dataset, Field, Pipeline, Task +from tests.setup.lineage.utils import (create_edge, create_node, + create_nodes_and_edges, emit_mcps) # Constants for Case 1 TRANSACTIONS_DATASET_ID = "transactions.transactions" diff --git a/smoke-test/tests/setup/lineage/ingest_time_lineage.py b/smoke-test/tests/setup/lineage/ingest_time_lineage.py index cae8e0124d501..3aec979707290 100644 --- a/smoke-test/tests/setup/lineage/ingest_time_lineage.py +++ b/smoke-test/tests/setup/lineage/ingest_time_lineage.py @@ -1,12 +1,14 @@ +import os from typing import List from datahub.emitter.rest_emitter import DatahubRestEmitter -from tests.setup.lineage.ingest_input_datasets_change import ingest_input_datasets_change, get_input_datasets_change_urns -from tests.setup.lineage.ingest_data_job_change import ingest_data_job_change, get_data_job_change_urns -from tests.setup.lineage.ingest_dataset_join_change import ingest_dataset_join_change, get_dataset_join_change_urns - -import os +from tests.setup.lineage.ingest_data_job_change import ( + get_data_job_change_urns, ingest_data_job_change) 
+from tests.setup.lineage.ingest_dataset_join_change import ( + get_dataset_join_change_urns, ingest_dataset_join_change) +from tests.setup.lineage.ingest_input_datasets_change import ( + get_input_datasets_change_urns, ingest_input_datasets_change) SERVER = os.getenv("DATAHUB_SERVER") or "http://localhost:8080" TOKEN = os.getenv("DATAHUB_TOKEN") or "" @@ -20,4 +22,8 @@ def ingest_time_lineage() -> None: def get_time_lineage_urns() -> List[str]: - return get_input_datasets_change_urns() + get_data_job_change_urns() + get_dataset_join_change_urns() + return ( + get_input_datasets_change_urns() + + get_data_job_change_urns() + + get_dataset_join_change_urns() + ) diff --git a/smoke-test/tests/setup/lineage/utils.py b/smoke-test/tests/setup/lineage/utils.py index 672f7a945a6af..c72f6ccb89b7a 100644 --- a/smoke-test/tests/setup/lineage/utils.py +++ b/smoke-test/tests/setup/lineage/utils.py @@ -1,41 +1,30 @@ import datetime -from datahub.emitter.mce_builder import ( - make_data_platform_urn, - make_dataset_urn, - make_data_job_urn_with_flow, - make_data_flow_urn, -) +from typing import List + +from datahub.emitter.mce_builder import (make_data_flow_urn, + make_data_job_urn_with_flow, + make_data_platform_urn, + make_dataset_urn) from datahub.emitter.mcp import MetadataChangeProposalWrapper from datahub.emitter.rest_emitter import DatahubRestEmitter from datahub.metadata.com.linkedin.pegasus2avro.dataset import UpstreamLineage -from datahub.metadata.schema_classes import ( - AuditStampClass, - ChangeTypeClass, - DatasetLineageTypeClass, - DatasetPropertiesClass, - DataFlowInfoClass, - DataJobInputOutputClass, - DataJobInfoClass, - EdgeClass, - MySqlDDLClass, - SchemaFieldClass, - SchemaMetadataClass, - UpstreamClass, -) -from typing import List - -from tests.setup.lineage.constants import ( - DATASET_ENTITY_TYPE, - DATA_JOB_ENTITY_TYPE, - DATA_FLOW_ENTITY_TYPE, - DATA_FLOW_INFO_ASPECT_NAME, - DATA_JOB_INFO_ASPECT_NAME, - DATA_JOB_INPUT_OUTPUT_ASPECT_NAME, -) -from tests.setup.lineage.helper_classes import ( - Dataset, - Pipeline, -) +from datahub.metadata.schema_classes import (AuditStampClass, ChangeTypeClass, + DataFlowInfoClass, + DataJobInfoClass, + DataJobInputOutputClass, + DatasetLineageTypeClass, + DatasetPropertiesClass, EdgeClass, + MySqlDDLClass, SchemaFieldClass, + SchemaMetadataClass, + UpstreamClass) + +from tests.setup.lineage.constants import (DATA_FLOW_ENTITY_TYPE, + DATA_FLOW_INFO_ASPECT_NAME, + DATA_JOB_ENTITY_TYPE, + DATA_JOB_INFO_ASPECT_NAME, + DATA_JOB_INPUT_OUTPUT_ASPECT_NAME, + DATASET_ENTITY_TYPE) +from tests.setup.lineage.helper_classes import Dataset, Pipeline def create_node(dataset: Dataset) -> List[MetadataChangeProposalWrapper]: @@ -85,10 +74,10 @@ def create_node(dataset: Dataset) -> List[MetadataChangeProposalWrapper]: def create_edge( - source_urn: str, - destination_urn: str, - created_timestamp_millis: int, - updated_timestamp_millis: int, + source_urn: str, + destination_urn: str, + created_timestamp_millis: int, + updated_timestamp_millis: int, ) -> EdgeClass: created_audit_stamp: AuditStampClass = AuditStampClass( time=created_timestamp_millis, actor="urn:li:corpuser:unknown" @@ -105,7 +94,7 @@ def create_edge( def create_nodes_and_edges( - airflow_dag: Pipeline, + airflow_dag: Pipeline, ) -> List[MetadataChangeProposalWrapper]: mcps = [] data_flow_urn = make_data_flow_urn( @@ -160,9 +149,9 @@ def create_nodes_and_edges( def create_upstream_edge( - upstream_entity_urn: str, - created_timestamp_millis: int, - updated_timestamp_millis: int, + 
upstream_entity_urn: str, + created_timestamp_millis: int, + updated_timestamp_millis: int, ): created_audit_stamp: AuditStampClass = AuditStampClass( time=created_timestamp_millis, actor="urn:li:corpuser:unknown" @@ -180,11 +169,11 @@ def create_upstream_edge( def create_upstream_mcp( - entity_type: str, - entity_urn: str, - upstreams: List[UpstreamClass], - timestamp_millis: int, - run_id: str = "", + entity_type: str, + entity_urn: str, + upstreams: List[UpstreamClass], + timestamp_millis: int, + run_id: str = "", ) -> MetadataChangeProposalWrapper: print(f"Creating upstreamLineage aspect for {entity_urn}") timestamp_millis: int = int(datetime.datetime.now().timestamp() * 1000) @@ -203,7 +192,7 @@ def create_upstream_mcp( def emit_mcps( - emitter: DatahubRestEmitter, mcps: List[MetadataChangeProposalWrapper] + emitter: DatahubRestEmitter, mcps: List[MetadataChangeProposalWrapper] ) -> None: for mcp in mcps: emitter.emit_mcp(mcp) diff --git a/smoke-test/tests/tags-and-terms/tags_and_terms_test.py b/smoke-test/tests/tags-and-terms/tags_and_terms_test.py index b0ca29b544cfe..6ac75765286f0 100644 --- a/smoke-test/tests/tags-and-terms/tags_and_terms_test.py +++ b/smoke-test/tests/tags-and-terms/tags_and_terms_test.py @@ -1,5 +1,7 @@ import pytest -from tests.utils import delete_urns_from_file, get_frontend_url, ingest_file_via_rest, wait_for_healthcheck_util + +from tests.utils import (delete_urns_from_file, get_frontend_url, + ingest_file_via_rest, wait_for_healthcheck_util) @pytest.fixture(scope="module", autouse=True) diff --git a/smoke-test/tests/telemetry/telemetry_test.py b/smoke-test/tests/telemetry/telemetry_test.py index 3672abcda948d..3127061c9f506 100644 --- a/smoke-test/tests/telemetry/telemetry_test.py +++ b/smoke-test/tests/telemetry/telemetry_test.py @@ -7,5 +7,7 @@ def test_no_clientID(): client_id_urn = "urn:li:telemetry:clientId" aspect = ["telemetryClientId"] - res_data = json.dumps(get_aspects_for_entity(entity_urn=client_id_urn, aspects=aspect, typed=False)) + res_data = json.dumps( + get_aspects_for_entity(entity_urn=client_id_urn, aspects=aspect, typed=False) + ) assert res_data == "{}" diff --git a/smoke-test/tests/test_result_msg.py b/smoke-test/tests/test_result_msg.py index e3b336db9d66c..b9775e8ee4acd 100644 --- a/smoke-test/tests/test_result_msg.py +++ b/smoke-test/tests/test_result_msg.py @@ -1,6 +1,6 @@ -from slack_sdk import WebClient import os +from slack_sdk import WebClient datahub_stats = {} @@ -10,10 +10,10 @@ def add_datahub_stats(stat_name, stat_val): def send_to_slack(passed: str): - slack_api_token = os.getenv('SLACK_API_TOKEN') - slack_channel = os.getenv('SLACK_CHANNEL') - slack_thread_ts = os.getenv('SLACK_THREAD_TS') - test_identifier = os.getenv('TEST_IDENTIFIER', 'LOCAL_TEST') + slack_api_token = os.getenv("SLACK_API_TOKEN") + slack_channel = os.getenv("SLACK_CHANNEL") + slack_thread_ts = os.getenv("SLACK_THREAD_TS") + test_identifier = os.getenv("TEST_IDENTIFIER", "LOCAL_TEST") if slack_api_token is None or slack_channel is None: return client = WebClient(token=slack_api_token) @@ -26,14 +26,21 @@ def send_to_slack(passed: str): message += f"Num {entity_type} is {val}\n" if slack_thread_ts is None: - client.chat_postMessage(channel=slack_channel, text=f'{test_identifier} Status - {passed}\n{message}') + client.chat_postMessage( + channel=slack_channel, + text=f"{test_identifier} Status - {passed}\n{message}", + ) else: - client.chat_postMessage(channel=slack_channel, text=f'{test_identifier} Status - {passed}\n{message}', 
thread_ts=slack_thread_ts) + client.chat_postMessage( + channel=slack_channel, + text=f"{test_identifier} Status - {passed}\n{message}", + thread_ts=slack_thread_ts, + ) def send_message(exitstatus): try: - send_to_slack('PASSED' if exitstatus == 0 else 'FAILED') + send_to_slack("PASSED" if exitstatus == 0 else "FAILED") except Exception as e: # We don't want to fail pytest at all print(f"Exception happened for sending msg to slack {e}") diff --git a/smoke-test/tests/test_stateful_ingestion.py b/smoke-test/tests/test_stateful_ingestion.py index a10cf13a08029..c6adb402e5d51 100644 --- a/smoke-test/tests/test_stateful_ingestion.py +++ b/smoke-test/tests/test_stateful_ingestion.py @@ -4,17 +4,15 @@ from datahub.ingestion.run.pipeline import Pipeline from datahub.ingestion.source.sql.mysql import MySQLConfig, MySQLSource from datahub.ingestion.source.state.checkpoint import Checkpoint -from datahub.ingestion.source.state.entity_removal_state import GenericCheckpointState -from datahub.ingestion.source.state.stale_entity_removal_handler import StaleEntityRemovalHandler +from datahub.ingestion.source.state.entity_removal_state import \ + GenericCheckpointState +from datahub.ingestion.source.state.stale_entity_removal_handler import \ + StaleEntityRemovalHandler from sqlalchemy import create_engine from sqlalchemy.sql import text -from tests.utils import ( - get_gms_url, - get_mysql_password, - get_mysql_url, - get_mysql_username, -) +from tests.utils import (get_gms_url, get_mysql_password, get_mysql_url, + get_mysql_username) def test_stateful_ingestion(wait_for_healthchecks): diff --git a/smoke-test/tests/tests/tests_test.py b/smoke-test/tests/tests/tests_test.py index 0b87f90a92c58..213a2ea087b7a 100644 --- a/smoke-test/tests/tests/tests_test.py +++ b/smoke-test/tests/tests/tests_test.py @@ -1,9 +1,13 @@ import pytest import tenacity -from tests.utils import delete_urns_from_file, get_frontend_url, ingest_file_via_rest, wait_for_healthcheck_util, get_sleep_info + +from tests.utils import (delete_urns_from_file, get_frontend_url, + get_sleep_info, ingest_file_via_rest, + wait_for_healthcheck_util) sleep_sec, sleep_times = get_sleep_info() + @pytest.fixture(scope="module", autouse=True) def ingest_cleanup_data(request): print("ingesting test data") @@ -18,6 +22,7 @@ def wait_for_healthchecks(): wait_for_healthcheck_util() yield + @pytest.mark.dependency() def test_healthchecks(wait_for_healthchecks): # Call to wait_for_healthchecks fixture will do the actual functionality. 
diff --git a/smoke-test/tests/timeline/timeline_test.py b/smoke-test/tests/timeline/timeline_test.py index a73d585c6c72d..4705343c1a2ba 100644 --- a/smoke-test/tests/timeline/timeline_test.py +++ b/smoke-test/tests/timeline/timeline_test.py @@ -3,14 +3,14 @@ from datahub.cli import timeline_cli from datahub.cli.cli_utils import guess_entity_type, post_entity -from tests.utils import ingest_file_via_rest, wait_for_writes_to_sync, get_datahub_graph + +from tests.utils import (get_datahub_graph, ingest_file_via_rest, + wait_for_writes_to_sync) def test_all(): platform = "urn:li:dataPlatform:kafka" - dataset_name = ( - "test-timeline-sample-kafka" - ) + dataset_name = "test-timeline-sample-kafka" env = "PROD" dataset_urn = f"urn:li:dataset:({platform},{dataset_name},{env})" @@ -18,8 +18,13 @@ def test_all(): ingest_file_via_rest("tests/timeline/timeline_test_datav2.json") ingest_file_via_rest("tests/timeline/timeline_test_datav3.json") - res_data = timeline_cli.get_timeline(dataset_urn, ["TAG", "DOCUMENTATION", "TECHNICAL_SCHEMA", "GLOSSARY_TERM", - "OWNER"], None, None, False) + res_data = timeline_cli.get_timeline( + dataset_urn, + ["TAG", "DOCUMENTATION", "TECHNICAL_SCHEMA", "GLOSSARY_TERM", "OWNER"], + None, + None, + False, + ) get_datahub_graph().hard_delete_entity(urn=dataset_urn) assert res_data @@ -35,9 +40,7 @@ def test_all(): def test_schema(): platform = "urn:li:dataPlatform:kafka" - dataset_name = ( - "test-timeline-sample-kafka" - ) + dataset_name = "test-timeline-sample-kafka" env = "PROD" dataset_urn = f"urn:li:dataset:({platform},{dataset_name},{env})" @@ -45,7 +48,9 @@ def test_schema(): put(dataset_urn, "schemaMetadata", "test_resources/timeline/newschemav2.json") put(dataset_urn, "schemaMetadata", "test_resources/timeline/newschemav3.json") - res_data = timeline_cli.get_timeline(dataset_urn, ["TECHNICAL_SCHEMA"], None, None, False) + res_data = timeline_cli.get_timeline( + dataset_urn, ["TECHNICAL_SCHEMA"], None, None, False + ) get_datahub_graph().hard_delete_entity(urn=dataset_urn) assert res_data @@ -61,9 +66,7 @@ def test_schema(): def test_glossary(): platform = "urn:li:dataPlatform:kafka" - dataset_name = ( - "test-timeline-sample-kafka" - ) + dataset_name = "test-timeline-sample-kafka" env = "PROD" dataset_urn = f"urn:li:dataset:({platform},{dataset_name},{env})" @@ -71,7 +74,9 @@ def test_glossary(): put(dataset_urn, "glossaryTerms", "test_resources/timeline/newglossaryv2.json") put(dataset_urn, "glossaryTerms", "test_resources/timeline/newglossaryv3.json") - res_data = timeline_cli.get_timeline(dataset_urn, ["GLOSSARY_TERM"], None, None, False) + res_data = timeline_cli.get_timeline( + dataset_urn, ["GLOSSARY_TERM"], None, None, False + ) get_datahub_graph().hard_delete_entity(urn=dataset_urn) assert res_data @@ -87,17 +92,29 @@ def test_glossary(): def test_documentation(): platform = "urn:li:dataPlatform:kafka" - dataset_name = ( - "test-timeline-sample-kafka" - ) + dataset_name = "test-timeline-sample-kafka" env = "PROD" dataset_urn = f"urn:li:dataset:({platform},{dataset_name},{env})" - put(dataset_urn, "institutionalMemory", "test_resources/timeline/newdocumentation.json") - put(dataset_urn, "institutionalMemory", "test_resources/timeline/newdocumentationv2.json") - put(dataset_urn, "institutionalMemory", "test_resources/timeline/newdocumentationv3.json") + put( + dataset_urn, + "institutionalMemory", + "test_resources/timeline/newdocumentation.json", + ) + put( + dataset_urn, + "institutionalMemory", + "test_resources/timeline/newdocumentationv2.json", + ) + 
put( + dataset_urn, + "institutionalMemory", + "test_resources/timeline/newdocumentationv3.json", + ) - res_data = timeline_cli.get_timeline(dataset_urn, ["DOCUMENTATION"], None, None, False) + res_data = timeline_cli.get_timeline( + dataset_urn, ["DOCUMENTATION"], None, None, False + ) get_datahub_graph().hard_delete_entity(urn=dataset_urn) assert res_data @@ -113,9 +130,7 @@ def test_documentation(): def test_tags(): platform = "urn:li:dataPlatform:kafka" - dataset_name = ( - "test-timeline-sample-kafka" - ) + dataset_name = "test-timeline-sample-kafka" env = "PROD" dataset_urn = f"urn:li:dataset:({platform},{dataset_name},{env})" @@ -139,9 +154,7 @@ def test_tags(): def test_ownership(): platform = "urn:li:dataPlatform:kafka" - dataset_name = ( - "test-timeline-sample-kafka" - ) + dataset_name = "test-timeline-sample-kafka" env = "PROD" dataset_urn = f"urn:li:dataset:({platform},{dataset_name},{env})" diff --git a/smoke-test/tests/tokens/revokable_access_token_test.py b/smoke-test/tests/tokens/revokable_access_token_test.py index b10ad3aa3fc2a..55f3de594af4e 100644 --- a/smoke-test/tests/tokens/revokable_access_token_test.py +++ b/smoke-test/tests/tokens/revokable_access_token_test.py @@ -1,15 +1,11 @@ import os -import pytest -import requests from time import sleep -from tests.utils import ( - get_frontend_url, - wait_for_healthcheck_util, - get_admin_credentials, - wait_for_writes_to_sync, -) +import pytest +import requests +from tests.utils import (get_admin_credentials, get_frontend_url, + wait_for_healthcheck_util, wait_for_writes_to_sync) # Disable telemetry os.environ["DATAHUB_TELEMETRY_ENABLED"] = "false" diff --git a/smoke-test/tests/utils.py b/smoke-test/tests/utils.py index af03efd4f71f8..bd75b13d1910f 100644 --- a/smoke-test/tests/utils.py +++ b/smoke-test/tests/utils.py @@ -1,19 +1,20 @@ import functools import json +import logging import os -from datetime import datetime, timedelta, timezone import subprocess import time -from typing import Any, Dict, List, Tuple +from datetime import datetime, timedelta, timezone from time import sleep -from joblib import Parallel, delayed +from typing import Any, Dict, List, Tuple -import requests_wrapper as requests -import logging from datahub.cli import cli_utils from datahub.cli.cli_utils import get_system_auth -from datahub.ingestion.graph.client import DataHubGraph, DatahubClientConfig +from datahub.ingestion.graph.client import DatahubClientConfig, DataHubGraph from datahub.ingestion.run.pipeline import Pipeline +from joblib import Parallel, delayed + +import requests_wrapper as requests from tests.consistency_utils import wait_for_writes_to_sync TIME: int = 1581407189000 @@ -174,6 +175,7 @@ def delete(entry): wait_for_writes_to_sync() + # Fixed now value NOW: datetime = datetime.now() @@ -232,6 +234,3 @@ def create_datahub_step_state_aspects( ] with open(onboarding_filename, "w") as f: json.dump(aspects_dict, f, indent=2) - - - diff --git a/smoke-test/tests/views/views_test.py b/smoke-test/tests/views/views_test.py index 4da69750a167b..685c3bd80b04d 100644 --- a/smoke-test/tests/views/views_test.py +++ b/smoke-test/tests/views/views_test.py @@ -1,16 +1,14 @@ -import pytest import time + +import pytest import tenacity -from tests.utils import ( - delete_urns_from_file, - get_frontend_url, - get_gms_url, - ingest_file_via_rest, - get_sleep_info, -) + +from tests.utils import (delete_urns_from_file, get_frontend_url, get_gms_url, + get_sleep_info, ingest_file_via_rest) sleep_sec, sleep_times = get_sleep_info() + 
@pytest.mark.dependency() def test_healthchecks(wait_for_healthchecks): # Call to wait_for_healthchecks fixture will do the actual functionality. @@ -40,6 +38,7 @@ def _ensure_more_views(frontend_session, list_views_json, query_name, before_cou assert after_count == before_count + 1 return after_count + @tenacity.retry( stop=tenacity.stop_after_attempt(sleep_times), wait=tenacity.wait_fixed(sleep_sec) ) @@ -111,18 +110,18 @@ def test_create_list_delete_global_view(frontend_session): new_view_name = "Test View" new_view_description = "Test Description" new_view_definition = { - "entityTypes": ["DATASET", "DASHBOARD"], - "filter": { - "operator": "AND", - "filters": [ - { - "field": "tags", - "values": ["urn:li:tag:test"], - "negated": False, - "condition": "EQUAL" - } - ] - } + "entityTypes": ["DATASET", "DASHBOARD"], + "filter": { + "operator": "AND", + "filters": [ + { + "field": "tags", + "values": ["urn:li:tag:test"], + "negated": False, + "condition": "EQUAL", + } + ], + }, } # Create new View @@ -137,7 +136,7 @@ def test_create_list_delete_global_view(frontend_session): "viewType": "GLOBAL", "name": new_view_name, "description": new_view_description, - "definition": new_view_definition + "definition": new_view_definition, } }, } @@ -169,9 +168,7 @@ def test_create_list_delete_global_view(frontend_session): "query": """mutation deleteView($urn: String!) {\n deleteView(urn: $urn) }""", - "variables": { - "urn": view_urn - }, + "variables": {"urn": view_urn}, } response = frontend_session.post( @@ -189,7 +186,9 @@ def test_create_list_delete_global_view(frontend_session): ) -@pytest.mark.dependency(depends=["test_healthchecks", "test_create_list_delete_global_view"]) +@pytest.mark.dependency( + depends=["test_healthchecks", "test_create_list_delete_global_view"] +) def test_create_list_delete_personal_view(frontend_session): # Get count of existing views @@ -237,18 +236,18 @@ def test_create_list_delete_personal_view(frontend_session): new_view_name = "Test View" new_view_description = "Test Description" new_view_definition = { - "entityTypes": ["DATASET", "DASHBOARD"], - "filter": { - "operator": "AND", - "filters": [ - { - "field": "tags", - "values": ["urn:li:tag:test"], - "negated": False, - "condition": "EQUAL" - } - ] - } + "entityTypes": ["DATASET", "DASHBOARD"], + "filter": { + "operator": "AND", + "filters": [ + { + "field": "tags", + "values": ["urn:li:tag:test"], + "negated": False, + "condition": "EQUAL", + } + ], + }, } # Create new View @@ -263,7 +262,7 @@ def test_create_list_delete_personal_view(frontend_session): "viewType": "PERSONAL", "name": new_view_name, "description": new_view_description, - "definition": new_view_definition + "definition": new_view_definition, } }, } @@ -293,9 +292,7 @@ def test_create_list_delete_personal_view(frontend_session): "query": """mutation deleteView($urn: String!) 
{\n deleteView(urn: $urn) }""", - "variables": { - "urn": view_urn - }, + "variables": {"urn": view_urn}, } response = frontend_session.post( @@ -312,25 +309,28 @@ def test_create_list_delete_personal_view(frontend_session): before_count=new_count, ) -@pytest.mark.dependency(depends=["test_healthchecks", "test_create_list_delete_personal_view"]) + +@pytest.mark.dependency( + depends=["test_healthchecks", "test_create_list_delete_personal_view"] +) def test_update_global_view(frontend_session): # First create a view new_view_name = "Test View" new_view_description = "Test Description" new_view_definition = { - "entityTypes": ["DATASET", "DASHBOARD"], - "filter": { - "operator": "AND", - "filters": [ - { - "field": "tags", - "values": ["urn:li:tag:test"], - "negated": False, - "condition": "EQUAL" - } - ] - } + "entityTypes": ["DATASET", "DASHBOARD"], + "filter": { + "operator": "AND", + "filters": [ + { + "field": "tags", + "values": ["urn:li:tag:test"], + "negated": False, + "condition": "EQUAL", + } + ], + }, } # Create new View @@ -345,7 +345,7 @@ def test_update_global_view(frontend_session): "viewType": "PERSONAL", "name": new_view_name, "description": new_view_description, - "definition": new_view_definition + "definition": new_view_definition, } }, } @@ -366,18 +366,18 @@ def test_update_global_view(frontend_session): new_view_name = "New Test View" new_view_description = "New Test Description" new_view_definition = { - "entityTypes": ["DATASET", "DASHBOARD", "CHART", "DATA_FLOW"], - "filter": { - "operator": "OR", - "filters": [ - { - "field": "glossaryTerms", - "values": ["urn:li:glossaryTerm:test"], - "negated": True, - "condition": "CONTAIN" - } - ] - } + "entityTypes": ["DATASET", "DASHBOARD", "CHART", "DATA_FLOW"], + "filter": { + "operator": "OR", + "filters": [ + { + "field": "glossaryTerms", + "values": ["urn:li:glossaryTerm:test"], + "negated": True, + "condition": "CONTAIN", + } + ], + }, } update_view_json = { @@ -391,8 +391,8 @@ def test_update_global_view(frontend_session): "input": { "name": new_view_name, "description": new_view_description, - "definition": new_view_definition - } + "definition": new_view_definition, + }, }, } @@ -411,9 +411,7 @@ def test_update_global_view(frontend_session): "query": """mutation deleteView($urn: String!) 
{\n deleteView(urn: $urn) }""", - "variables": { - "urn": view_urn - }, + "variables": {"urn": view_urn}, } response = frontend_session.post( From 6ecdeda5ff590456c6bfadfa5c37821f7281169e Mon Sep 17 00:00:00 2001 From: Aseem Bansal Date: Tue, 10 Oct 2023 16:28:40 +0530 Subject: [PATCH 03/14] fix(setup): drop older table if exists (#8979) --- docker/mariadb/init.sql | 2 ++ docker/mysql-setup/init.sql | 2 ++ docker/mysql/init.sql | 2 ++ docker/postgres-setup/init.sql | 2 ++ docker/postgres/init.sql | 2 ++ 5 files changed, 10 insertions(+) diff --git a/docker/mariadb/init.sql b/docker/mariadb/init.sql index c4132575cf442..95c8cabbc5ca4 100644 --- a/docker/mariadb/init.sql +++ b/docker/mariadb/init.sql @@ -28,3 +28,5 @@ insert into metadata_aspect_v2 (urn, aspect, version, metadata, createdon, creat now(), 'urn:li:corpuser:__datahub_system' ); + +DROP TABLE IF EXISTS metadata_index; diff --git a/docker/mysql-setup/init.sql b/docker/mysql-setup/init.sql index 2370a971941d2..b789329ddfd17 100644 --- a/docker/mysql-setup/init.sql +++ b/docker/mysql-setup/init.sql @@ -39,3 +39,5 @@ INSERT INTO metadata_aspect_v2 SELECT * FROM temp_metadata_aspect_v2 WHERE NOT EXISTS (SELECT * from metadata_aspect_v2); DROP TABLE temp_metadata_aspect_v2; + +DROP TABLE IF EXISTS metadata_index; diff --git a/docker/mysql/init.sql b/docker/mysql/init.sql index b4b4e4617806c..aca57d7cd444c 100644 --- a/docker/mysql/init.sql +++ b/docker/mysql/init.sql @@ -27,3 +27,5 @@ INSERT INTO metadata_aspect_v2 (urn, aspect, version, metadata, createdon, creat now(), 'urn:li:corpuser:__datahub_system' ); + +DROP TABLE IF EXISTS metadata_index; diff --git a/docker/postgres-setup/init.sql b/docker/postgres-setup/init.sql index 12fff7aec7fe6..72b2f73192e00 100644 --- a/docker/postgres-setup/init.sql +++ b/docker/postgres-setup/init.sql @@ -35,3 +35,5 @@ INSERT INTO metadata_aspect_v2 SELECT * FROM temp_metadata_aspect_v2 WHERE NOT EXISTS (SELECT * from metadata_aspect_v2); DROP TABLE temp_metadata_aspect_v2; + +DROP TABLE IF EXISTS metadata_index; diff --git a/docker/postgres/init.sql b/docker/postgres/init.sql index cf477c135422e..87c8dd3337fac 100644 --- a/docker/postgres/init.sql +++ b/docker/postgres/init.sql @@ -28,3 +28,5 @@ insert into metadata_aspect_v2 (urn, aspect, version, metadata, createdon, creat now(), 'urn:li:corpuser:__datahub_system' ); + +DROP TABLE IF EXISTS metadata_index; From 1a72fa499c3404c6c3d2961e9575495f2dd021d2 Mon Sep 17 00:00:00 2001 From: Andrew Sikowitz Date: Tue, 10 Oct 2023 17:34:06 -0400 Subject: [PATCH 04/14] feat(ingest/tableau): Allow parsing of database name from fullName (#8981) --- .../src/datahub/ingestion/source/tableau.py | 74 ++------ .../ingestion/source/tableau_common.py | 162 +++++++++++++----- .../tableau/test_tableau_ingest.py | 34 ++-- 3 files changed, 151 insertions(+), 119 deletions(-) diff --git a/metadata-ingestion/src/datahub/ingestion/source/tableau.py b/metadata-ingestion/src/datahub/ingestion/source/tableau.py index e347cd26d245a..bad7ae49d325e 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/tableau.py +++ b/metadata-ingestion/src/datahub/ingestion/source/tableau.py @@ -77,6 +77,7 @@ FIELD_TYPE_MAPPING, MetadataQueryException, TableauLineageOverrides, + TableauUpstreamReference, clean_query, custom_sql_graphql_query, dashboard_graphql_query, @@ -85,7 +86,6 @@ get_overridden_info, get_unique_custom_sql, make_fine_grained_lineage_class, - make_table_urn, make_upstream_class, published_datasource_graphql_query, query_metadata, @@ -271,7 +271,7 @@ class 
TableauConfig( "You can change this if your Tableau projects contain slashes in their names, and you'd like to filter by project.", ) - default_schema_map: dict = Field( + default_schema_map: Dict[str, str] = Field( default={}, description="Default schema to use when schema is not found." ) ingest_tags: Optional[bool] = Field( @@ -997,41 +997,16 @@ def get_upstream_tables( ) continue - schema = table.get(tableau_constant.SCHEMA) or "" - table_name = table.get(tableau_constant.NAME) or "" - full_name = table.get(tableau_constant.FULL_NAME) or "" - upstream_db = ( - table[tableau_constant.DATABASE][tableau_constant.NAME] - if table.get(tableau_constant.DATABASE) - and table[tableau_constant.DATABASE].get(tableau_constant.NAME) - else "" - ) - logger.debug( - "Processing Table with Connection Type: {0} and id {1}".format( - table.get(tableau_constant.CONNECTION_TYPE) or "", - table.get(tableau_constant.ID) or "", + try: + ref = TableauUpstreamReference.create( + table, default_schema_map=self.config.default_schema_map ) - ) - schema = self._get_schema(schema, upstream_db, full_name) - # if the schema is included within the table name we omit it - if ( - schema - and table_name - and full_name - and table_name == full_name - and schema in table_name - ): - logger.debug( - f"Omitting schema for upstream table {table[tableau_constant.ID]}, schema included in table name" - ) - schema = "" + except Exception as e: + logger.info(f"Failed to generate upstream reference for {table}: {e}") + continue - table_urn = make_table_urn( + table_urn = ref.make_dataset_urn( self.config.env, - upstream_db, - table.get(tableau_constant.CONNECTION_TYPE) or "", - schema, - table_name, self.config.platform_instance_map, self.config.lineage_overrides, ) @@ -1052,7 +1027,7 @@ def get_upstream_tables( urn=table_urn, id=table[tableau_constant.ID], num_cols=num_tbl_cols, - paths=set([table_path]) if table_path else set(), + paths={table_path} if table_path else set(), ) else: self.database_tables[table_urn].update_table( @@ -2462,35 +2437,6 @@ def emit_embedded_datasources(self) -> Iterable[MetadataWorkUnit]: is_embedded_ds=True, ) - @lru_cache(maxsize=None) - def _get_schema(self, schema_provided: str, database: str, fullName: str) -> str: - # For some databases, the schema attribute in tableau api does not return - # correct schema name for the table. For more information, see - # https://help.tableau.com/current/api/metadata_api/en-us/docs/meta_api_model.html#schema_attribute. 
- # Hence we extract schema from fullName whenever fullName is available - schema = self._extract_schema_from_fullName(fullName) if fullName else "" - if not schema: - schema = schema_provided - elif schema != schema_provided: - logger.debug( - "Correcting schema, provided {0}, corrected {1}".format( - schema_provided, schema - ) - ) - - if not schema and database in self.config.default_schema_map: - schema = self.config.default_schema_map[database] - - return schema - - @lru_cache(maxsize=None) - def _extract_schema_from_fullName(self, fullName: str) -> str: - # fullName is observed to be in format [schemaName].[tableName] - # OR simply tableName OR [tableName] - if fullName.startswith("[") and "].[" in fullName: - return fullName[1 : fullName.index("]")] - return "" - @lru_cache(maxsize=None) def get_last_modified( self, creator: Optional[str], created_at: bytes, updated_at: bytes diff --git a/metadata-ingestion/src/datahub/ingestion/source/tableau_common.py b/metadata-ingestion/src/datahub/ingestion/source/tableau_common.py index 2c92285fdba77..7c4852042ce7c 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/tableau_common.py +++ b/metadata-ingestion/src/datahub/ingestion/source/tableau_common.py @@ -1,4 +1,6 @@ import html +import logging +from dataclasses import dataclass from functools import lru_cache from typing import Dict, List, Optional, Tuple @@ -6,6 +8,7 @@ import datahub.emitter.mce_builder as builder from datahub.configuration.common import ConfigModel +from datahub.ingestion.source import tableau_constant as tc from datahub.metadata.com.linkedin.pegasus2avro.dataset import ( DatasetLineageType, FineGrainedLineage, @@ -31,6 +34,8 @@ ) from datahub.utilities.sqlglot_lineage import ColumnLineageInfo, SqlParsingResult +logger = logging.getLogger(__name__) + class TableauLineageOverrides(ConfigModel): platform_override_map: Optional[Dict[str, str]] = Field( @@ -537,12 +542,12 @@ def get_fully_qualified_table_name( platform: str, upstream_db: str, schema: str, - full_name: str, + table_name: str, ) -> str: if platform == "athena": upstream_db = "" database_name = f"{upstream_db}." if upstream_db else "" - final_name = full_name.replace("[", "").replace("]", "") + final_name = table_name.replace("[", "").replace("]", "") schema_name = f"{schema}." 
if schema else "" @@ -573,17 +578,123 @@ def get_fully_qualified_table_name( return fully_qualified_table_name -def get_platform_instance( - platform: str, platform_instance_map: Optional[Dict[str, str]] -) -> Optional[str]: - if platform_instance_map is not None and platform in platform_instance_map.keys(): - return platform_instance_map[platform] +@dataclass +class TableauUpstreamReference: + database: Optional[str] + schema: Optional[str] + table: str + + connection_type: str + + @classmethod + def create( + cls, d: dict, default_schema_map: Optional[Dict[str, str]] = None + ) -> "TableauUpstreamReference": + # Values directly from `table` object from Tableau + database = t_database = d.get(tc.DATABASE, {}).get(tc.NAME) + schema = t_schema = d.get(tc.SCHEMA) + table = t_table = d.get(tc.NAME) or "" + t_full_name = d.get(tc.FULL_NAME) + t_connection_type = d[tc.CONNECTION_TYPE] # required to generate urn + t_id = d[tc.ID] + + parsed_full_name = cls.parse_full_name(t_full_name) + if parsed_full_name and len(parsed_full_name) == 3: + database, schema, table = parsed_full_name + elif parsed_full_name and len(parsed_full_name) == 2: + schema, table = parsed_full_name + else: + logger.debug( + f"Upstream urn generation ({t_id}):" + f" Did not parse full name {t_full_name}: unexpected number of values", + ) + + if not schema and default_schema_map and database in default_schema_map: + schema = default_schema_map[database] + + if database != t_database: + logger.debug( + f"Upstream urn generation ({t_id}):" + f" replacing database {t_database} with {database} from full name {t_full_name}" + ) + if schema != t_schema: + logger.debug( + f"Upstream urn generation ({t_id}):" + f" replacing schema {t_schema} with {schema} from full name {t_full_name}" + ) + if table != t_table: + logger.debug( + f"Upstream urn generation ({t_id}):" + f" replacing table {t_table} with {table} from full name {t_full_name}" + ) + + # TODO: See if we can remove this -- made for redshift + if ( + schema + and t_table + and t_full_name + and t_table == t_full_name + and schema in t_table + ): + logger.debug( + f"Omitting schema for upstream table {t_id}, schema included in table name" + ) + schema = "" + + return cls( + database=database, + schema=schema, + table=table, + connection_type=t_connection_type, + ) + + @staticmethod + def parse_full_name(full_name: Optional[str]) -> Optional[List[str]]: + # fullName is observed to be in formats: + # [database].[schema].[table] + # [schema].[table] + # [table] + # table + # schema + + # TODO: Validate the startswith check. 
Currently required for our integration tests + if full_name is None or not full_name.startswith("["): + return None + + return full_name.replace("[", "").replace("]", "").split(".") + + def make_dataset_urn( + self, + env: str, + platform_instance_map: Optional[Dict[str, str]], + lineage_overrides: Optional[TableauLineageOverrides] = None, + ) -> str: + ( + upstream_db, + platform_instance, + platform, + original_platform, + ) = get_overridden_info( + connection_type=self.connection_type, + upstream_db=self.database, + lineage_overrides=lineage_overrides, + platform_instance_map=platform_instance_map, + ) + + table_name = get_fully_qualified_table_name( + original_platform, + upstream_db or "", + self.schema, + self.table, + ) - return None + return builder.make_dataset_urn_with_platform_instance( + platform, table_name, platform_instance, env + ) def get_overridden_info( - connection_type: str, + connection_type: Optional[str], upstream_db: Optional[str], platform_instance_map: Optional[Dict[str, str]], lineage_overrides: Optional[TableauLineageOverrides] = None, @@ -605,7 +716,9 @@ def get_overridden_info( ): upstream_db = lineage_overrides.database_override_map[upstream_db] - platform_instance = get_platform_instance(original_platform, platform_instance_map) + platform_instance = ( + platform_instance_map.get(original_platform) if platform_instance_map else None + ) if original_platform in ("athena", "hive", "mysql"): # Two tier databases upstream_db = None @@ -613,35 +726,6 @@ def get_overridden_info( return upstream_db, platform_instance, platform, original_platform -def make_table_urn( - env: str, - upstream_db: Optional[str], - connection_type: str, - schema: str, - full_name: str, - platform_instance_map: Optional[Dict[str, str]], - lineage_overrides: Optional[TableauLineageOverrides] = None, -) -> str: - - upstream_db, platform_instance, platform, original_platform = get_overridden_info( - connection_type=connection_type, - upstream_db=upstream_db, - lineage_overrides=lineage_overrides, - platform_instance_map=platform_instance_map, - ) - - table_name = get_fully_qualified_table_name( - original_platform, - upstream_db if upstream_db is not None else "", - schema, - full_name, - ) - - return builder.make_dataset_urn_with_platform_instance( - platform, table_name, platform_instance, env - ) - - def make_description_from_params(description, formula): """ Generate column description diff --git a/metadata-ingestion/tests/integration/tableau/test_tableau_ingest.py b/metadata-ingestion/tests/integration/tableau/test_tableau_ingest.py index c31867f5aa904..0510f4a40f659 100644 --- a/metadata-ingestion/tests/integration/tableau/test_tableau_ingest.py +++ b/metadata-ingestion/tests/integration/tableau/test_tableau_ingest.py @@ -20,7 +20,7 @@ from datahub.ingestion.source.tableau import TableauConfig, TableauSource from datahub.ingestion.source.tableau_common import ( TableauLineageOverrides, - make_table_urn, + TableauUpstreamReference, ) from datahub.metadata.com.linkedin.pegasus2avro.dataset import ( DatasetLineageType, @@ -546,13 +546,13 @@ def test_lineage_overrides(): enable_logging() # Simple - specify platform instance to presto table assert ( - make_table_urn( - DEFAULT_ENV, + TableauUpstreamReference( "presto_catalog", - "presto", "test-schema", - "presto_catalog.test-schema.test-table", - platform_instance_map={"presto": "my_presto_instance"}, + "test-table", + "presto", + ).make_dataset_urn( + env=DEFAULT_ENV, platform_instance_map={"presto": "my_presto_instance"} ) == 
"urn:li:dataset:(urn:li:dataPlatform:presto,my_presto_instance.presto_catalog.test-schema.test-table,PROD)" ) @@ -560,12 +560,13 @@ def test_lineage_overrides(): # Transform presto urn to hive urn # resulting platform instance for hive = mapped platform instance + presto_catalog assert ( - make_table_urn( - DEFAULT_ENV, + TableauUpstreamReference( "presto_catalog", - "presto", "test-schema", - "presto_catalog.test-schema.test-table", + "test-table", + "presto", + ).make_dataset_urn( + env=DEFAULT_ENV, platform_instance_map={"presto": "my_instance"}, lineage_overrides=TableauLineageOverrides( platform_override_map={"presto": "hive"}, @@ -574,14 +575,15 @@ def test_lineage_overrides(): == "urn:li:dataset:(urn:li:dataPlatform:hive,my_instance.presto_catalog.test-schema.test-table,PROD)" ) - # tranform hive urn to presto urn + # transform hive urn to presto urn assert ( - make_table_urn( - DEFAULT_ENV, - "", - "hive", + TableauUpstreamReference( + None, "test-schema", - "test-schema.test-table", + "test-table", + "hive", + ).make_dataset_urn( + env=DEFAULT_ENV, platform_instance_map={"hive": "my_presto_instance.presto_catalog"}, lineage_overrides=TableauLineageOverrides( platform_override_map={"hive": "presto"}, From e2988017c23270acd95e25ec3289983ecc3895f7 Mon Sep 17 00:00:00 2001 From: Amanda Hernando <110099762+amanda-her@users.noreply.github.com> Date: Wed, 11 Oct 2023 01:36:01 +0200 Subject: [PATCH 05/14] feat(auth): add data platform instance field resolver provider (#8828) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Sergio Gómez Villamor Co-authored-by: Adrián Pertíñez --- .../authorization/ResolvedResourceSpec.java | 17 ++ .../authorization/ResourceFieldType.java | 6 +- .../DefaultResourceSpecResolver.java | 9 +- ...PlatformInstanceFieldResolverProvider.java | 70 +++++++ ...formInstanceFieldResolverProviderTest.java | 188 ++++++++++++++++++ 5 files changed, 286 insertions(+), 4 deletions(-) create mode 100644 metadata-service/auth-impl/src/main/java/com/datahub/authorization/fieldresolverprovider/DataPlatformInstanceFieldResolverProvider.java create mode 100644 metadata-service/auth-impl/src/test/java/com/datahub/authorization/fieldresolverprovider/DataPlatformInstanceFieldResolverProviderTest.java diff --git a/metadata-auth/auth-api/src/main/java/com/datahub/authorization/ResolvedResourceSpec.java b/metadata-auth/auth-api/src/main/java/com/datahub/authorization/ResolvedResourceSpec.java index 53dd0be44f963..8e429a8ca1b94 100644 --- a/metadata-auth/auth-api/src/main/java/com/datahub/authorization/ResolvedResourceSpec.java +++ b/metadata-auth/auth-api/src/main/java/com/datahub/authorization/ResolvedResourceSpec.java @@ -3,6 +3,7 @@ import java.util.Collections; import java.util.Map; import java.util.Set; +import javax.annotation.Nullable; import lombok.Getter; import lombok.RequiredArgsConstructor; import lombok.ToString; @@ -35,4 +36,20 @@ public Set getOwners() { } return fieldResolvers.get(ResourceFieldType.OWNER).getFieldValuesFuture().join().getValues(); } + + /** + * Fetch the platform instance for a Resolved Resource Spec + * @return a Platform Instance or null if one does not exist. 
+ */ + @Nullable + public String getDataPlatformInstance() { + if (!fieldResolvers.containsKey(ResourceFieldType.DATA_PLATFORM_INSTANCE)) { + return null; + } + Set dataPlatformInstance = fieldResolvers.get(ResourceFieldType.DATA_PLATFORM_INSTANCE).getFieldValuesFuture().join().getValues(); + if (dataPlatformInstance.size() > 0) { + return dataPlatformInstance.stream().findFirst().get(); + } + return null; + } } diff --git a/metadata-auth/auth-api/src/main/java/com/datahub/authorization/ResourceFieldType.java b/metadata-auth/auth-api/src/main/java/com/datahub/authorization/ResourceFieldType.java index ee54d2bfbba1d..478522dc7c331 100644 --- a/metadata-auth/auth-api/src/main/java/com/datahub/authorization/ResourceFieldType.java +++ b/metadata-auth/auth-api/src/main/java/com/datahub/authorization/ResourceFieldType.java @@ -19,5 +19,9 @@ public enum ResourceFieldType { /** * Domains of resource */ - DOMAIN + DOMAIN, + /** + * Data platform instance of resource + */ + DATA_PLATFORM_INSTANCE } diff --git a/metadata-service/auth-impl/src/main/java/com/datahub/authorization/DefaultResourceSpecResolver.java b/metadata-service/auth-impl/src/main/java/com/datahub/authorization/DefaultResourceSpecResolver.java index cd4e0b0967829..64c43dc8aa591 100644 --- a/metadata-service/auth-impl/src/main/java/com/datahub/authorization/DefaultResourceSpecResolver.java +++ b/metadata-service/auth-impl/src/main/java/com/datahub/authorization/DefaultResourceSpecResolver.java @@ -1,13 +1,15 @@ package com.datahub.authorization; -import com.datahub.authorization.fieldresolverprovider.EntityTypeFieldResolverProvider; -import com.datahub.authorization.fieldresolverprovider.OwnerFieldResolverProvider; import com.datahub.authentication.Authentication; +import com.datahub.authorization.fieldresolverprovider.DataPlatformInstanceFieldResolverProvider; import com.datahub.authorization.fieldresolverprovider.DomainFieldResolverProvider; +import com.datahub.authorization.fieldresolverprovider.EntityTypeFieldResolverProvider; import com.datahub.authorization.fieldresolverprovider.EntityUrnFieldResolverProvider; +import com.datahub.authorization.fieldresolverprovider.OwnerFieldResolverProvider; import com.datahub.authorization.fieldresolverprovider.ResourceFieldResolverProvider; import com.google.common.collect.ImmutableList; import com.linkedin.entity.client.EntityClient; + import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -20,7 +22,8 @@ public DefaultResourceSpecResolver(Authentication systemAuthentication, EntityCl _resourceFieldResolverProviders = ImmutableList.of(new EntityTypeFieldResolverProvider(), new EntityUrnFieldResolverProvider(), new DomainFieldResolverProvider(entityClient, systemAuthentication), - new OwnerFieldResolverProvider(entityClient, systemAuthentication)); + new OwnerFieldResolverProvider(entityClient, systemAuthentication), + new DataPlatformInstanceFieldResolverProvider(entityClient, systemAuthentication)); } @Override diff --git a/metadata-service/auth-impl/src/main/java/com/datahub/authorization/fieldresolverprovider/DataPlatformInstanceFieldResolverProvider.java b/metadata-service/auth-impl/src/main/java/com/datahub/authorization/fieldresolverprovider/DataPlatformInstanceFieldResolverProvider.java new file mode 100644 index 0000000000000..cd838625c2ca1 --- /dev/null +++ b/metadata-service/auth-impl/src/main/java/com/datahub/authorization/fieldresolverprovider/DataPlatformInstanceFieldResolverProvider.java @@ -0,0 +1,70 @@ +package 
com.datahub.authorization.fieldresolverprovider; + +import com.datahub.authentication.Authentication; +import com.datahub.authorization.FieldResolver; +import com.datahub.authorization.ResourceFieldType; +import com.datahub.authorization.ResourceSpec; +import com.linkedin.common.DataPlatformInstance; +import com.linkedin.common.urn.Urn; +import com.linkedin.common.urn.UrnUtils; +import com.linkedin.entity.EntityResponse; +import com.linkedin.entity.EnvelopedAspect; +import com.linkedin.entity.client.EntityClient; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; + +import java.util.Collections; +import java.util.Objects; + +import static com.linkedin.metadata.Constants.*; + +/** + * Provides field resolver for domain given resourceSpec + */ +@Slf4j +@RequiredArgsConstructor +public class DataPlatformInstanceFieldResolverProvider implements ResourceFieldResolverProvider { + + private final EntityClient _entityClient; + private final Authentication _systemAuthentication; + + @Override + public ResourceFieldType getFieldType() { + return ResourceFieldType.DATA_PLATFORM_INSTANCE; + } + + @Override + public FieldResolver getFieldResolver(ResourceSpec resourceSpec) { + return FieldResolver.getResolverFromFunction(resourceSpec, this::getDataPlatformInstance); + } + + private FieldResolver.FieldValue getDataPlatformInstance(ResourceSpec resourceSpec) { + Urn entityUrn = UrnUtils.getUrn(resourceSpec.getResource()); + // In the case that the entity is a platform instance, the associated platform instance entity is the instance itself + if (entityUrn.getEntityType().equals(DATA_PLATFORM_INSTANCE_ENTITY_NAME)) { + return FieldResolver.FieldValue.builder() + .values(Collections.singleton(entityUrn.toString())) + .build(); + } + + EnvelopedAspect dataPlatformInstanceAspect; + try { + EntityResponse response = _entityClient.getV2(entityUrn.getEntityType(), entityUrn, + Collections.singleton(DATA_PLATFORM_INSTANCE_ASPECT_NAME), _systemAuthentication); + if (response == null || !response.getAspects().containsKey(DATA_PLATFORM_INSTANCE_ASPECT_NAME)) { + return FieldResolver.emptyFieldValue(); + } + dataPlatformInstanceAspect = response.getAspects().get(DATA_PLATFORM_INSTANCE_ASPECT_NAME); + } catch (Exception e) { + log.error("Error while retrieving platform instance aspect for urn {}", entityUrn, e); + return FieldResolver.emptyFieldValue(); + } + DataPlatformInstance dataPlatformInstance = new DataPlatformInstance(dataPlatformInstanceAspect.getValue().data()); + if (dataPlatformInstance.getInstance() == null) { + return FieldResolver.emptyFieldValue(); + } + return FieldResolver.FieldValue.builder() + .values(Collections.singleton(Objects.requireNonNull(dataPlatformInstance.getInstance()).toString())) + .build(); + } +} \ No newline at end of file diff --git a/metadata-service/auth-impl/src/test/java/com/datahub/authorization/fieldresolverprovider/DataPlatformInstanceFieldResolverProviderTest.java b/metadata-service/auth-impl/src/test/java/com/datahub/authorization/fieldresolverprovider/DataPlatformInstanceFieldResolverProviderTest.java new file mode 100644 index 0000000000000..e525c602c2620 --- /dev/null +++ b/metadata-service/auth-impl/src/test/java/com/datahub/authorization/fieldresolverprovider/DataPlatformInstanceFieldResolverProviderTest.java @@ -0,0 +1,188 @@ +package com.datahub.authorization.fieldresolverprovider; + +import com.datahub.authentication.Authentication; +import com.datahub.authorization.ResourceFieldType; +import com.datahub.authorization.ResourceSpec; 
+import com.linkedin.common.DataPlatformInstance; +import com.linkedin.common.urn.Urn; +import com.linkedin.entity.Aspect; +import com.linkedin.entity.EntityResponse; +import com.linkedin.entity.EnvelopedAspect; +import com.linkedin.entity.EnvelopedAspectMap; +import com.linkedin.entity.client.EntityClient; +import com.linkedin.r2.RemoteInvocationException; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; +import org.testng.annotations.BeforeMethod; +import org.testng.annotations.Test; + +import java.net.URISyntaxException; +import java.util.Collections; +import java.util.Set; + +import static com.linkedin.metadata.Constants.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.*; +import static org.testng.Assert.assertEquals; +import static org.testng.Assert.assertTrue; + +public class DataPlatformInstanceFieldResolverProviderTest { + + private static final String DATA_PLATFORM_INSTANCE_URN = + "urn:li:dataPlatformInstance:(urn:li:dataPlatform:s3,test-platform-instance)"; + private static final String RESOURCE_URN = + "urn:li:dataset:(urn:li:dataPlatform:s3,test-platform-instance.testDataset,PROD)"; + private static final ResourceSpec RESOURCE_SPEC = new ResourceSpec(DATASET_ENTITY_NAME, RESOURCE_URN); + + @Mock + private EntityClient entityClientMock; + @Mock + private Authentication systemAuthenticationMock; + + private DataPlatformInstanceFieldResolverProvider dataPlatformInstanceFieldResolverProvider; + + @BeforeMethod + public void setup() { + MockitoAnnotations.initMocks(this); + dataPlatformInstanceFieldResolverProvider = + new DataPlatformInstanceFieldResolverProvider(entityClientMock, systemAuthenticationMock); + } + + @Test + public void shouldReturnDataPlatformInstanceType() { + assertEquals(ResourceFieldType.DATA_PLATFORM_INSTANCE, dataPlatformInstanceFieldResolverProvider.getFieldType()); + } + + @Test + public void shouldReturnFieldValueWithResourceSpecIfTypeIsDataPlatformInstance() { + var resourceSpec = new ResourceSpec(DATA_PLATFORM_INSTANCE_ENTITY_NAME, DATA_PLATFORM_INSTANCE_URN); + + var result = dataPlatformInstanceFieldResolverProvider.getFieldResolver(resourceSpec); + + assertEquals(Set.of(DATA_PLATFORM_INSTANCE_URN), result.getFieldValuesFuture().join().getValues()); + verifyZeroInteractions(entityClientMock); + } + + @Test + public void shouldReturnEmptyFieldValueWhenResponseIsNull() throws RemoteInvocationException, URISyntaxException { + when(entityClientMock.getV2( + eq(DATASET_ENTITY_NAME), + any(Urn.class), + eq(Collections.singleton(DATA_PLATFORM_INSTANCE_ASPECT_NAME)), + eq(systemAuthenticationMock) + )).thenReturn(null); + + var result = dataPlatformInstanceFieldResolverProvider.getFieldResolver(RESOURCE_SPEC); + + assertTrue(result.getFieldValuesFuture().join().getValues().isEmpty()); + verify(entityClientMock, times(1)).getV2( + eq(DATASET_ENTITY_NAME), + any(Urn.class), + eq(Collections.singleton(DATA_PLATFORM_INSTANCE_ASPECT_NAME)), + eq(systemAuthenticationMock) + ); + } + + @Test + public void shouldReturnEmptyFieldValueWhenResourceHasNoDataPlatformInstance() + throws RemoteInvocationException, URISyntaxException { + var entityResponseMock = mock(EntityResponse.class); + when(entityResponseMock.getAspects()).thenReturn(new EnvelopedAspectMap()); + when(entityClientMock.getV2( + eq(DATASET_ENTITY_NAME), + any(Urn.class), + eq(Collections.singleton(DATA_PLATFORM_INSTANCE_ASPECT_NAME)), + eq(systemAuthenticationMock) + )).thenReturn(entityResponseMock); + + 
var result = dataPlatformInstanceFieldResolverProvider.getFieldResolver(RESOURCE_SPEC); + + assertTrue(result.getFieldValuesFuture().join().getValues().isEmpty()); + verify(entityClientMock, times(1)).getV2( + eq(DATASET_ENTITY_NAME), + any(Urn.class), + eq(Collections.singleton(DATA_PLATFORM_INSTANCE_ASPECT_NAME)), + eq(systemAuthenticationMock) + ); + } + + @Test + public void shouldReturnEmptyFieldValueWhenThereIsAnException() throws RemoteInvocationException, URISyntaxException { + when(entityClientMock.getV2( + eq(DATASET_ENTITY_NAME), + any(Urn.class), + eq(Collections.singleton(DATA_PLATFORM_INSTANCE_ASPECT_NAME)), + eq(systemAuthenticationMock) + )).thenThrow(new RemoteInvocationException()); + + var result = dataPlatformInstanceFieldResolverProvider.getFieldResolver(RESOURCE_SPEC); + + assertTrue(result.getFieldValuesFuture().join().getValues().isEmpty()); + verify(entityClientMock, times(1)).getV2( + eq(DATASET_ENTITY_NAME), + any(Urn.class), + eq(Collections.singleton(DATA_PLATFORM_INSTANCE_ASPECT_NAME)), + eq(systemAuthenticationMock) + ); + } + + @Test + public void shouldReturnEmptyFieldValueWhenDataPlatformInstanceHasNoInstance() + throws RemoteInvocationException, URISyntaxException { + + var dataPlatform = new DataPlatformInstance() + .setPlatform(Urn.createFromString("urn:li:dataPlatform:s3")); + var entityResponseMock = mock(EntityResponse.class); + var envelopedAspectMap = new EnvelopedAspectMap(); + envelopedAspectMap.put(DATA_PLATFORM_INSTANCE_ASPECT_NAME, + new EnvelopedAspect().setValue(new Aspect(dataPlatform.data()))); + when(entityResponseMock.getAspects()).thenReturn(envelopedAspectMap); + when(entityClientMock.getV2( + eq(DATASET_ENTITY_NAME), + any(Urn.class), + eq(Collections.singleton(DATA_PLATFORM_INSTANCE_ASPECT_NAME)), + eq(systemAuthenticationMock) + )).thenReturn(entityResponseMock); + + var result = dataPlatformInstanceFieldResolverProvider.getFieldResolver(RESOURCE_SPEC); + + assertTrue(result.getFieldValuesFuture().join().getValues().isEmpty()); + verify(entityClientMock, times(1)).getV2( + eq(DATASET_ENTITY_NAME), + any(Urn.class), + eq(Collections.singleton(DATA_PLATFORM_INSTANCE_ASPECT_NAME)), + eq(systemAuthenticationMock) + ); + } + + @Test + public void shouldReturnFieldValueWithDataPlatformInstanceOfTheResource() + throws RemoteInvocationException, URISyntaxException { + + var dataPlatformInstance = new DataPlatformInstance() + .setPlatform(Urn.createFromString("urn:li:dataPlatform:s3")) + .setInstance(Urn.createFromString(DATA_PLATFORM_INSTANCE_URN)); + var entityResponseMock = mock(EntityResponse.class); + var envelopedAspectMap = new EnvelopedAspectMap(); + envelopedAspectMap.put(DATA_PLATFORM_INSTANCE_ASPECT_NAME, + new EnvelopedAspect().setValue(new Aspect(dataPlatformInstance.data()))); + when(entityResponseMock.getAspects()).thenReturn(envelopedAspectMap); + when(entityClientMock.getV2( + eq(DATASET_ENTITY_NAME), + any(Urn.class), + eq(Collections.singleton(DATA_PLATFORM_INSTANCE_ASPECT_NAME)), + eq(systemAuthenticationMock) + )).thenReturn(entityResponseMock); + + var result = dataPlatformInstanceFieldResolverProvider.getFieldResolver(RESOURCE_SPEC); + + assertEquals(Set.of(DATA_PLATFORM_INSTANCE_URN), result.getFieldValuesFuture().join().getValues()); + verify(entityClientMock, times(1)).getV2( + eq(DATASET_ENTITY_NAME), + any(Urn.class), + eq(Collections.singleton(DATA_PLATFORM_INSTANCE_ASPECT_NAME)), + eq(systemAuthenticationMock) + ); + } +} From a17db676e37d90ec47f16a43ab95e0d562952939 Mon Sep 17 00:00:00 2001 From: siladitya 
<68184387+siladitya2@users.noreply.github.com> Date: Wed, 11 Oct 2023 02:43:36 +0200 Subject: [PATCH 06/14] feat(graphql): Added datafetcher for DataPlatformInstance entity (#8935) Co-authored-by: si-chakraborty Co-authored-by: John Joyce --- .../datahub/graphql/GmsGraphQLEngine.java | 1 + .../DataPlatformInstanceType.java | 34 ++++++++++++++++++- .../src/main/resources/entity.graphql | 5 +++ 3 files changed, 39 insertions(+), 1 deletion(-) diff --git a/datahub-graphql-core/src/main/java/com/linkedin/datahub/graphql/GmsGraphQLEngine.java b/datahub-graphql-core/src/main/java/com/linkedin/datahub/graphql/GmsGraphQLEngine.java index 3ba0cc1f747e3..ebb5c7d62c7d3 100644 --- a/datahub-graphql-core/src/main/java/com/linkedin/datahub/graphql/GmsGraphQLEngine.java +++ b/datahub-graphql-core/src/main/java/com/linkedin/datahub/graphql/GmsGraphQLEngine.java @@ -821,6 +821,7 @@ private void configureQueryResolvers(final RuntimeWiring.Builder builder) { .dataFetcher("glossaryNode", getResolver(glossaryNodeType)) .dataFetcher("domain", getResolver((domainType))) .dataFetcher("dataPlatform", getResolver(dataPlatformType)) + .dataFetcher("dataPlatformInstance", getResolver(dataPlatformInstanceType)) .dataFetcher("mlFeatureTable", getResolver(mlFeatureTableType)) .dataFetcher("mlFeature", getResolver(mlFeatureType)) .dataFetcher("mlPrimaryKey", getResolver(mlPrimaryKeyType)) diff --git a/datahub-graphql-core/src/main/java/com/linkedin/datahub/graphql/types/dataplatforminstance/DataPlatformInstanceType.java b/datahub-graphql-core/src/main/java/com/linkedin/datahub/graphql/types/dataplatforminstance/DataPlatformInstanceType.java index 2423fc31ea52e..87614e1332528 100644 --- a/datahub-graphql-core/src/main/java/com/linkedin/datahub/graphql/types/dataplatforminstance/DataPlatformInstanceType.java +++ b/datahub-graphql-core/src/main/java/com/linkedin/datahub/graphql/types/dataplatforminstance/DataPlatformInstanceType.java @@ -4,16 +4,25 @@ import com.linkedin.common.urn.Urn; import com.linkedin.common.urn.UrnUtils; import com.linkedin.datahub.graphql.QueryContext; +import com.linkedin.datahub.graphql.generated.AutoCompleteResults; import com.linkedin.datahub.graphql.generated.DataPlatformInstance; import com.linkedin.datahub.graphql.generated.Entity; import com.linkedin.datahub.graphql.generated.EntityType; +import com.linkedin.datahub.graphql.generated.FacetFilterInput; +import com.linkedin.datahub.graphql.generated.SearchResults; import com.linkedin.datahub.graphql.types.dataplatforminstance.mappers.DataPlatformInstanceMapper; +import com.linkedin.datahub.graphql.types.mappers.AutoCompleteResultsMapper; +import com.linkedin.datahub.graphql.types.SearchableEntityType; import com.linkedin.entity.EntityResponse; import com.linkedin.entity.client.EntityClient; import com.linkedin.metadata.Constants; +import com.linkedin.metadata.query.AutoCompleteResult; +import com.linkedin.metadata.query.filter.Filter; import graphql.execution.DataFetcherResult; +import org.apache.commons.lang3.NotImplementedException; import javax.annotation.Nonnull; +import javax.annotation.Nullable; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -22,7 +31,10 @@ import java.util.function.Function; import java.util.stream.Collectors; -public class DataPlatformInstanceType implements com.linkedin.datahub.graphql.types.EntityType { +import static com.linkedin.metadata.Constants.DATA_PLATFORM_INSTANCE_ENTITY_NAME; + +public class DataPlatformInstanceType implements SearchableEntityType, + 
com.linkedin.datahub.graphql.types.EntityType { static final Set ASPECTS_TO_FETCH = ImmutableSet.of( Constants.DATA_PLATFORM_INSTANCE_KEY_ASPECT_NAME, @@ -84,4 +96,24 @@ public List> batchLoad(@Nonnull List filters, + int start, + int count, + @Nonnull final QueryContext context) throws Exception { + throw new NotImplementedException("Searchable type (deprecated) not implemented on DataPlatformInstance entity type"); + } + + @Override + public AutoCompleteResults autoComplete(@Nonnull String query, + @Nullable String field, + @Nullable Filter filters, + int limit, + @Nonnull final QueryContext context) throws Exception { + final AutoCompleteResult result = _entityClient.autoComplete(DATA_PLATFORM_INSTANCE_ENTITY_NAME, query, + filters, limit, context.getAuthentication()); + return AutoCompleteResultsMapper.map(result); + } + } diff --git a/datahub-graphql-core/src/main/resources/entity.graphql b/datahub-graphql-core/src/main/resources/entity.graphql index 39f86948c77c4..0b15d7b875a9c 100644 --- a/datahub-graphql-core/src/main/resources/entity.graphql +++ b/datahub-graphql-core/src/main/resources/entity.graphql @@ -226,6 +226,11 @@ type Query { listOwnershipTypes( "Input required for listing custom ownership types" input: ListOwnershipTypesInput!): ListOwnershipTypesResult! + + """ + Fetch a Data Platform Instance by primary key (urn) + """ + dataPlatformInstance(urn: String!): DataPlatformInstance } """ From dfcea2441e75e1eef517c0f9a4765e6e7990f297 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergio=20G=C3=B3mez=20Villamor?= Date: Wed, 11 Oct 2023 03:04:44 +0200 Subject: [PATCH 07/14] feat(config): configurable bootstrap policies file (#8812) Co-authored-by: John Joyce --- .../configuration/src/main/resources/application.yml | 4 ++++ .../boot/factories/BootstrapManagerFactory.java | 7 ++++++- .../metadata/boot/steps/IngestPoliciesStep.java | 10 +++++++--- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/metadata-service/configuration/src/main/resources/application.yml b/metadata-service/configuration/src/main/resources/application.yml index 4dfd96ac75c6c..d22f92adca8f9 100644 --- a/metadata-service/configuration/src/main/resources/application.yml +++ b/metadata-service/configuration/src/main/resources/application.yml @@ -276,6 +276,10 @@ bootstrap: enabled: ${UPGRADE_DEFAULT_BROWSE_PATHS_ENABLED:false} # enable to run the upgrade to migrate legacy default browse paths to new ones backfillBrowsePathsV2: enabled: ${BACKFILL_BROWSE_PATHS_V2:false} # Enables running the backfill of browsePathsV2 upgrade step. There are concerns about the load of this step so hiding it behind a flag. 
Deprecating in favor of running through SystemUpdate + policies: + file: ${BOOTSTRAP_POLICIES_FILE:classpath:boot/policies.json} + # eg for local file + # file: "file:///datahub/datahub-gms/resources/custom-policies.json" servlets: waitTimeout: ${BOOTSTRAP_SERVLETS_WAITTIMEOUT:60} # Total waiting time in seconds for servlets to initialize diff --git a/metadata-service/factories/src/main/java/com/linkedin/metadata/boot/factories/BootstrapManagerFactory.java b/metadata-service/factories/src/main/java/com/linkedin/metadata/boot/factories/BootstrapManagerFactory.java index c490f00021201..3a761bd12647e 100644 --- a/metadata-service/factories/src/main/java/com/linkedin/metadata/boot/factories/BootstrapManagerFactory.java +++ b/metadata-service/factories/src/main/java/com/linkedin/metadata/boot/factories/BootstrapManagerFactory.java @@ -31,6 +31,7 @@ import com.linkedin.metadata.search.EntitySearchService; import com.linkedin.metadata.search.SearchService; import com.linkedin.metadata.search.transformer.SearchDocumentTransformer; + import java.util.ArrayList; import java.util.List; import javax.annotation.Nonnull; @@ -41,6 +42,7 @@ import org.springframework.context.annotation.Configuration; import org.springframework.context.annotation.Import; import org.springframework.context.annotation.Scope; +import org.springframework.core.io.Resource; @Configuration @@ -89,13 +91,16 @@ public class BootstrapManagerFactory { @Value("${bootstrap.backfillBrowsePathsV2.enabled}") private Boolean _backfillBrowsePathsV2Enabled; + @Value("${bootstrap.policies.file}") + private Resource _policiesResource; + @Bean(name = "bootstrapManager") @Scope("singleton") @Nonnull protected BootstrapManager createInstance() { final IngestRootUserStep ingestRootUserStep = new IngestRootUserStep(_entityService); final IngestPoliciesStep ingestPoliciesStep = - new IngestPoliciesStep(_entityRegistry, _entityService, _entitySearchService, _searchDocumentTransformer); + new IngestPoliciesStep(_entityRegistry, _entityService, _entitySearchService, _searchDocumentTransformer, _policiesResource); final IngestRolesStep ingestRolesStep = new IngestRolesStep(_entityService, _entityRegistry); final IngestDataPlatformsStep ingestDataPlatformsStep = new IngestDataPlatformsStep(_entityService); final IngestDataPlatformInstancesStep ingestDataPlatformInstancesStep = diff --git a/metadata-service/factories/src/main/java/com/linkedin/metadata/boot/steps/IngestPoliciesStep.java b/metadata-service/factories/src/main/java/com/linkedin/metadata/boot/steps/IngestPoliciesStep.java index 87dcfd736da40..cf29645214466 100644 --- a/metadata-service/factories/src/main/java/com/linkedin/metadata/boot/steps/IngestPoliciesStep.java +++ b/metadata-service/factories/src/main/java/com/linkedin/metadata/boot/steps/IngestPoliciesStep.java @@ -25,6 +25,7 @@ import com.linkedin.mxe.GenericAspect; import com.linkedin.mxe.MetadataChangeProposal; import com.linkedin.policy.DataHubPolicyInfo; + import java.io.IOException; import java.net.URISyntaxException; import java.util.Collections; @@ -35,7 +36,8 @@ import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; -import org.springframework.core.io.ClassPathResource; +import org.springframework.core.io.Resource; + import static com.linkedin.metadata.Constants.*; @@ -52,6 +54,8 @@ public class IngestPoliciesStep implements BootstrapStep { private final EntitySearchService _entitySearchService; private final SearchDocumentTransformer _searchDocumentTransformer; + private final Resource _policiesResource; + 
@Override public String name() { return "IngestPoliciesStep"; @@ -66,10 +70,10 @@ public void execute() throws IOException, URISyntaxException { .maxStringLength(maxSize).build()); // 0. Execute preflight check to see whether we need to ingest policies - log.info("Ingesting default access policies..."); + log.info("Ingesting default access policies from: {}...", _policiesResource); // 1. Read from the file into JSON. - final JsonNode policiesObj = mapper.readTree(new ClassPathResource("./boot/policies.json").getFile()); + final JsonNode policiesObj = mapper.readTree(_policiesResource.getFile()); if (!policiesObj.isArray()) { throw new RuntimeException( From 10a190470e8c932b6d34cba49de7dbcba687a088 Mon Sep 17 00:00:00 2001 From: siddiquebagwan-gslab Date: Wed, 11 Oct 2023 08:54:08 +0530 Subject: [PATCH 08/14] feat(ingestion/redshift): CLL support in redshift (#8921) --- .../ingestion/source/redshift/config.py | 4 + .../ingestion/source/redshift/lineage.py | 215 +++++++++++++----- .../ingestion/source/redshift/redshift.py | 1 + .../tests/unit/test_redshift_lineage.py | 95 ++++++-- 4 files changed, 234 insertions(+), 81 deletions(-) diff --git a/metadata-ingestion/src/datahub/ingestion/source/redshift/config.py b/metadata-ingestion/src/datahub/ingestion/source/redshift/config.py index 804a14b0fe1cf..2789b800940db 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/redshift/config.py +++ b/metadata-ingestion/src/datahub/ingestion/source/redshift/config.py @@ -132,6 +132,10 @@ class RedshiftConfig( description="Whether `schema_pattern` is matched against fully qualified schema name `.`.", ) + extract_column_level_lineage: bool = Field( + default=True, description="Whether to extract column level lineage." + ) + @root_validator(pre=True) def check_email_is_set_on_usage(cls, values): if values.get("include_usage_statistics"): diff --git a/metadata-ingestion/src/datahub/ingestion/source/redshift/lineage.py b/metadata-ingestion/src/datahub/ingestion/source/redshift/lineage.py index bbe52b5d98ba3..c9ddfbe92ab2a 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/redshift/lineage.py +++ b/metadata-ingestion/src/datahub/ingestion/source/redshift/lineage.py @@ -9,10 +9,12 @@ import humanfriendly import redshift_connector -from sqllineage.runner import LineageRunner +import datahub.emitter.mce_builder as builder +import datahub.utilities.sqlglot_lineage as sqlglot_l from datahub.emitter import mce_builder from datahub.emitter.mce_builder import make_dataset_urn_with_platform_instance +from datahub.ingestion.api.common import PipelineContext from datahub.ingestion.source.aws.s3_util import strip_s3_prefix from datahub.ingestion.source.redshift.common import get_db_name from datahub.ingestion.source.redshift.config import LineageMode, RedshiftConfig @@ -28,13 +30,19 @@ from datahub.ingestion.source.state.redundant_run_skip_handler import ( RedundantLineageRunSkipHandler, ) -from datahub.metadata.com.linkedin.pegasus2avro.dataset import UpstreamLineage +from datahub.metadata.com.linkedin.pegasus2avro.dataset import ( + FineGrainedLineage, + FineGrainedLineageDownstreamType, + FineGrainedLineageUpstreamType, + UpstreamLineage, +) from datahub.metadata.schema_classes import ( DatasetLineageTypeClass, UpstreamClass, UpstreamLineageClass, ) from datahub.utilities import memory_footprint +from datahub.utilities.urns import dataset_urn logger: logging.Logger = logging.getLogger(__name__) @@ -56,13 +64,14 @@ class LineageCollectorType(Enum): @dataclass(frozen=True, eq=True) class LineageDataset: 
platform: LineageDatasetPlatform - path: str + urn: str @dataclass() class LineageItem: dataset: LineageDataset upstreams: Set[LineageDataset] + cll: Optional[List[sqlglot_l.ColumnLineageInfo]] collector_type: LineageCollectorType dataset_lineage_type: str = field(init=False) @@ -83,10 +92,12 @@ def __init__( self, config: RedshiftConfig, report: RedshiftReport, + context: PipelineContext, redundant_run_skip_handler: Optional[RedundantLineageRunSkipHandler] = None, ): self.config = config self.report = report + self.context = context self._lineage_map: Dict[str, LineageItem] = defaultdict() self.redundant_run_skip_handler = redundant_run_skip_handler @@ -121,33 +132,37 @@ def _get_s3_path(self, path: str) -> str: return path - def _get_sources_from_query(self, db_name: str, query: str) -> List[LineageDataset]: + def _get_sources_from_query( + self, db_name: str, query: str + ) -> Tuple[List[LineageDataset], Optional[List[sqlglot_l.ColumnLineageInfo]]]: sources: List[LineageDataset] = list() - parser = LineageRunner(query) + parsed_result: Optional[ + sqlglot_l.SqlParsingResult + ] = sqlglot_l.create_lineage_sql_parsed_result( + query=query, + platform=LineageDatasetPlatform.REDSHIFT.value, + platform_instance=self.config.platform_instance, + database=db_name, + schema=str(self.config.default_schema), + graph=self.context.graph, + env=self.config.env, + ) - for table in parser.source_tables: - split = str(table).split(".") - if len(split) == 3: - db_name, source_schema, source_table = split - elif len(split) == 2: - source_schema, source_table = split - else: - raise ValueError( - f"Invalid table name {table} in query {query}. " - f"Expected format: [db_name].[schema].[table] or [schema].[table] or [table]." - ) + if parsed_result is None: + logger.debug(f"native query parsing failed for {query}") + return sources, None - if source_schema == "": - source_schema = str(self.config.default_schema) + logger.debug(f"parsed_result = {parsed_result}") + for table_urn in parsed_result.in_tables: source = LineageDataset( platform=LineageDatasetPlatform.REDSHIFT, - path=f"{db_name}.{source_schema}.{source_table}", + urn=table_urn, ) sources.append(source) - return sources + return sources, parsed_result.column_lineage def _build_s3_path_from_row(self, filename: str) -> str: path = filename.strip() @@ -165,9 +180,11 @@ def _get_sources( source_table: Optional[str], ddl: Optional[str], filename: Optional[str], - ) -> List[LineageDataset]: + ) -> Tuple[List[LineageDataset], Optional[List[sqlglot_l.ColumnLineageInfo]]]: sources: List[LineageDataset] = list() # Source + cll: Optional[List[sqlglot_l.ColumnLineageInfo]] = None + if ( lineage_type in { @@ -177,7 +194,7 @@ def _get_sources( and ddl is not None ): try: - sources = self._get_sources_from_query(db_name=db_name, query=ddl) + sources, cll = self._get_sources_from_query(db_name=db_name, query=ddl) except Exception as e: logger.warning( f"Error parsing query {ddl} for getting lineage. Error was {e}." @@ -192,22 +209,38 @@ def _get_sources( "Only s3 source supported with copy. The source was: {path}." 
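The _get_sources_from_query rewrite above swaps sqllineage's LineageRunner for DataHub's sqlglot-based parser. A minimal sketch of the same call outside the source, assuming the keyword arguments shown in the hunk (the query and database names are invented, and graph=None means the parse is schema-unaware):

    import datahub.utilities.sqlglot_lineage as sqlglot_l

    parsed = sqlglot_l.create_lineage_sql_parsed_result(
        query="select id, amount from public.orders",  # hypothetical query
        platform="redshift",
        platform_instance=None,
        database="dev",
        schema="public",
        graph=None,  # no DataHubGraph -> no schema-aware column resolution
        env="PROD",
    )
    if parsed is not None:
        print(parsed.in_tables)        # upstream dataset urns
        print(parsed.column_lineage)   # Optional[List[ColumnLineageInfo]]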
) self.report.num_lineage_dropped_not_support_copy_path += 1 - return sources + return sources, cll path = strip_s3_prefix(self._get_s3_path(path)) + urn = make_dataset_urn_with_platform_instance( + platform=platform.value, + name=path, + env=self.config.env, + platform_instance=self.config.platform_instance_map.get( + platform.value + ) + if self.config.platform_instance_map is not None + else None, + ) elif source_schema is not None and source_table is not None: platform = LineageDatasetPlatform.REDSHIFT path = f"{db_name}.{source_schema}.{source_table}" + urn = make_dataset_urn_with_platform_instance( + platform=platform.value, + platform_instance=self.config.platform_instance, + name=path, + env=self.config.env, + ) else: - return [] + return [], cll sources = [ LineageDataset( platform=platform, - path=path, + urn=urn, ) ] - return sources + return sources, cll def _populate_lineage_map( self, @@ -231,6 +264,7 @@ def _populate_lineage_map( :rtype: None """ try: + cll: Optional[List[sqlglot_l.ColumnLineageInfo]] = None raw_db_name = database alias_db_name = get_db_name(self.config) @@ -243,7 +277,7 @@ def _populate_lineage_map( if not target: continue - sources = self._get_sources( + sources, cll = self._get_sources( lineage_type, alias_db_name, source_schema=lineage_row.source_schema, @@ -251,6 +285,7 @@ def _populate_lineage_map( ddl=lineage_row.ddl, filename=lineage_row.filename, ) + target.cll = cll target.upstreams.update( self._get_upstream_lineages( @@ -262,20 +297,16 @@ def _populate_lineage_map( ) # Merging downstreams if dataset already exists and has downstreams - if target.dataset.path in self._lineage_map: - self._lineage_map[ - target.dataset.path - ].upstreams = self._lineage_map[ - target.dataset.path - ].upstreams.union( - target.upstreams - ) + if target.dataset.urn in self._lineage_map: + self._lineage_map[target.dataset.urn].upstreams = self._lineage_map[ + target.dataset.urn + ].upstreams.union(target.upstreams) else: - self._lineage_map[target.dataset.path] = target + self._lineage_map[target.dataset.urn] = target logger.debug( - f"Lineage[{target}]:{self._lineage_map[target.dataset.path]}" + f"Lineage[{target}]:{self._lineage_map[target.dataset.urn]}" ) except Exception as e: self.warn( @@ -308,17 +339,34 @@ def _get_target_lineage( target_platform = LineageDatasetPlatform.S3 # Following call requires 'filename' key in lineage_row target_path = self._build_s3_path_from_row(lineage_row.filename) + urn = make_dataset_urn_with_platform_instance( + platform=target_platform.value, + name=target_path, + env=self.config.env, + platform_instance=self.config.platform_instance_map.get( + target_platform.value + ) + if self.config.platform_instance_map is not None + else None, + ) except ValueError as e: self.warn(logger, "non-s3-lineage", str(e)) return None else: target_platform = LineageDatasetPlatform.REDSHIFT target_path = f"{alias_db_name}.{lineage_row.target_schema}.{lineage_row.target_table}" + urn = make_dataset_urn_with_platform_instance( + platform=target_platform.value, + platform_instance=self.config.platform_instance, + name=target_path, + env=self.config.env, + ) return LineageItem( - dataset=LineageDataset(platform=target_platform, path=target_path), + dataset=LineageDataset(platform=target_platform, urn=urn), upstreams=set(), collector_type=lineage_type, + cll=None, ) def _get_upstream_lineages( @@ -331,11 +379,22 @@ def _get_upstream_lineages( targe_source = [] for source in sources: if source.platform == LineageDatasetPlatform.REDSHIFT: - db, schema, 
table = source.path.split(".") + qualified_table_name = dataset_urn.DatasetUrn.create_from_string( + source.urn + ).get_entity_id()[1] + db, schema, table = qualified_table_name.split(".") if db == raw_db_name: db = alias_db_name path = f"{db}.{schema}.{table}" - source = LineageDataset(platform=source.platform, path=path) + source = LineageDataset( + platform=source.platform, + urn=make_dataset_urn_with_platform_instance( + platform=LineageDatasetPlatform.REDSHIFT.value, + platform_instance=self.config.platform_instance, + name=path, + env=self.config.env, + ), + ) # Filtering out tables which does not exist in Redshift # It was deleted in the meantime or query parser did not capture well the table name @@ -345,7 +404,7 @@ def _get_upstream_lineages( or not any(table == t.name for t in all_tables[db][schema]) ): logger.debug( - f"{source.path} missing table, dropping from lineage.", + f"{source.urn} missing table, dropping from lineage.", ) self.report.num_lineage_tables_dropped += 1 continue @@ -433,36 +492,73 @@ def populate_lineage( memory_footprint.total_size(self._lineage_map) ) + def make_fine_grained_lineage_class( + self, lineage_item: LineageItem, dataset_urn: str + ) -> List[FineGrainedLineage]: + fine_grained_lineages: List[FineGrainedLineage] = [] + + if ( + self.config.extract_column_level_lineage is False + or lineage_item.cll is None + ): + logger.debug("CLL extraction is disabled") + return fine_grained_lineages + + logger.debug("Extracting column level lineage") + + cll: List[sqlglot_l.ColumnLineageInfo] = lineage_item.cll + + for cll_info in cll: + downstream = ( + [builder.make_schema_field_urn(dataset_urn, cll_info.downstream.column)] + if cll_info.downstream is not None + and cll_info.downstream.column is not None + else [] + ) + + upstreams = [ + builder.make_schema_field_urn(column_ref.table, column_ref.column) + for column_ref in cll_info.upstreams + ] + + fine_grained_lineages.append( + FineGrainedLineage( + downstreamType=FineGrainedLineageDownstreamType.FIELD, + downstreams=downstream, + upstreamType=FineGrainedLineageUpstreamType.FIELD_SET, + upstreams=upstreams, + ) + ) + + logger.debug(f"Created fine_grained_lineage for {dataset_urn}") + + return fine_grained_lineages + def get_lineage( self, table: Union[RedshiftTable, RedshiftView], dataset_urn: str, schema: RedshiftSchema, ) -> Optional[Tuple[UpstreamLineageClass, Dict[str, str]]]: - dataset_key = mce_builder.dataset_urn_to_key(dataset_urn) - if dataset_key is None: - return None upstream_lineage: List[UpstreamClass] = [] - if dataset_key.name in self._lineage_map: - item = self._lineage_map[dataset_key.name] + cll_lineage: List[FineGrainedLineage] = [] + + if dataset_urn in self._lineage_map: + item = self._lineage_map[dataset_urn] for upstream in item.upstreams: upstream_table = UpstreamClass( - dataset=make_dataset_urn_with_platform_instance( - upstream.platform.value, - upstream.path, - platform_instance=self.config.platform_instance_map.get( - upstream.platform.value - ) - if self.config.platform_instance_map - else None, - env=self.config.env, - ), + dataset=upstream.urn, type=item.dataset_lineage_type, ) upstream_lineage.append(upstream_table) + cll_lineage = self.make_fine_grained_lineage_class( + lineage_item=item, + dataset_urn=dataset_urn, + ) + tablename = table.name if table.type == "EXTERNAL_TABLE": # external_db_params = schema.option @@ -489,7 +585,12 @@ def get_lineage( else: return None - return UpstreamLineage(upstreams=upstream_lineage), {} + return ( + UpstreamLineage( + 
upstreams=upstream_lineage, fineGrainedLineages=cll_lineage or None + ), + {}, + ) def report_status(self, step: str, status: bool) -> None: if self.redundant_run_skip_handler: diff --git a/metadata-ingestion/src/datahub/ingestion/source/redshift/redshift.py b/metadata-ingestion/src/datahub/ingestion/source/redshift/redshift.py index e8a8ff976afa6..a1b6333a3775d 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/redshift/redshift.py +++ b/metadata-ingestion/src/datahub/ingestion/source/redshift/redshift.py @@ -881,6 +881,7 @@ def extract_lineage( self.lineage_extractor = RedshiftLineageExtractor( config=self.config, report=self.report, + context=self.ctx, redundant_run_skip_handler=self.redundant_lineage_run_skip_handler, ) diff --git a/metadata-ingestion/tests/unit/test_redshift_lineage.py b/metadata-ingestion/tests/unit/test_redshift_lineage.py index c7d6ac18e044c..db5af3a71efb9 100644 --- a/metadata-ingestion/tests/unit/test_redshift_lineage.py +++ b/metadata-ingestion/tests/unit/test_redshift_lineage.py @@ -1,6 +1,8 @@ +from datahub.ingestion.api.common import PipelineContext from datahub.ingestion.source.redshift.config import RedshiftConfig from datahub.ingestion.source.redshift.lineage import RedshiftLineageExtractor from datahub.ingestion.source.redshift.report import RedshiftReport +from datahub.utilities.sqlglot_lineage import ColumnLineageInfo, DownstreamColumnRef def test_get_sources_from_query(): @@ -10,14 +12,20 @@ def test_get_sources_from_query(): test_query = """ select * from my_schema.my_table """ - lineage_extractor = RedshiftLineageExtractor(config, report) - lineage_datasets = lineage_extractor._get_sources_from_query( + lineage_extractor = RedshiftLineageExtractor( + config, report, PipelineContext(run_id="foo") + ) + lineage_datasets, _ = lineage_extractor._get_sources_from_query( db_name="test", query=test_query ) assert len(lineage_datasets) == 1 lineage = lineage_datasets[0] - assert lineage.path == "test.my_schema.my_table" + + assert ( + lineage.urn + == "urn:li:dataset:(urn:li:dataPlatform:redshift,test.my_schema.my_table,PROD)" + ) def test_get_sources_from_query_with_only_table_name(): @@ -27,14 +35,20 @@ def test_get_sources_from_query_with_only_table_name(): test_query = """ select * from my_table """ - lineage_extractor = RedshiftLineageExtractor(config, report) - lineage_datasets = lineage_extractor._get_sources_from_query( + lineage_extractor = RedshiftLineageExtractor( + config, report, PipelineContext(run_id="foo") + ) + lineage_datasets, _ = lineage_extractor._get_sources_from_query( db_name="test", query=test_query ) assert len(lineage_datasets) == 1 lineage = lineage_datasets[0] - assert lineage.path == "test.public.my_table" + + assert ( + lineage.urn + == "urn:li:dataset:(urn:li:dataPlatform:redshift,test.public.my_table,PROD)" + ) def test_get_sources_from_query_with_database(): @@ -44,14 +58,20 @@ def test_get_sources_from_query_with_database(): test_query = """ select * from test.my_schema.my_table """ - lineage_extractor = RedshiftLineageExtractor(config, report) - lineage_datasets = lineage_extractor._get_sources_from_query( + lineage_extractor = RedshiftLineageExtractor( + config, report, PipelineContext(run_id="foo") + ) + lineage_datasets, _ = lineage_extractor._get_sources_from_query( db_name="test", query=test_query ) assert len(lineage_datasets) == 1 lineage = lineage_datasets[0] - assert lineage.path == "test.my_schema.my_table" + + assert ( + lineage.urn + == 
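make_fine_grained_lineage_class above folds the parser's ColumnLineageInfo entries into FineGrainedLineage aspects. For reference, a single entry built by hand looks roughly like this; the dataset urns are hypothetical, while the FIELD / FIELD_SET types mirror the helper:

    import datahub.emitter.mce_builder as builder
    from datahub.metadata.com.linkedin.pegasus2avro.dataset import (
        FineGrainedLineage,
        FineGrainedLineageDownstreamType,
        FineGrainedLineageUpstreamType,
    )

    downstream = "urn:li:dataset:(urn:li:dataPlatform:redshift,dev.public.orders,PROD)"
    upstream = "urn:li:dataset:(urn:li:dataPlatform:redshift,dev.public.raw_orders,PROD)"

    fgl = FineGrainedLineage(
        downstreamType=FineGrainedLineageDownstreamType.FIELD,
        downstreams=[builder.make_schema_field_urn(downstream, "amount")],
        upstreamType=FineGrainedLineageUpstreamType.FIELD_SET,
        upstreams=[builder.make_schema_field_urn(upstream, "amount")],
    )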
"urn:li:dataset:(urn:li:dataPlatform:redshift,test.my_schema.my_table,PROD)" + ) def test_get_sources_from_query_with_non_default_database(): @@ -61,14 +81,20 @@ def test_get_sources_from_query_with_non_default_database(): test_query = """ select * from test2.my_schema.my_table """ - lineage_extractor = RedshiftLineageExtractor(config, report) - lineage_datasets = lineage_extractor._get_sources_from_query( + lineage_extractor = RedshiftLineageExtractor( + config, report, PipelineContext(run_id="foo") + ) + lineage_datasets, _ = lineage_extractor._get_sources_from_query( db_name="test", query=test_query ) assert len(lineage_datasets) == 1 lineage = lineage_datasets[0] - assert lineage.path == "test2.my_schema.my_table" + + assert ( + lineage.urn + == "urn:li:dataset:(urn:li:dataPlatform:redshift,test2.my_schema.my_table,PROD)" + ) def test_get_sources_from_query_with_only_table(): @@ -78,27 +104,48 @@ def test_get_sources_from_query_with_only_table(): test_query = """ select * from my_table """ - lineage_extractor = RedshiftLineageExtractor(config, report) - lineage_datasets = lineage_extractor._get_sources_from_query( + lineage_extractor = RedshiftLineageExtractor( + config, report, PipelineContext(run_id="foo") + ) + lineage_datasets, _ = lineage_extractor._get_sources_from_query( db_name="test", query=test_query ) assert len(lineage_datasets) == 1 lineage = lineage_datasets[0] - assert lineage.path == "test.public.my_table" + + assert ( + lineage.urn + == "urn:li:dataset:(urn:li:dataPlatform:redshift,test.public.my_table,PROD)" + ) -def test_get_sources_from_query_with_four_part_table_should_throw_exception(): +def test_cll(): config = RedshiftConfig(host_port="localhost:5439", database="test") report = RedshiftReport() test_query = """ - select * from database.schema.my_table.test + select a,b,c from db.public.customer inner join db.public.order on db.public.customer.id = db.public.order.customer_id """ - lineage_extractor = RedshiftLineageExtractor(config, report) - try: - lineage_extractor._get_sources_from_query(db_name="test", query=test_query) - except ValueError: - pass - - assert f"{test_query} should have thrown a ValueError exception but it didn't" + lineage_extractor = RedshiftLineageExtractor( + config, report, PipelineContext(run_id="foo") + ) + _, cll = lineage_extractor._get_sources_from_query(db_name="db", query=test_query) + + assert cll == [ + ColumnLineageInfo( + downstream=DownstreamColumnRef(table=None, column="a"), + upstreams=[], + logic=None, + ), + ColumnLineageInfo( + downstream=DownstreamColumnRef(table=None, column="b"), + upstreams=[], + logic=None, + ), + ColumnLineageInfo( + downstream=DownstreamColumnRef(table=None, column="c"), + upstreams=[], + logic=None, + ), + ] From 4b6b941a2abf13854511c9af0e88a17d5acfd5e6 Mon Sep 17 00:00:00 2001 From: Harsha Mandadi <115464537+harsha-mandadi-4026@users.noreply.github.com> Date: Wed, 11 Oct 2023 19:01:46 +0100 Subject: [PATCH 09/14] fix(ingest): Fix postgres lineage within views (#8906) Co-authored-by: Harshal Sheth Co-authored-by: Maggie Hays --- .../datahub/ingestion/source/sql/postgres.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/metadata-ingestion/src/datahub/ingestion/source/sql/postgres.py b/metadata-ingestion/src/datahub/ingestion/source/sql/postgres.py index ba8655b83446d..a6a9d8e2c8597 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/sql/postgres.py +++ b/metadata-ingestion/src/datahub/ingestion/source/sql/postgres.py @@ -217,14 +217,15 @@ def 
_get_view_lineage_elements( key = (lineage.dependent_view, lineage.dependent_schema) # Append the source table to the list. lineage_elements[key].append( - mce_builder.make_dataset_urn( - self.platform, - self.get_identifier( + mce_builder.make_dataset_urn_with_platform_instance( + platform=self.platform, + name=self.get_identifier( schema=lineage.source_schema, entity=lineage.source_table, inspector=inspector, ), - self.config.env, + platform_instance=self.config.platform_instance, + env=self.config.env, ) ) @@ -244,12 +245,13 @@ def _get_view_lineage_workunits( dependent_view, dependent_schema = key # Construct a lineage object. - urn = mce_builder.make_dataset_urn( - self.platform, - self.get_identifier( + urn = mce_builder.make_dataset_urn_with_platform_instance( + platform=self.platform, + name=self.get_identifier( schema=dependent_schema, entity=dependent_view, inspector=inspector ), - self.config.env, + platform_instance=self.config.platform_instance, + env=self.config.env, ) # use the mce_builder to ensure that the change proposal inherits From 932fbcddbf7c3201898e0918218e80c9246b0cd2 Mon Sep 17 00:00:00 2001 From: Harshal Sheth Date: Wed, 11 Oct 2023 14:17:02 -0400 Subject: [PATCH 10/14] refactor(ingest/dbt): move dbt tests logic to dedicated file (#8984) --- .../src/datahub/ingestion/api/common.py | 9 + .../datahub/ingestion/source/csv_enricher.py | 8 +- .../datahub/ingestion/source/dbt/dbt_cloud.py | 3 +- .../ingestion/source/dbt/dbt_common.py | 278 +----------------- .../datahub/ingestion/source/dbt/dbt_core.py | 3 +- .../datahub/ingestion/source/dbt/dbt_tests.py | 261 ++++++++++++++++ 6 files changed, 288 insertions(+), 274 deletions(-) create mode 100644 metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_tests.py diff --git a/metadata-ingestion/src/datahub/ingestion/api/common.py b/metadata-ingestion/src/datahub/ingestion/api/common.py index 778bd119615e2..a6761a3c77d5e 100644 --- a/metadata-ingestion/src/datahub/ingestion/api/common.py +++ b/metadata-ingestion/src/datahub/ingestion/api/common.py @@ -2,6 +2,7 @@ from dataclasses import dataclass from typing import TYPE_CHECKING, Dict, Generic, Iterable, Optional, Tuple, TypeVar +from datahub.configuration.common import ConfigurationError from datahub.emitter.mce_builder import set_dataset_urn_to_lower from datahub.ingestion.api.committable import Committable from datahub.ingestion.graph.client import DataHubGraph @@ -75,3 +76,11 @@ def register_checkpointer(self, committable: Committable) -> None: def get_committables(self) -> Iterable[Tuple[str, Committable]]: yield from self.checkpointers.items() + + def require_graph(self, operation: Optional[str] = None) -> DataHubGraph: + if not self.graph: + raise ConfigurationError( + f"{operation or 'This operation'} requires a graph, but none was provided. " + "To provide one, either use the datahub-rest sink or set the top-level datahub_api config in the recipe." + ) + return self.graph diff --git a/metadata-ingestion/src/datahub/ingestion/source/csv_enricher.py b/metadata-ingestion/src/datahub/ingestion/source/csv_enricher.py index 7cb487a86d931..611f0c5c52cc6 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/csv_enricher.py +++ b/metadata-ingestion/src/datahub/ingestion/source/csv_enricher.py @@ -129,11 +129,9 @@ def __init__(self, config: CSVEnricherConfig, ctx: PipelineContext): # Map from entity urn to a list of SubResourceRow. 
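The new PipelineContext.require_graph helper shown above centralizes the "a graph is required" error. A sketch of how a caller might lean on it; the aspect lookup is illustrative and not part of this patch:

    from datahub.ingestion.api.common import PipelineContext
    from datahub.metadata.schema_classes import StatusClass

    def is_soft_deleted(ctx: PipelineContext, urn: str) -> bool:
        # Raises ConfigurationError with a uniform message if neither the
        # datahub-rest sink nor a datahub_api config was provided.
        graph = ctx.require_graph(operation="This lookup")
        status = graph.get_aspect(urn, StatusClass)
        return bool(status and status.removed)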
self.editable_schema_metadata_map: Dict[str, List[SubResourceRow]] = {} self.should_overwrite: bool = self.config.write_semantics == "OVERRIDE" - if not self.should_overwrite and not self.ctx.graph: - raise ConfigurationError( - "With PATCH semantics, the csv-enricher source requires a datahub_api to connect to. " - "Consider using the datahub-rest sink or provide a datahub_api: configuration on your ingestion recipe." - ) + + if not self.should_overwrite: + self.ctx.require_graph(operation="The csv-enricher's PATCH semantics flag") def get_resource_glossary_terms_work_unit( self, diff --git a/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_cloud.py b/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_cloud.py index af9769bc9d94c..da1ea8ecb4678 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_cloud.py +++ b/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_cloud.py @@ -20,9 +20,8 @@ DBTCommonConfig, DBTNode, DBTSourceBase, - DBTTest, - DBTTestResult, ) +from datahub.ingestion.source.dbt.dbt_tests import DBTTest, DBTTestResult logger = logging.getLogger(__name__) diff --git a/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_common.py b/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_common.py index 0f5c08eb6ac54..48d2118a9b091 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_common.py +++ b/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_common.py @@ -1,11 +1,10 @@ -import json import logging import re from abc import abstractmethod from dataclasses import dataclass, field from datetime import datetime from enum import auto -from typing import Any, Callable, ClassVar, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, List, Optional, Tuple import pydantic from pydantic import root_validator, validator @@ -34,6 +33,12 @@ from datahub.ingestion.api.source import MetadataWorkUnitProcessor from datahub.ingestion.api.workunit import MetadataWorkUnit from datahub.ingestion.source.common.subtypes import DatasetSubTypes +from datahub.ingestion.source.dbt.dbt_tests import ( + DBTTest, + DBTTestResult, + make_assertion_from_test, + make_assertion_result_from_test, +) from datahub.ingestion.source.sql.sql_types import ( ATHENA_SQL_TYPES_MAP, BIGQUERY_TYPES_MAP, @@ -81,20 +86,7 @@ TimeTypeClass, ) from datahub.metadata.schema_classes import ( - AssertionInfoClass, - AssertionResultClass, - AssertionResultTypeClass, - AssertionRunEventClass, - AssertionRunStatusClass, - AssertionStdAggregationClass, - AssertionStdOperatorClass, - AssertionStdParameterClass, - AssertionStdParametersClass, - AssertionStdParameterTypeClass, - AssertionTypeClass, DataPlatformInstanceClass, - DatasetAssertionInfoClass, - DatasetAssertionScopeClass, DatasetPropertiesClass, GlobalTagsClass, GlossaryTermsClass, @@ -551,134 +543,6 @@ def get_column_type( return SchemaFieldDataType(type=TypeClass()) -@dataclass -class AssertionParams: - scope: Union[DatasetAssertionScopeClass, str] - operator: Union[AssertionStdOperatorClass, str] - aggregation: Union[AssertionStdAggregationClass, str] - parameters: Optional[Callable[[Dict[str, str]], AssertionStdParametersClass]] = None - logic_fn: Optional[Callable[[Dict[str, str]], Optional[str]]] = None - - -def _get_name_for_relationship_test(kw_args: Dict[str, str]) -> Optional[str]: - """ - Try to produce a useful string for the name of a relationship constraint. 
- Return None if we fail to - """ - destination_ref = kw_args.get("to") - source_ref = kw_args.get("model") - column_name = kw_args.get("column_name") - dest_field_name = kw_args.get("field") - if not destination_ref or not source_ref or not column_name or not dest_field_name: - # base assertions are violated, bail early - return None - m = re.match(r"^ref\(\'(.*)\'\)$", destination_ref) - if m: - destination_table = m.group(1) - else: - destination_table = destination_ref - m = re.search(r"ref\(\'(.*)\'\)", source_ref) - if m: - source_table = m.group(1) - else: - source_table = source_ref - return f"{source_table}.{column_name} referential integrity to {destination_table}.{dest_field_name}" - - -@dataclass -class DBTTest: - qualified_test_name: str - column_name: Optional[str] - kw_args: dict - - TEST_NAME_TO_ASSERTION_MAP: ClassVar[Dict[str, AssertionParams]] = { - "not_null": AssertionParams( - scope=DatasetAssertionScopeClass.DATASET_COLUMN, - operator=AssertionStdOperatorClass.NOT_NULL, - aggregation=AssertionStdAggregationClass.IDENTITY, - ), - "unique": AssertionParams( - scope=DatasetAssertionScopeClass.DATASET_COLUMN, - operator=AssertionStdOperatorClass.EQUAL_TO, - aggregation=AssertionStdAggregationClass.UNIQUE_PROPOTION, - parameters=lambda _: AssertionStdParametersClass( - value=AssertionStdParameterClass( - value="1.0", - type=AssertionStdParameterTypeClass.NUMBER, - ) - ), - ), - "accepted_values": AssertionParams( - scope=DatasetAssertionScopeClass.DATASET_COLUMN, - operator=AssertionStdOperatorClass.IN, - aggregation=AssertionStdAggregationClass.IDENTITY, - parameters=lambda kw_args: AssertionStdParametersClass( - value=AssertionStdParameterClass( - value=json.dumps(kw_args.get("values")), - type=AssertionStdParameterTypeClass.SET, - ), - ), - ), - "relationships": AssertionParams( - scope=DatasetAssertionScopeClass.DATASET_COLUMN, - operator=AssertionStdOperatorClass._NATIVE_, - aggregation=AssertionStdAggregationClass.IDENTITY, - parameters=lambda kw_args: AssertionStdParametersClass( - value=AssertionStdParameterClass( - value=json.dumps(kw_args.get("values")), - type=AssertionStdParameterTypeClass.SET, - ), - ), - logic_fn=_get_name_for_relationship_test, - ), - "dbt_expectations.expect_column_values_to_not_be_null": AssertionParams( - scope=DatasetAssertionScopeClass.DATASET_COLUMN, - operator=AssertionStdOperatorClass.NOT_NULL, - aggregation=AssertionStdAggregationClass.IDENTITY, - ), - "dbt_expectations.expect_column_values_to_be_between": AssertionParams( - scope=DatasetAssertionScopeClass.DATASET_COLUMN, - operator=AssertionStdOperatorClass.BETWEEN, - aggregation=AssertionStdAggregationClass.IDENTITY, - parameters=lambda x: AssertionStdParametersClass( - minValue=AssertionStdParameterClass( - value=str(x.get("min_value", "unknown")), - type=AssertionStdParameterTypeClass.NUMBER, - ), - maxValue=AssertionStdParameterClass( - value=str(x.get("max_value", "unknown")), - type=AssertionStdParameterTypeClass.NUMBER, - ), - ), - ), - "dbt_expectations.expect_column_values_to_be_in_set": AssertionParams( - scope=DatasetAssertionScopeClass.DATASET_COLUMN, - operator=AssertionStdOperatorClass.IN, - aggregation=AssertionStdAggregationClass.IDENTITY, - parameters=lambda kw_args: AssertionStdParametersClass( - value=AssertionStdParameterClass( - value=json.dumps(kw_args.get("value_set")), - type=AssertionStdParameterTypeClass.SET, - ), - ), - ), - } - - -@dataclass -class DBTTestResult: - invocation_id: str - - status: str - execution_time: datetime - - native_results: 
Dict[str, str] - - -def string_map(input_map: Dict[str, Any]) -> Dict[str, str]: - return {k: str(v) for k, v in input_map.items()} - - @platform_name("dbt") @config_class(DBTCommonConfig) @support_status(SupportStatus.CERTIFIED) @@ -750,7 +614,7 @@ def create_test_entity_mcps( for upstream_urn in sorted(upstream_urns): if self.config.entities_enabled.can_emit_node_type("test"): - yield self._make_assertion_from_test( + yield make_assertion_from_test( custom_props, node, assertion_urn, @@ -759,133 +623,17 @@ def create_test_entity_mcps( if node.test_result: if self.config.entities_enabled.can_emit_test_results: - yield self._make_assertion_result_from_test( - node, assertion_urn, upstream_urn + yield make_assertion_result_from_test( + node, + assertion_urn, + upstream_urn, + test_warnings_are_errors=self.config.test_warnings_are_errors, ) else: logger.debug( f"Skipping test result {node.name} emission since it is turned off." ) - def _make_assertion_from_test( - self, - extra_custom_props: Dict[str, str], - node: DBTNode, - assertion_urn: str, - upstream_urn: str, - ) -> MetadataWorkUnit: - assert node.test_info - qualified_test_name = node.test_info.qualified_test_name - column_name = node.test_info.column_name - kw_args = node.test_info.kw_args - - if qualified_test_name in DBTTest.TEST_NAME_TO_ASSERTION_MAP: - assertion_params = DBTTest.TEST_NAME_TO_ASSERTION_MAP[qualified_test_name] - assertion_info = AssertionInfoClass( - type=AssertionTypeClass.DATASET, - customProperties=extra_custom_props, - datasetAssertion=DatasetAssertionInfoClass( - dataset=upstream_urn, - scope=assertion_params.scope, - operator=assertion_params.operator, - fields=[ - mce_builder.make_schema_field_urn(upstream_urn, column_name) - ] - if ( - assertion_params.scope - == DatasetAssertionScopeClass.DATASET_COLUMN - and column_name - ) - else [], - nativeType=node.name, - aggregation=assertion_params.aggregation, - parameters=assertion_params.parameters(kw_args) - if assertion_params.parameters - else None, - logic=assertion_params.logic_fn(kw_args) - if assertion_params.logic_fn - else None, - nativeParameters=string_map(kw_args), - ), - ) - elif column_name: - # no match with known test types, column-level test - assertion_info = AssertionInfoClass( - type=AssertionTypeClass.DATASET, - customProperties=extra_custom_props, - datasetAssertion=DatasetAssertionInfoClass( - dataset=upstream_urn, - scope=DatasetAssertionScopeClass.DATASET_COLUMN, - operator=AssertionStdOperatorClass._NATIVE_, - fields=[ - mce_builder.make_schema_field_urn(upstream_urn, column_name) - ], - nativeType=node.name, - logic=node.compiled_code or node.raw_code, - aggregation=AssertionStdAggregationClass._NATIVE_, - nativeParameters=string_map(kw_args), - ), - ) - else: - # no match with known test types, default to row-level test - assertion_info = AssertionInfoClass( - type=AssertionTypeClass.DATASET, - customProperties=extra_custom_props, - datasetAssertion=DatasetAssertionInfoClass( - dataset=upstream_urn, - scope=DatasetAssertionScopeClass.DATASET_ROWS, - operator=AssertionStdOperatorClass._NATIVE_, - logic=node.compiled_code or node.raw_code, - nativeType=node.name, - aggregation=AssertionStdAggregationClass._NATIVE_, - nativeParameters=string_map(kw_args), - ), - ) - - wu = MetadataChangeProposalWrapper( - entityUrn=assertion_urn, - aspect=assertion_info, - ).as_workunit() - - return wu - - def _make_assertion_result_from_test( - self, - node: DBTNode, - assertion_urn: str, - upstream_urn: str, - ) -> MetadataWorkUnit: - assert 
node.test_result - test_result = node.test_result - - assertionResult = AssertionRunEventClass( - timestampMillis=int(test_result.execution_time.timestamp() * 1000.0), - assertionUrn=assertion_urn, - asserteeUrn=upstream_urn, - runId=test_result.invocation_id, - result=AssertionResultClass( - type=AssertionResultTypeClass.SUCCESS - if test_result.status == "pass" - or ( - not self.config.test_warnings_are_errors - and test_result.status == "warn" - ) - else AssertionResultTypeClass.FAILURE, - nativeResults=test_result.native_results, - ), - status=AssertionRunStatusClass.COMPLETE, - ) - - event = MetadataChangeProposalWrapper( - entityUrn=assertion_urn, - aspect=assertionResult, - ) - wu = MetadataWorkUnit( - id=f"{assertion_urn}-assertionRunEvent-{upstream_urn}", - mcp=event, - ) - return wu - @abstractmethod def load_nodes(self) -> Tuple[List[DBTNode], Dict[str, Optional[str]]]: # return dbt nodes + global custom properties diff --git a/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_core.py b/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_core.py index c08295ed1dc59..dc3a84847beb2 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_core.py +++ b/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_core.py @@ -26,9 +26,8 @@ DBTNode, DBTSourceBase, DBTSourceReport, - DBTTest, - DBTTestResult, ) +from datahub.ingestion.source.dbt.dbt_tests import DBTTest, DBTTestResult logger = logging.getLogger(__name__) diff --git a/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_tests.py b/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_tests.py new file mode 100644 index 0000000000000..721769d214d9e --- /dev/null +++ b/metadata-ingestion/src/datahub/ingestion/source/dbt/dbt_tests.py @@ -0,0 +1,261 @@ +import json +import re +from dataclasses import dataclass +from datetime import datetime +from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Union + +from datahub.emitter import mce_builder +from datahub.emitter.mcp import MetadataChangeProposalWrapper +from datahub.ingestion.api.workunit import MetadataWorkUnit +from datahub.metadata.schema_classes import ( + AssertionInfoClass, + AssertionResultClass, + AssertionResultTypeClass, + AssertionRunEventClass, + AssertionRunStatusClass, + AssertionStdAggregationClass, + AssertionStdOperatorClass, + AssertionStdParameterClass, + AssertionStdParametersClass, + AssertionStdParameterTypeClass, + AssertionTypeClass, + DatasetAssertionInfoClass, + DatasetAssertionScopeClass, +) + +if TYPE_CHECKING: + from datahub.ingestion.source.dbt.dbt_common import DBTNode + + +@dataclass +class DBTTest: + qualified_test_name: str + column_name: Optional[str] + kw_args: dict + + +@dataclass +class DBTTestResult: + invocation_id: str + + status: str + execution_time: datetime + + native_results: Dict[str, str] + + +def _get_name_for_relationship_test(kw_args: Dict[str, str]) -> Optional[str]: + """ + Try to produce a useful string for the name of a relationship constraint. 
+ Return None if we fail to + """ + destination_ref = kw_args.get("to") + source_ref = kw_args.get("model") + column_name = kw_args.get("column_name") + dest_field_name = kw_args.get("field") + if not destination_ref or not source_ref or not column_name or not dest_field_name: + # base assertions are violated, bail early + return None + m = re.match(r"^ref\(\'(.*)\'\)$", destination_ref) + if m: + destination_table = m.group(1) + else: + destination_table = destination_ref + m = re.search(r"ref\(\'(.*)\'\)", source_ref) + if m: + source_table = m.group(1) + else: + source_table = source_ref + return f"{source_table}.{column_name} referential integrity to {destination_table}.{dest_field_name}" + + +@dataclass +class AssertionParams: + scope: Union[DatasetAssertionScopeClass, str] + operator: Union[AssertionStdOperatorClass, str] + aggregation: Union[AssertionStdAggregationClass, str] + parameters: Optional[Callable[[Dict[str, str]], AssertionStdParametersClass]] = None + logic_fn: Optional[Callable[[Dict[str, str]], Optional[str]]] = None + + +_DBT_TEST_NAME_TO_ASSERTION_MAP: Dict[str, AssertionParams] = { + "not_null": AssertionParams( + scope=DatasetAssertionScopeClass.DATASET_COLUMN, + operator=AssertionStdOperatorClass.NOT_NULL, + aggregation=AssertionStdAggregationClass.IDENTITY, + ), + "unique": AssertionParams( + scope=DatasetAssertionScopeClass.DATASET_COLUMN, + operator=AssertionStdOperatorClass.EQUAL_TO, + aggregation=AssertionStdAggregationClass.UNIQUE_PROPOTION, + parameters=lambda _: AssertionStdParametersClass( + value=AssertionStdParameterClass( + value="1.0", + type=AssertionStdParameterTypeClass.NUMBER, + ) + ), + ), + "accepted_values": AssertionParams( + scope=DatasetAssertionScopeClass.DATASET_COLUMN, + operator=AssertionStdOperatorClass.IN, + aggregation=AssertionStdAggregationClass.IDENTITY, + parameters=lambda kw_args: AssertionStdParametersClass( + value=AssertionStdParameterClass( + value=json.dumps(kw_args.get("values")), + type=AssertionStdParameterTypeClass.SET, + ), + ), + ), + "relationships": AssertionParams( + scope=DatasetAssertionScopeClass.DATASET_COLUMN, + operator=AssertionStdOperatorClass._NATIVE_, + aggregation=AssertionStdAggregationClass.IDENTITY, + parameters=lambda kw_args: AssertionStdParametersClass( + value=AssertionStdParameterClass( + value=json.dumps(kw_args.get("values")), + type=AssertionStdParameterTypeClass.SET, + ), + ), + logic_fn=_get_name_for_relationship_test, + ), + "dbt_expectations.expect_column_values_to_not_be_null": AssertionParams( + scope=DatasetAssertionScopeClass.DATASET_COLUMN, + operator=AssertionStdOperatorClass.NOT_NULL, + aggregation=AssertionStdAggregationClass.IDENTITY, + ), + "dbt_expectations.expect_column_values_to_be_between": AssertionParams( + scope=DatasetAssertionScopeClass.DATASET_COLUMN, + operator=AssertionStdOperatorClass.BETWEEN, + aggregation=AssertionStdAggregationClass.IDENTITY, + parameters=lambda x: AssertionStdParametersClass( + minValue=AssertionStdParameterClass( + value=str(x.get("min_value", "unknown")), + type=AssertionStdParameterTypeClass.NUMBER, + ), + maxValue=AssertionStdParameterClass( + value=str(x.get("max_value", "unknown")), + type=AssertionStdParameterTypeClass.NUMBER, + ), + ), + ), + "dbt_expectations.expect_column_values_to_be_in_set": AssertionParams( + scope=DatasetAssertionScopeClass.DATASET_COLUMN, + operator=AssertionStdOperatorClass.IN, + aggregation=AssertionStdAggregationClass.IDENTITY, + parameters=lambda kw_args: AssertionStdParametersClass( + 
value=AssertionStdParameterClass( + value=json.dumps(kw_args.get("value_set")), + type=AssertionStdParameterTypeClass.SET, + ), + ), + ), +} + + +def _string_map(input_map: Dict[str, Any]) -> Dict[str, str]: + return {k: str(v) for k, v in input_map.items()} + + +def make_assertion_from_test( + extra_custom_props: Dict[str, str], + node: "DBTNode", + assertion_urn: str, + upstream_urn: str, +) -> MetadataWorkUnit: + assert node.test_info + qualified_test_name = node.test_info.qualified_test_name + column_name = node.test_info.column_name + kw_args = node.test_info.kw_args + + if qualified_test_name in _DBT_TEST_NAME_TO_ASSERTION_MAP: + assertion_params = _DBT_TEST_NAME_TO_ASSERTION_MAP[qualified_test_name] + assertion_info = AssertionInfoClass( + type=AssertionTypeClass.DATASET, + customProperties=extra_custom_props, + datasetAssertion=DatasetAssertionInfoClass( + dataset=upstream_urn, + scope=assertion_params.scope, + operator=assertion_params.operator, + fields=[mce_builder.make_schema_field_urn(upstream_urn, column_name)] + if ( + assertion_params.scope == DatasetAssertionScopeClass.DATASET_COLUMN + and column_name + ) + else [], + nativeType=node.name, + aggregation=assertion_params.aggregation, + parameters=assertion_params.parameters(kw_args) + if assertion_params.parameters + else None, + logic=assertion_params.logic_fn(kw_args) + if assertion_params.logic_fn + else None, + nativeParameters=_string_map(kw_args), + ), + ) + elif column_name: + # no match with known test types, column-level test + assertion_info = AssertionInfoClass( + type=AssertionTypeClass.DATASET, + customProperties=extra_custom_props, + datasetAssertion=DatasetAssertionInfoClass( + dataset=upstream_urn, + scope=DatasetAssertionScopeClass.DATASET_COLUMN, + operator=AssertionStdOperatorClass._NATIVE_, + fields=[mce_builder.make_schema_field_urn(upstream_urn, column_name)], + nativeType=node.name, + logic=node.compiled_code or node.raw_code, + aggregation=AssertionStdAggregationClass._NATIVE_, + nativeParameters=_string_map(kw_args), + ), + ) + else: + # no match with known test types, default to row-level test + assertion_info = AssertionInfoClass( + type=AssertionTypeClass.DATASET, + customProperties=extra_custom_props, + datasetAssertion=DatasetAssertionInfoClass( + dataset=upstream_urn, + scope=DatasetAssertionScopeClass.DATASET_ROWS, + operator=AssertionStdOperatorClass._NATIVE_, + logic=node.compiled_code or node.raw_code, + nativeType=node.name, + aggregation=AssertionStdAggregationClass._NATIVE_, + nativeParameters=_string_map(kw_args), + ), + ) + + return MetadataChangeProposalWrapper( + entityUrn=assertion_urn, + aspect=assertion_info, + ).as_workunit() + + +def make_assertion_result_from_test( + node: "DBTNode", + assertion_urn: str, + upstream_urn: str, + test_warnings_are_errors: bool, +) -> MetadataWorkUnit: + assert node.test_result + test_result = node.test_result + + assertionResult = AssertionRunEventClass( + timestampMillis=int(test_result.execution_time.timestamp() * 1000.0), + assertionUrn=assertion_urn, + asserteeUrn=upstream_urn, + runId=test_result.invocation_id, + result=AssertionResultClass( + type=AssertionResultTypeClass.SUCCESS + if test_result.status == "pass" + or (not test_warnings_are_errors and test_result.status == "warn") + else AssertionResultTypeClass.FAILURE, + nativeResults=test_result.native_results, + ), + status=AssertionRunStatusClass.COMPLETE, + ) + + return MetadataChangeProposalWrapper( + entityUrn=assertion_urn, + aspect=assertionResult, + ).as_workunit() From 
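Worth noting for _get_name_for_relationship_test, now housed in dbt_tests.py above: given typical dbt relationship kwargs it yields a readable assertion description. A small worked example with invented ref names:

    from datahub.ingestion.source.dbt.dbt_tests import _get_name_for_relationship_test

    kw_args = {
        "to": "ref('dim_customers')",
        "model": "ref('fct_orders')",
        "column_name": "customer_id",
        "field": "id",
    }
    # -> "fct_orders.customer_id referential integrity to dim_customers.id"
    print(_get_name_for_relationship_test(kw_args))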
1b06c6a30c8d6c0ee57f75f75ee6a436aa6c13a7 Mon Sep 17 00:00:00 2001 From: Mayuri Nehate <33225191+mayurinehate@users.noreply.github.com> Date: Thu, 12 Oct 2023 00:31:42 +0530 Subject: [PATCH 11/14] fix(ingest/snowflake): fix sample fraction for very large tables (#8988) --- .../datahub/ingestion/source/snowflake/snowflake_profiler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metadata-ingestion/src/datahub/ingestion/source/snowflake/snowflake_profiler.py b/metadata-ingestion/src/datahub/ingestion/source/snowflake/snowflake_profiler.py index 24275dcdff34d..8e18d85d6f3ca 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/snowflake/snowflake_profiler.py +++ b/metadata-ingestion/src/datahub/ingestion/source/snowflake/snowflake_profiler.py @@ -86,7 +86,7 @@ def get_batch_kwargs( # Fixed-size sampling can be slower than equivalent fraction-based sampling # as per https://docs.snowflake.com/en/sql-reference/constructs/sample#performance-considerations sample_pc = 100 * self.config.profiling.sample_size / table.rows_count - custom_sql = f'select * from "{db_name}"."{schema_name}"."{table.name}" TABLESAMPLE ({sample_pc:.3f})' + custom_sql = f'select * from "{db_name}"."{schema_name}"."{table.name}" TABLESAMPLE ({sample_pc:.8f})' return { **super().get_batch_kwargs(table, schema_name, db_name), # Lowercase/Mixedcase table names in Snowflake do not work by default. From 245284ec6c6b754b22943ba42d7139ddd5772377 Mon Sep 17 00:00:00 2001 From: jayasimhankv <145704974+jayasimhankv@users.noreply.github.com> Date: Wed, 11 Oct 2023 17:40:20 -0500 Subject: [PATCH 12/14] fix(): Display generic not found page for corp groups that do not exist (#8880) Co-authored-by: Jay Kadambi --- .../java/com/linkedin/datahub/graphql/GmsGraphQLEngine.java | 3 ++- datahub-graphql-core/src/main/resources/entity.graphql | 5 +++++ datahub-web-react/src/app/entity/group/GroupProfile.tsx | 4 ++++ datahub-web-react/src/graphql/group.graphql | 1 + 4 files changed, 12 insertions(+), 1 deletion(-) diff --git a/datahub-graphql-core/src/main/java/com/linkedin/datahub/graphql/GmsGraphQLEngine.java b/datahub-graphql-core/src/main/java/com/linkedin/datahub/graphql/GmsGraphQLEngine.java index ebb5c7d62c7d3..b99f712034fe0 100644 --- a/datahub-graphql-core/src/main/java/com/linkedin/datahub/graphql/GmsGraphQLEngine.java +++ b/datahub-graphql-core/src/main/java/com/linkedin/datahub/graphql/GmsGraphQLEngine.java @@ -1292,7 +1292,8 @@ private void configureCorpUserResolvers(final RuntimeWiring.Builder builder) { */ private void configureCorpGroupResolvers(final RuntimeWiring.Builder builder) { builder.type("CorpGroup", typeWiring -> typeWiring - .dataFetcher("relationships", new EntityRelationshipsResultResolver(graphClient))); + .dataFetcher("relationships", new EntityRelationshipsResultResolver(graphClient)) + .dataFetcher("exists", new EntityExistsResolver(entityService))); builder.type("CorpGroupInfo", typeWiring -> typeWiring .dataFetcher("admins", new LoadableTypeBatchResolver<>(corpUserType, diff --git a/datahub-graphql-core/src/main/resources/entity.graphql b/datahub-graphql-core/src/main/resources/entity.graphql index 0b15d7b875a9c..b37a8f34fa056 100644 --- a/datahub-graphql-core/src/main/resources/entity.graphql +++ b/datahub-graphql-core/src/main/resources/entity.graphql @@ -3788,6 +3788,11 @@ type CorpGroup implements Entity { Additional read only info about the group """ info: CorpGroupInfo @deprecated + + """ + Whether or not this entity exists on DataHub + """ + exists: Boolean } """ diff --git 
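To see why the Snowflake profiler change above moves from .3f to .8f: for a very large table the computed sample fraction can round to zero at three decimals, so TABLESAMPLE would return nothing. A quick sketch of the arithmetic with hypothetical row counts:

    rows_count = 10_000_000_000        # hypothetical 10B-row table
    sample_size = 10_000               # hypothetical profiling sample_size
    sample_pc = 100 * sample_size / rows_count  # 0.0001 percent

    print(f"TABLESAMPLE ({sample_pc:.3f})")  # TABLESAMPLE (0.000) -> effectively samples nothing
    print(f"TABLESAMPLE ({sample_pc:.8f})")  # TABLESAMPLE (0.00010000)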
a/datahub-web-react/src/app/entity/group/GroupProfile.tsx b/datahub-web-react/src/app/entity/group/GroupProfile.tsx index d5e284af931df..53d2062277dec 100644 --- a/datahub-web-react/src/app/entity/group/GroupProfile.tsx +++ b/datahub-web-react/src/app/entity/group/GroupProfile.tsx @@ -11,6 +11,7 @@ import { RoutedTabs } from '../../shared/RoutedTabs'; import GroupInfoSidebar from './GroupInfoSideBar'; import { GroupAssets } from './GroupAssets'; import { ErrorSection } from '../../shared/error/ErrorSection'; +import NonExistentEntityPage from '../shared/entity/NonExistentEntityPage'; const messageStyle = { marginTop: '10%' }; @@ -110,6 +111,9 @@ export default function GroupProfile() { urn, }; + if (data?.corpGroup?.exists === false) { + return ; + } return ( <> {error && } diff --git a/datahub-web-react/src/graphql/group.graphql b/datahub-web-react/src/graphql/group.graphql index 9aa6e2b005f16..1007721e51a4e 100644 --- a/datahub-web-react/src/graphql/group.graphql +++ b/datahub-web-react/src/graphql/group.graphql @@ -3,6 +3,7 @@ query getGroup($urn: String!, $membersCount: Int!) { urn type name + exists origin { type externalType From 245c5c00087116d236acf7a9bbddbdb4dee15949 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Sergio=20G=C3=B3mez=20Villamor?= Date: Thu, 12 Oct 2023 02:06:19 +0200 Subject: [PATCH 13/14] fix(ingest/looker): stop emitting tag owner (#8942) --- docs/how/updating-datahub.md | 2 + .../ingestion/source/looker/looker_common.py | 13 +----- .../looker/golden_looker_mces.json | 42 ------------------- .../looker/golden_test_allow_ingest.json | 42 ------------------- ...olden_test_external_project_view_mces.json | 42 ------------------- .../looker/golden_test_file_path_ingest.json | 42 ------------------- .../golden_test_independent_look_ingest.json | 42 ------------------- .../looker/golden_test_ingest.json | 42 ------------------- .../looker/golden_test_ingest_joins.json | 42 ------------------- .../golden_test_ingest_unaliased_joins.json | 42 ------------------- .../looker_mces_golden_deleted_stateful.json | 42 ------------------- .../looker/looker_mces_usage_history.json | 42 ------------------- .../lookml/lookml_mces_api_bigquery.json | 42 ------------------- .../lookml/lookml_mces_api_hive2.json | 42 ------------------- .../lookml/lookml_mces_badsql_parser.json | 42 ------------------- .../lookml/lookml_mces_offline.json | 42 ------------------- .../lookml_mces_offline_deny_pattern.json | 42 ------------------- ...lookml_mces_offline_platform_instance.json | 42 ------------------- .../lookml_mces_with_external_urls.json | 42 ------------------- .../lookml/lookml_reachable_views.json | 42 ------------------- 20 files changed, 3 insertions(+), 768 deletions(-) diff --git a/docs/how/updating-datahub.md b/docs/how/updating-datahub.md index 5d0ad5eaf8f7e..9cd4ad5c6f02d 100644 --- a/docs/how/updating-datahub.md +++ b/docs/how/updating-datahub.md @@ -7,6 +7,8 @@ This file documents any backwards-incompatible changes in DataHub and assists pe ### Breaking Changes - #8810 - Removed support for SQLAlchemy 1.3.x. Only SQLAlchemy 1.4.x is supported now. +- #8942 - Removed `urn:li:corpuser:datahub` owner for the `Measure`, `Dimension` and `Temporal` tags emitted + by Looker and LookML source connectors. - #8853 - The Airflow plugin no longer supports Airflow 2.0.x or Python 3.7. See the docs for more details. - #8853 - Introduced the Airflow plugin v2. 
If you're using Airflow 2.3+, the v2 plugin will be enabled by default, and so you'll need to switch your requirements to include `pip install 'acryl-datahub-airflow-plugin[plugin-v2]'`. To continue using the v1 plugin, set the `DATAHUB_AIRFLOW_PLUGIN_USE_V1_PLUGIN` environment variable to `true`. - #8943 The Unity Catalog ingestion source has a new option `include_metastore`, which will cause all urns to be changed when disabled. diff --git a/metadata-ingestion/src/datahub/ingestion/source/looker/looker_common.py b/metadata-ingestion/src/datahub/ingestion/source/looker/looker_common.py index 89b1e45695c57..30c38720dd96c 100644 --- a/metadata-ingestion/src/datahub/ingestion/source/looker/looker_common.py +++ b/metadata-ingestion/src/datahub/ingestion/source/looker/looker_common.py @@ -81,9 +81,6 @@ EnumTypeClass, FineGrainedLineageClass, GlobalTagsClass, - OwnerClass, - OwnershipClass, - OwnershipTypeClass, SchemaMetadataClass, StatusClass, SubTypesClass, @@ -453,17 +450,9 @@ def _get_schema( @staticmethod def _get_tag_mce_for_urn(tag_urn: str) -> MetadataChangeEvent: assert tag_urn in LookerUtil.tag_definitions - ownership = OwnershipClass( - owners=[ - OwnerClass( - owner="urn:li:corpuser:datahub", - type=OwnershipTypeClass.DATAOWNER, - ) - ] - ) return MetadataChangeEvent( proposedSnapshot=TagSnapshotClass( - urn=tag_urn, aspects=[ownership, LookerUtil.tag_definitions[tag_urn]] + urn=tag_urn, aspects=[LookerUtil.tag_definitions[tag_urn]] ) ) diff --git a/metadata-ingestion/tests/integration/looker/golden_looker_mces.json b/metadata-ingestion/tests/integration/looker/golden_looker_mces.json index dee85b40bb7a8..1da42b94e320c 100644 --- a/metadata-ingestion/tests/integration/looker/golden_looker_mces.json +++ b/metadata-ingestion/tests/integration/looker/golden_looker_mces.json @@ -533,20 +533,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -566,20 +552,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -599,20 +571,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/looker/golden_test_allow_ingest.json b/metadata-ingestion/tests/integration/looker/golden_test_allow_ingest.json index 72db36e63daf7..685a606a57c33 100644 --- a/metadata-ingestion/tests/integration/looker/golden_test_allow_ingest.json +++ b/metadata-ingestion/tests/integration/looker/golden_test_allow_ingest.json @@ -327,20 +327,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", 
"aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -360,20 +346,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -393,20 +365,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/looker/golden_test_external_project_view_mces.json b/metadata-ingestion/tests/integration/looker/golden_test_external_project_view_mces.json index e5508bdb06b9e..069788cb088ac 100644 --- a/metadata-ingestion/tests/integration/looker/golden_test_external_project_view_mces.json +++ b/metadata-ingestion/tests/integration/looker/golden_test_external_project_view_mces.json @@ -327,20 +327,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -360,20 +346,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -393,20 +365,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/looker/golden_test_file_path_ingest.json b/metadata-ingestion/tests/integration/looker/golden_test_file_path_ingest.json index b0f66e7b245c9..f1c932ebd5a70 100644 --- a/metadata-ingestion/tests/integration/looker/golden_test_file_path_ingest.json +++ b/metadata-ingestion/tests/integration/looker/golden_test_file_path_ingest.json @@ -335,20 +335,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } 
- }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -369,20 +355,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -403,20 +375,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/looker/golden_test_independent_look_ingest.json b/metadata-ingestion/tests/integration/looker/golden_test_independent_look_ingest.json index 91e13debfa028..9521c9af4bbdc 100644 --- a/metadata-ingestion/tests/integration/looker/golden_test_independent_look_ingest.json +++ b/metadata-ingestion/tests/integration/looker/golden_test_independent_look_ingest.json @@ -550,20 +550,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -583,20 +569,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -616,20 +588,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/looker/golden_test_ingest.json b/metadata-ingestion/tests/integration/looker/golden_test_ingest.json index e93079119e4f4..dbacd52fe83de 100644 --- a/metadata-ingestion/tests/integration/looker/golden_test_ingest.json +++ b/metadata-ingestion/tests/integration/looker/golden_test_ingest.json @@ -327,20 +327,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -360,20 +346,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": 
"urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -393,20 +365,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/looker/golden_test_ingest_joins.json b/metadata-ingestion/tests/integration/looker/golden_test_ingest_joins.json index a9c8efa7cdb98..aaa874d9ff348 100644 --- a/metadata-ingestion/tests/integration/looker/golden_test_ingest_joins.json +++ b/metadata-ingestion/tests/integration/looker/golden_test_ingest_joins.json @@ -351,20 +351,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -384,20 +370,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -417,20 +389,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/looker/golden_test_ingest_unaliased_joins.json b/metadata-ingestion/tests/integration/looker/golden_test_ingest_unaliased_joins.json index edd15624a14cd..be8db0722aea3 100644 --- a/metadata-ingestion/tests/integration/looker/golden_test_ingest_unaliased_joins.json +++ b/metadata-ingestion/tests/integration/looker/golden_test_ingest_unaliased_joins.json @@ -343,20 +343,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -376,20 +362,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -409,20 +381,6 @@ 
"com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/looker/looker_mces_golden_deleted_stateful.json b/metadata-ingestion/tests/integration/looker/looker_mces_golden_deleted_stateful.json index aebc89b609a08..05b74f163ad45 100644 --- a/metadata-ingestion/tests/integration/looker/looker_mces_golden_deleted_stateful.json +++ b/metadata-ingestion/tests/integration/looker/looker_mces_golden_deleted_stateful.json @@ -327,20 +327,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -360,20 +346,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -393,20 +365,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/looker/looker_mces_usage_history.json b/metadata-ingestion/tests/integration/looker/looker_mces_usage_history.json index 34bded3cf691e..0778aa0050b00 100644 --- a/metadata-ingestion/tests/integration/looker/looker_mces_usage_history.json +++ b/metadata-ingestion/tests/integration/looker/looker_mces_usage_history.json @@ -279,20 +279,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -312,20 +298,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -345,20 +317,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - 
"lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/lookml/lookml_mces_api_bigquery.json b/metadata-ingestion/tests/integration/lookml/lookml_mces_api_bigquery.json index 238f4c2580cdf..5a0bd4e12fd3a 100644 --- a/metadata-ingestion/tests/integration/lookml/lookml_mces_api_bigquery.json +++ b/metadata-ingestion/tests/integration/lookml/lookml_mces_api_bigquery.json @@ -2121,20 +2121,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -2154,20 +2140,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -2187,20 +2159,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/lookml/lookml_mces_api_hive2.json b/metadata-ingestion/tests/integration/lookml/lookml_mces_api_hive2.json index 45d5d839e9d21..1b0ee3216383c 100644 --- a/metadata-ingestion/tests/integration/lookml/lookml_mces_api_hive2.json +++ b/metadata-ingestion/tests/integration/lookml/lookml_mces_api_hive2.json @@ -2121,20 +2121,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -2154,20 +2140,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -2187,20 +2159,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/lookml/lookml_mces_badsql_parser.json 
b/metadata-ingestion/tests/integration/lookml/lookml_mces_badsql_parser.json index 187cedaefb6b2..b960ba581e6b5 100644 --- a/metadata-ingestion/tests/integration/lookml/lookml_mces_badsql_parser.json +++ b/metadata-ingestion/tests/integration/lookml/lookml_mces_badsql_parser.json @@ -2004,20 +2004,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -2037,20 +2023,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -2070,20 +2042,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/lookml/lookml_mces_offline.json b/metadata-ingestion/tests/integration/lookml/lookml_mces_offline.json index c2c879e38f37b..e29292a44c949 100644 --- a/metadata-ingestion/tests/integration/lookml/lookml_mces_offline.json +++ b/metadata-ingestion/tests/integration/lookml/lookml_mces_offline.json @@ -2121,20 +2121,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -2154,20 +2140,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -2187,20 +2159,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/lookml/lookml_mces_offline_deny_pattern.json b/metadata-ingestion/tests/integration/lookml/lookml_mces_offline_deny_pattern.json index c1ac54b0fb588..04ecaecbd4afb 100644 --- a/metadata-ingestion/tests/integration/lookml/lookml_mces_offline_deny_pattern.json +++ b/metadata-ingestion/tests/integration/lookml/lookml_mces_offline_deny_pattern.json 
@@ -584,20 +584,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -617,20 +603,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -650,20 +622,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/lookml/lookml_mces_offline_platform_instance.json b/metadata-ingestion/tests/integration/lookml/lookml_mces_offline_platform_instance.json index f602ca37b3160..080931ae637bc 100644 --- a/metadata-ingestion/tests/integration/lookml/lookml_mces_offline_platform_instance.json +++ b/metadata-ingestion/tests/integration/lookml/lookml_mces_offline_platform_instance.json @@ -2121,20 +2121,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -2154,20 +2140,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -2187,20 +2159,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/lookml/lookml_mces_with_external_urls.json b/metadata-ingestion/tests/integration/lookml/lookml_mces_with_external_urls.json index 104bd365669e3..5826c4316b539 100644 --- a/metadata-ingestion/tests/integration/lookml/lookml_mces_with_external_urls.json +++ b/metadata-ingestion/tests/integration/lookml/lookml_mces_with_external_urls.json @@ -2134,20 +2134,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": 
"urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -2167,20 +2153,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -2200,20 +2172,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", diff --git a/metadata-ingestion/tests/integration/lookml/lookml_reachable_views.json b/metadata-ingestion/tests/integration/lookml/lookml_reachable_views.json index 37a6c94c6952e..53d1ec0229de1 100644 --- a/metadata-ingestion/tests/integration/lookml/lookml_reachable_views.json +++ b/metadata-ingestion/tests/integration/lookml/lookml_reachable_views.json @@ -681,20 +681,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Dimension", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Dimension", @@ -714,20 +700,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Temporal", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Temporal", @@ -747,20 +719,6 @@ "com.linkedin.pegasus2avro.metadata.snapshot.TagSnapshot": { "urn": "urn:li:tag:Measure", "aspects": [ - { - "com.linkedin.pegasus2avro.common.Ownership": { - "owners": [ - { - "owner": "urn:li:corpuser:datahub", - "type": "DATAOWNER" - } - ], - "lastModified": { - "time": 0, - "actor": "urn:li:corpuser:unknown" - } - } - }, { "com.linkedin.pegasus2avro.tag.TagProperties": { "name": "Measure", From 84bba4dc446ee97f8991689fd17bfa6d14232601 Mon Sep 17 00:00:00 2001 From: Harshal Sheth Date: Thu, 12 Oct 2023 01:31:17 -0400 Subject: [PATCH 14/14] feat(ingest): add output schema inference for sql parser (#8989) --- .../src/datahub/utilities/sqlglot_lineage.py | 119 ++++++++++++++++-- .../integration/powerbi/test_m_parser.py | 93 ++++---------- .../test_bigquery_create_view_with_cte.json | 32 ++++- ..._bigquery_from_sharded_table_wildcard.json | 16 ++- .../test_bigquery_nested_subqueries.json | 16 ++- ..._bigquery_sharded_table_normalization.json | 16 ++- .../test_bigquery_star_with_replace.json | 24 +++- .../test_bigquery_view_from_union.json | 16 ++- .../goldens/test_create_view_as_select.json | 16 ++- .../test_expand_select_star_basic.json | 80 ++++++++++-- .../goldens/test_insert_as_select.json | 36 +++++- ...est_select_ambiguous_column_no_schema.json | 12 +- 
.../goldens/test_select_count.json | 8 +- .../test_select_from_struct_subfields.json | 16 ++- .../goldens/test_select_from_union.json | 16 ++- .../sql_parsing/goldens/test_select_max.json | 4 +- .../goldens/test_select_with_ctes.json | 8 +- .../test_select_with_full_col_name.json | 12 +- .../test_snowflake_case_statement.json | 16 ++- .../goldens/test_snowflake_column_cast.json | 63 ++++++++++ .../test_snowflake_column_normalization.json | 32 ++++- ...t_snowflake_ctas_column_normalization.json | 32 ++++- .../test_snowflake_default_normalization.json | 48 ++++++- .../unit/sql_parsing/test_sqlglot_lineage.py | 21 ++++ 24 files changed, 604 insertions(+), 148 deletions(-) create mode 100644 metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_column_cast.json diff --git a/metadata-ingestion/src/datahub/utilities/sqlglot_lineage.py b/metadata-ingestion/src/datahub/utilities/sqlglot_lineage.py index 81c43884fdf7d..349eb40a5e865 100644 --- a/metadata-ingestion/src/datahub/utilities/sqlglot_lineage.py +++ b/metadata-ingestion/src/datahub/utilities/sqlglot_lineage.py @@ -5,12 +5,13 @@ import logging import pathlib from collections import defaultdict -from typing import Dict, List, Optional, Set, Tuple, Union +from typing import Any, Dict, List, Optional, Set, Tuple, Union import pydantic.dataclasses import sqlglot import sqlglot.errors import sqlglot.lineage +import sqlglot.optimizer.annotate_types import sqlglot.optimizer.qualify import sqlglot.optimizer.qualify_columns from pydantic import BaseModel @@ -23,7 +24,17 @@ from datahub.ingestion.api.closeable import Closeable from datahub.ingestion.graph.client import DataHubGraph from datahub.ingestion.source.bigquery_v2.bigquery_audit import BigqueryTableIdentifier -from datahub.metadata.schema_classes import OperationTypeClass, SchemaMetadataClass +from datahub.metadata.schema_classes import ( + ArrayTypeClass, + BooleanTypeClass, + DateTypeClass, + NumberTypeClass, + OperationTypeClass, + SchemaFieldDataTypeClass, + SchemaMetadataClass, + StringTypeClass, + TimeTypeClass, +) from datahub.utilities.file_backed_collections import ConnectionWrapper, FileBackedDict from datahub.utilities.urns.dataset_urn import DatasetUrn @@ -90,8 +101,18 @@ def get_query_type_of_sql(expression: sqlglot.exp.Expression) -> QueryType: return QueryType.UNKNOWN +class _ParserBaseModel( + BaseModel, + arbitrary_types_allowed=True, + json_encoders={ + SchemaFieldDataTypeClass: lambda v: v.to_obj(), + }, +): + pass + + @functools.total_ordering -class _FrozenModel(BaseModel, frozen=True): +class _FrozenModel(_ParserBaseModel, frozen=True): def __lt__(self, other: "_FrozenModel") -> bool: for field in self.__fields__: self_v = getattr(self, field) @@ -146,29 +167,42 @@ class _ColumnRef(_FrozenModel): column: str -class ColumnRef(BaseModel): +class ColumnRef(_ParserBaseModel): table: Urn column: str -class _DownstreamColumnRef(BaseModel): +class _DownstreamColumnRef(_ParserBaseModel): table: Optional[_TableName] column: str + column_type: Optional[sqlglot.exp.DataType] -class DownstreamColumnRef(BaseModel): +class DownstreamColumnRef(_ParserBaseModel): table: Optional[Urn] column: str + column_type: Optional[SchemaFieldDataTypeClass] + native_column_type: Optional[str] + + @pydantic.validator("column_type", pre=True) + def _load_column_type( + cls, v: Optional[Union[dict, SchemaFieldDataTypeClass]] + ) -> Optional[SchemaFieldDataTypeClass]: + if v is None: + return None + if isinstance(v, SchemaFieldDataTypeClass): + return v + return 
SchemaFieldDataTypeClass.from_obj(v) -class _ColumnLineageInfo(BaseModel): +class _ColumnLineageInfo(_ParserBaseModel): downstream: _DownstreamColumnRef upstreams: List[_ColumnRef] logic: Optional[str] -class ColumnLineageInfo(BaseModel): +class ColumnLineageInfo(_ParserBaseModel): downstream: DownstreamColumnRef upstreams: List[ColumnRef] @@ -176,7 +210,7 @@ class ColumnLineageInfo(BaseModel): logic: Optional[str] = pydantic.Field(default=None, exclude=True) -class SqlParsingDebugInfo(BaseModel, arbitrary_types_allowed=True): +class SqlParsingDebugInfo(_ParserBaseModel): confidence: float = 0.0 tables_discovered: int = 0 @@ -190,7 +224,7 @@ def error(self) -> Optional[Exception]: return self.table_error or self.column_error -class SqlParsingResult(BaseModel): +class SqlParsingResult(_ParserBaseModel): query_type: QueryType = QueryType.UNKNOWN in_tables: List[Urn] @@ -541,6 +575,15 @@ def _schema_aware_fuzzy_column_resolve( ) from e logger.debug("Qualified sql %s", statement.sql(pretty=True, dialect=dialect)) + # Try to figure out the types of the output columns. + try: + statement = sqlglot.optimizer.annotate_types.annotate_types( + statement, schema=sqlglot_db_schema + ) + except sqlglot.errors.OptimizeError as e: + # This is not a fatal error, so we can continue. + logger.debug("sqlglot failed to annotate types: %s", e) + column_lineage = [] try: @@ -553,7 +596,6 @@ def _schema_aware_fuzzy_column_resolve( logger.debug("output columns: %s", [col[0] for col in output_columns]) output_col: str for output_col, original_col_expression in output_columns: - # print(f"output column: {output_col}") if output_col == "*": # If schema information is available, the * will be expanded to the actual columns. # Otherwise, we can't process it. @@ -613,12 +655,19 @@ def _schema_aware_fuzzy_column_resolve( output_col = _schema_aware_fuzzy_column_resolve(output_table, output_col) + # Guess the output column type. 
+ output_col_type = None + if original_col_expression.type: + output_col_type = original_col_expression.type + if not direct_col_upstreams: logger.debug(f' "{output_col}" has no upstreams') column_lineage.append( _ColumnLineageInfo( downstream=_DownstreamColumnRef( - table=output_table, column=output_col + table=output_table, + column=output_col, + column_type=output_col_type, ), upstreams=sorted(direct_col_upstreams), # logic=column_logic.sql(pretty=True, dialect=dialect), @@ -673,6 +722,42 @@ def _try_extract_select( return statement +def _translate_sqlglot_type( + sqlglot_type: sqlglot.exp.DataType.Type, +) -> Optional[SchemaFieldDataTypeClass]: + TypeClass: Any + if sqlglot_type in sqlglot.exp.DataType.TEXT_TYPES: + TypeClass = StringTypeClass + elif sqlglot_type in sqlglot.exp.DataType.NUMERIC_TYPES or sqlglot_type in { + sqlglot.exp.DataType.Type.DECIMAL, + }: + TypeClass = NumberTypeClass + elif sqlglot_type in { + sqlglot.exp.DataType.Type.BOOLEAN, + sqlglot.exp.DataType.Type.BIT, + }: + TypeClass = BooleanTypeClass + elif sqlglot_type in { + sqlglot.exp.DataType.Type.DATE, + }: + TypeClass = DateTypeClass + elif sqlglot_type in sqlglot.exp.DataType.TEMPORAL_TYPES: + TypeClass = TimeTypeClass + elif sqlglot_type in { + sqlglot.exp.DataType.Type.ARRAY, + }: + TypeClass = ArrayTypeClass + elif sqlglot_type in { + sqlglot.exp.DataType.Type.UNKNOWN, + }: + return None + else: + logger.debug("Unknown sqlglot type: %s", sqlglot_type) + return None + + return SchemaFieldDataTypeClass(type=TypeClass()) + + def _translate_internal_column_lineage( table_name_urn_mapping: Dict[_TableName, str], raw_column_lineage: _ColumnLineageInfo, @@ -684,6 +769,16 @@ def _translate_internal_column_lineage( downstream=DownstreamColumnRef( table=downstream_urn, column=raw_column_lineage.downstream.column, + column_type=_translate_sqlglot_type( + raw_column_lineage.downstream.column_type.this + ) + if raw_column_lineage.downstream.column_type + else None, + native_column_type=raw_column_lineage.downstream.column_type.sql() + if raw_column_lineage.downstream.column_type + and raw_column_lineage.downstream.column_type.this + != sqlglot.exp.DataType.Type.UNKNOWN + else None, ), upstreams=[ ColumnRef( diff --git a/metadata-ingestion/tests/integration/powerbi/test_m_parser.py b/metadata-ingestion/tests/integration/powerbi/test_m_parser.py index e3cc6c8101650..b6cb578217a2c 100644 --- a/metadata-ingestion/tests/integration/powerbi/test_m_parser.py +++ b/metadata-ingestion/tests/integration/powerbi/test_m_parser.py @@ -17,7 +17,6 @@ ) from datahub.ingestion.source.powerbi.m_query import parser, resolver, tree_function from datahub.ingestion.source.powerbi.m_query.resolver import DataPlatformTable, Lineage -from datahub.utilities.sqlglot_lineage import ColumnLineageInfo, DownstreamColumnRef pytestmark = pytest.mark.integration_batch_2 @@ -742,75 +741,25 @@ def test_sqlglot_parser(): == "urn:li:dataset:(urn:li:dataPlatform:snowflake,sales_deployment.operations_analytics.transformed_prod.v_sme_unit_targets,PROD)" ) - assert lineage[0].column_lineage == [ - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column="client_director"), - upstreams=[], - logic=None, - ), - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column="tier"), - upstreams=[], - logic=None, - ), - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column='upper("manager")'), - upstreams=[], - logic=None, - ), - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column="team_type"), - 
upstreams=[], - logic=None, - ), - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column="date_target"), - upstreams=[], - logic=None, - ), - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column="monthid"), - upstreams=[], - logic=None, - ), - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column="target_team"), - upstreams=[], - logic=None, - ), - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column="seller_email"), - upstreams=[], - logic=None, - ), - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column="agent_key"), - upstreams=[], - logic=None, - ), - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column="sme_quota"), - upstreams=[], - logic=None, - ), - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column="revenue_quota"), - upstreams=[], - logic=None, - ), - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column="service_quota"), - upstreams=[], - logic=None, - ), - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column="bl_target"), - upstreams=[], - logic=None, - ), - ColumnLineageInfo( - downstream=DownstreamColumnRef(table=None, column="software_quota"), - upstreams=[], - logic=None, - ), + # TODO: None of these columns have upstreams? + # That doesn't seem right - we probably need to add fake schemas for the two tables above. + cols = [ + "client_director", + "tier", + 'upper("manager")', + "team_type", + "date_target", + "monthid", + "target_team", + "seller_email", + "agent_key", + "sme_quota", + "revenue_quota", + "service_quota", + "bl_target", + "software_quota", ] + for i, column in enumerate(cols): + assert lineage[0].column_lineage[i].downstream.table is None + assert lineage[0].column_lineage[i].downstream.column == column + assert lineage[0].column_lineage[i].upstreams == [] diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_create_view_with_cte.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_create_view_with_cte.json index e50d944ce72e3..f0175b4dc8892 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_create_view_with_cte.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_create_view_with_cte.json @@ -12,7 +12,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:bigquery,my-proj-2.dataset.my_view,PROD)", - "column": "col5" + "column": "col5", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { @@ -24,7 +30,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:bigquery,my-proj-2.dataset.my_view,PROD)", - "column": "col1" + "column": "col1", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { @@ -36,7 +48,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:bigquery,my-proj-2.dataset.my_view,PROD)", - "column": "col2" + "column": "col2", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { @@ -48,7 +66,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:bigquery,my-proj-2.dataset.my_view,PROD)", - "column": "col3" + "column": "col3", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { diff --git 
a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_from_sharded_table_wildcard.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_from_sharded_table_wildcard.json index 78591286feb50..b7df5444987f2 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_from_sharded_table_wildcard.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_from_sharded_table_wildcard.json @@ -8,7 +8,13 @@ { "downstream": { "table": null, - "column": "col1" + "column": "col1", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { @@ -20,7 +26,13 @@ { "downstream": { "table": null, - "column": "col2" + "column": "col2", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_nested_subqueries.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_nested_subqueries.json index 0e93d31fbb6a6..67e306bebf545 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_nested_subqueries.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_nested_subqueries.json @@ -8,7 +8,13 @@ { "downstream": { "table": null, - "column": "col1" + "column": "col1", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { @@ -20,7 +26,13 @@ { "downstream": { "table": null, - "column": "col2" + "column": "col2", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_sharded_table_normalization.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_sharded_table_normalization.json index 78591286feb50..b7df5444987f2 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_sharded_table_normalization.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_sharded_table_normalization.json @@ -8,7 +8,13 @@ { "downstream": { "table": null, - "column": "col1" + "column": "col1", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { @@ -20,7 +26,13 @@ { "downstream": { "table": null, - "column": "col2" + "column": "col2", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_star_with_replace.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_star_with_replace.json index 17a801a63e3ff..b393b2445d6c4 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_star_with_replace.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_star_with_replace.json @@ -10,7 +10,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:bigquery,my-project.my-dataset.test_table,PROD)", - "column": "col1" + "column": "col1", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { @@ -22,7 +28,13 @@ { "downstream": { "table": 
"urn:li:dataset:(urn:li:dataPlatform:bigquery,my-project.my-dataset.test_table,PROD)", - "column": "col2" + "column": "col2", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { @@ -34,7 +46,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:bigquery,my-project.my-dataset.test_table,PROD)", - "column": "something" + "column": "something", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_view_from_union.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_view_from_union.json index fd8a586ac74ac..53fb94300e804 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_view_from_union.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_bigquery_view_from_union.json @@ -11,7 +11,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:bigquery,my_view,PROD)", - "column": "col1" + "column": "col1", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { @@ -27,7 +33,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:bigquery,my_view,PROD)", - "column": "col2" + "column": "col2", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_create_view_as_select.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_create_view_as_select.json index 1ca56840531e4..ff452467aa5bd 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_create_view_as_select.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_create_view_as_select.json @@ -10,7 +10,9 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:oracle,vsal,PROD)", - "column": "Department" + "column": "Department", + "column_type": null, + "native_column_type": null }, "upstreams": [ { @@ -22,14 +24,22 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:oracle,vsal,PROD)", - "column": "Employees" + "column": "Employees", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "BIGINT" }, "upstreams": [] }, { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:oracle,vsal,PROD)", - "column": "Salary" + "column": "Salary", + "column_type": null, + "native_column_type": null }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_expand_select_star_basic.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_expand_select_star_basic.json index e241bdd08e243..eecb2265eaec5 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_expand_select_star_basic.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_expand_select_star_basic.json @@ -8,7 +8,13 @@ { "downstream": { "table": null, - "column": "total_agg" + "column": "total_agg", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DOUBLE" }, "upstreams": [ { @@ -20,7 +26,13 @@ { "downstream": { "table": null, - "column": "orderkey" + "column": "orderkey", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + 
"native_column_type": "DECIMAL" }, "upstreams": [ { @@ -32,7 +44,13 @@ { "downstream": { "table": null, - "column": "custkey" + "column": "custkey", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DECIMAL" }, "upstreams": [ { @@ -44,7 +62,13 @@ { "downstream": { "table": null, - "column": "orderstatus" + "column": "orderstatus", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { @@ -56,7 +80,13 @@ { "downstream": { "table": null, - "column": "totalprice" + "column": "totalprice", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "FLOAT" }, "upstreams": [ { @@ -68,7 +98,13 @@ { "downstream": { "table": null, - "column": "orderdate" + "column": "orderdate", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.DateType": {} + } + }, + "native_column_type": "DATE" }, "upstreams": [ { @@ -80,7 +116,13 @@ { "downstream": { "table": null, - "column": "orderpriority" + "column": "orderpriority", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { @@ -92,7 +134,13 @@ { "downstream": { "table": null, - "column": "clerk" + "column": "clerk", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { @@ -104,7 +152,13 @@ { "downstream": { "table": null, - "column": "shippriority" + "column": "shippriority", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DECIMAL" }, "upstreams": [ { @@ -116,7 +170,13 @@ { "downstream": { "table": null, - "column": "comment" + "column": "comment", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "TEXT" }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_insert_as_select.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_insert_as_select.json index d7264fd2db6b2..326db47e7ab33 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_insert_as_select.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_insert_as_select.json @@ -18,21 +18,27 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:hive,query72,PROD)", - "column": "i_item_desc" + "column": "i_item_desc", + "column_type": null, + "native_column_type": null }, "upstreams": [] }, { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:hive,query72,PROD)", - "column": "w_warehouse_name" + "column": "w_warehouse_name", + "column_type": null, + "native_column_type": null }, "upstreams": [] }, { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:hive,query72,PROD)", - "column": "d_week_seq" + "column": "d_week_seq", + "column_type": null, + "native_column_type": null }, "upstreams": [ { @@ -44,7 +50,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:hive,query72,PROD)", - "column": "no_promo" + "column": "no_promo", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "BIGINT" }, "upstreams": [ { @@ -56,7 +68,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:hive,query72,PROD)", - "column": "promo" + "column": "promo", + "column_type": { + "type": { + 
"com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "BIGINT" }, "upstreams": [ { @@ -68,7 +86,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:hive,query72,PROD)", - "column": "total_cnt" + "column": "total_cnt", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "BIGINT" }, "upstreams": [] } diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_ambiguous_column_no_schema.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_ambiguous_column_no_schema.json index 10f5ee20b0c1f..b5fd5eebeb1b1 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_ambiguous_column_no_schema.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_ambiguous_column_no_schema.json @@ -9,21 +9,27 @@ { "downstream": { "table": null, - "column": "a" + "column": "a", + "column_type": null, + "native_column_type": null }, "upstreams": [] }, { "downstream": { "table": null, - "column": "b" + "column": "b", + "column_type": null, + "native_column_type": null }, "upstreams": [] }, { "downstream": { "table": null, - "column": "c" + "column": "c", + "column_type": null, + "native_column_type": null }, "upstreams": [] } diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_count.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_count.json index 9f6eeae46c294..a67c944822138 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_count.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_count.json @@ -8,7 +8,13 @@ { "downstream": { "table": null, - "column": "COUNT(`fact_complaint_snapshot`.`etl_data_dt_id`)" + "column": "COUNT(`fact_complaint_snapshot`.`etl_data_dt_id`)", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "BIGINT" }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_from_struct_subfields.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_from_struct_subfields.json index 109de96180422..5ad847e252497 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_from_struct_subfields.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_from_struct_subfields.json @@ -8,7 +8,13 @@ { "downstream": { "table": null, - "column": "post_id" + "column": "post_id", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DECIMAL" }, "upstreams": [ { @@ -20,7 +26,9 @@ { "downstream": { "table": null, - "column": "id" + "column": "id", + "column_type": null, + "native_column_type": null }, "upstreams": [ { @@ -32,7 +40,9 @@ { "downstream": { "table": null, - "column": "min_metric" + "column": "min_metric", + "column_type": null, + "native_column_type": null }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_from_union.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_from_union.json index 2340b2e95b0d0..902aa010c8afc 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_from_union.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_from_union.json @@ -9,14 +9,26 @@ { "downstream": { "table": null, - "column": "label" + "column": "label", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + 
"native_column_type": "VARCHAR" }, "upstreams": [] }, { "downstream": { "table": null, - "column": "total_agg" + "column": "total_agg", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DOUBLE" }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_max.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_max.json index 326c07d332c26..6ea88f45847ce 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_max.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_max.json @@ -8,7 +8,9 @@ { "downstream": { "table": null, - "column": "max_col" + "column": "max_col", + "column_type": null, + "native_column_type": null }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_with_ctes.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_with_ctes.json index 3e02314d6e8c3..67e9fd2d21a0e 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_with_ctes.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_with_ctes.json @@ -9,7 +9,9 @@ { "downstream": { "table": null, - "column": "COL1" + "column": "COL1", + "column_type": null, + "native_column_type": null }, "upstreams": [ { @@ -21,7 +23,9 @@ { "downstream": { "table": null, - "column": "COL3" + "column": "COL3", + "column_type": null, + "native_column_type": null }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_with_full_col_name.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_with_full_col_name.json index c12ad23b2f03b..6ee3d2e61c39b 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_with_full_col_name.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_select_with_full_col_name.json @@ -8,7 +8,13 @@ { "downstream": { "table": null, - "column": "post_id" + "column": "post_id", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DECIMAL" }, "upstreams": [ { @@ -20,7 +26,9 @@ { "downstream": { "table": null, - "column": "id" + "column": "id", + "column_type": null, + "native_column_type": null }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_case_statement.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_case_statement.json index 64cd80e9a2d69..a876824127ec1 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_case_statement.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_case_statement.json @@ -8,7 +8,13 @@ { "downstream": { "table": null, - "column": "total_price_category" + "column": "total_price_category", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "VARCHAR" }, "upstreams": [ { @@ -20,7 +26,13 @@ { "downstream": { "table": null, - "column": "total_price_success" + "column": "total_price_success", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "FLOAT" }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_column_cast.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_column_cast.json new file mode 100644 index 0000000000000..7545e2b3269dc --- /dev/null +++ 
b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_column_cast.json @@ -0,0 +1,63 @@ +{ + "query_type": "SELECT", + "in_tables": [ + "urn:li:dataset:(urn:li:dataPlatform:snowflake,snowflake_sample_data.tpch_sf1.orders,PROD)" + ], + "out_tables": [], + "column_lineage": [ + { + "downstream": { + "table": null, + "column": "orderkey", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DECIMAL(20, 0)" + }, + "upstreams": [ + { + "table": "urn:li:dataset:(urn:li:dataPlatform:snowflake,snowflake_sample_data.tpch_sf1.orders,PROD)", + "column": "o_orderkey" + } + ] + }, + { + "downstream": { + "table": null, + "column": "total_cast_int", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "INT" + }, + "upstreams": [ + { + "table": "urn:li:dataset:(urn:li:dataPlatform:snowflake,snowflake_sample_data.tpch_sf1.orders,PROD)", + "column": "o_totalprice" + } + ] + }, + { + "downstream": { + "table": null, + "column": "total_cast_float", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DECIMAL(16, 4)" + }, + "upstreams": [ + { + "table": "urn:li:dataset:(urn:li:dataPlatform:snowflake,snowflake_sample_data.tpch_sf1.orders,PROD)", + "column": "o_totalprice" + } + ] + } + ] +} \ No newline at end of file diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_column_normalization.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_column_normalization.json index 7b22a46757e39..84e6b053000f1 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_column_normalization.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_column_normalization.json @@ -8,7 +8,13 @@ { "downstream": { "table": null, - "column": "total_agg" + "column": "total_agg", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DOUBLE" }, "upstreams": [ { @@ -20,7 +26,13 @@ { "downstream": { "table": null, - "column": "total_avg" + "column": "total_avg", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DOUBLE" }, "upstreams": [ { @@ -32,7 +44,13 @@ { "downstream": { "table": null, - "column": "total_min" + "column": "total_min", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "FLOAT" }, "upstreams": [ { @@ -44,7 +62,13 @@ { "downstream": { "table": null, - "column": "total_max" + "column": "total_max", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "FLOAT" }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_ctas_column_normalization.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_ctas_column_normalization.json index c912d99a3a8a3..39c94cf83c561 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_ctas_column_normalization.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_ctas_column_normalization.json @@ -10,7 +10,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:snowflake,snowflake_sample_data.tpch_sf1.orders_normalized,PROD)", - "column": "Total_Agg" + "column": "Total_Agg", + "column_type": { + "type": { + 
"com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DOUBLE" }, "upstreams": [ { @@ -22,7 +28,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:snowflake,snowflake_sample_data.tpch_sf1.orders_normalized,PROD)", - "column": "total_avg" + "column": "total_avg", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DOUBLE" }, "upstreams": [ { @@ -34,7 +46,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:snowflake,snowflake_sample_data.tpch_sf1.orders_normalized,PROD)", - "column": "TOTAL_MIN" + "column": "TOTAL_MIN", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "FLOAT" }, "upstreams": [ { @@ -46,7 +64,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:snowflake,snowflake_sample_data.tpch_sf1.orders_normalized,PROD)", - "column": "total_max" + "column": "total_max", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "FLOAT" }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_default_normalization.json b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_default_normalization.json index 2af308ec60623..dbf5b1b9a4453 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_default_normalization.json +++ b/metadata-ingestion/tests/unit/sql_parsing/goldens/test_snowflake_default_normalization.json @@ -11,7 +11,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:snowflake,long_tail_companions.analytics.active_customer_ltv,PROD)", - "column": "user_fk" + "column": "user_fk", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DECIMAL(38, 0)" }, "upstreams": [ { @@ -23,7 +29,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:snowflake,long_tail_companions.analytics.active_customer_ltv,PROD)", - "column": "email" + "column": "email", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.StringType": {} + } + }, + "native_column_type": "VARCHAR(16777216)" }, "upstreams": [ { @@ -35,7 +47,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:snowflake,long_tail_companions.analytics.active_customer_ltv,PROD)", - "column": "last_purchase_date" + "column": "last_purchase_date", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.DateType": {} + } + }, + "native_column_type": "DATE" }, "upstreams": [ { @@ -47,7 +65,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:snowflake,long_tail_companions.analytics.active_customer_ltv,PROD)", - "column": "lifetime_purchase_amount" + "column": "lifetime_purchase_amount", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DECIMAL" }, "upstreams": [ { @@ -59,7 +83,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:snowflake,long_tail_companions.analytics.active_customer_ltv,PROD)", - "column": "lifetime_purchase_count" + "column": "lifetime_purchase_count", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "BIGINT" }, "upstreams": [ { @@ -71,7 +101,13 @@ { "downstream": { "table": "urn:li:dataset:(urn:li:dataPlatform:snowflake,long_tail_companions.analytics.active_customer_ltv,PROD)", - "column": 
"average_purchase_amount" + "column": "average_purchase_amount", + "column_type": { + "type": { + "com.linkedin.pegasus2avro.schema.NumberType": {} + } + }, + "native_column_type": "DECIMAL" }, "upstreams": [ { diff --git a/metadata-ingestion/tests/unit/sql_parsing/test_sqlglot_lineage.py b/metadata-ingestion/tests/unit/sql_parsing/test_sqlglot_lineage.py index 2a965a9bb1e61..bb6e5f1581754 100644 --- a/metadata-ingestion/tests/unit/sql_parsing/test_sqlglot_lineage.py +++ b/metadata-ingestion/tests/unit/sql_parsing/test_sqlglot_lineage.py @@ -608,4 +608,25 @@ def test_snowflake_default_normalization(): ) +def test_snowflake_column_cast(): + assert_sql_result( + """ +SELECT + o.o_orderkey::NUMBER(20,0) as orderkey, + CAST(o.o_totalprice AS INT) as total_cast_int, + CAST(o.o_totalprice AS NUMBER(16,4)) as total_cast_float +FROM snowflake_sample_data.tpch_sf1.orders o +LIMIT 10 +""", + dialect="snowflake", + schemas={ + "urn:li:dataset:(urn:li:dataPlatform:snowflake,snowflake_sample_data.tpch_sf1.orders,PROD)": { + "orderkey": "NUMBER(38,0)", + "totalprice": "NUMBER(12,2)", + }, + }, + expected_file=RESOURCE_DIR / "test_snowflake_column_cast.json", + ) + + # TODO: Add a test for setting platform_instance or env