diff --git a/404.html b/404.html index 12cda8eafd9d0..4f5931d66c1f4 100644 --- a/404.html +++ b/404.html @@ -8,13 +8,13 @@ - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/artifacts/wheels/acryl-datahub-0.0.0.dev1.tar.gz b/artifacts/wheels/acryl-datahub-0.0.0.dev1.tar.gz index 68c34e86fc46d..959927f572c45 100644 Binary files a/artifacts/wheels/acryl-datahub-0.0.0.dev1.tar.gz and b/artifacts/wheels/acryl-datahub-0.0.0.dev1.tar.gz differ diff --git a/artifacts/wheels/acryl-datahub-airflow-plugin-0.0.0.dev1.tar.gz b/artifacts/wheels/acryl-datahub-airflow-plugin-0.0.0.dev1.tar.gz index 2e427380b1756..61f528a7255b1 100644 Binary files a/artifacts/wheels/acryl-datahub-airflow-plugin-0.0.0.dev1.tar.gz and b/artifacts/wheels/acryl-datahub-airflow-plugin-0.0.0.dev1.tar.gz differ diff --git a/artifacts/wheels/acryl_datahub-0.0.0.dev1-py3-none-any.whl b/artifacts/wheels/acryl_datahub-0.0.0.dev1-py3-none-any.whl index 70ae64cbec32c..f9616d2fb11c4 100644 Binary files a/artifacts/wheels/acryl_datahub-0.0.0.dev1-py3-none-any.whl and b/artifacts/wheels/acryl_datahub-0.0.0.dev1-py3-none-any.whl differ diff --git a/artifacts/wheels/acryl_datahub_airflow_plugin-0.0.0.dev1-py3-none-any.whl b/artifacts/wheels/acryl_datahub_airflow_plugin-0.0.0.dev1-py3-none-any.whl index b057577d27668..82462d00fd973 100644 Binary files a/artifacts/wheels/acryl_datahub_airflow_plugin-0.0.0.dev1-py3-none-any.whl and b/artifacts/wheels/acryl_datahub_airflow_plugin-0.0.0.dev1-py3-none-any.whl differ diff --git a/assets/js/1043e548.55aa7524.js b/assets/js/1043e548.dc92a5b9.js similarity index 99% rename from assets/js/1043e548.55aa7524.js rename to assets/js/1043e548.dc92a5b9.js index f46d734d4c772..8fc1520da61f2 100644 --- a/assets/js/1043e548.55aa7524.js +++ b/assets/js/1043e548.dc92a5b9.js @@ -1 +1 @@ -"use strict";(self.webpackChunkdocs_website=self.webpackChunkdocs_website||[]).push([[8080],{3905:(e,t,a)=>{a.d(t,{Zo:()=>d,kt:()=>c});var n=a(67294);function s(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function l(e,t){var 
a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(s[a]=e[a]);return s}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(s[a]=e[a])}return s}var o=n.createContext({}),p=function(e){var t=n.useContext(o),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},d=function(e){var t=p(e.components);return n.createElement(o.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var a=e.components,s=e.mdxType,l=e.originalType,o=e.parentName,d=r(e,["components","mdxType","originalType","parentName"]),m=p(a),c=s,f=m["".concat(o,".").concat(c)]||m[c]||u[c]||l;return a?n.createElement(f,i(i({ref:t},d),{},{components:a})):n.createElement(f,i({ref:t},d))}));function c(e,t){var a=arguments,s=t&&t.mdxType;if("string"==typeof e||s){var l=a.length,i=new Array(l);i[0]=m;var r={};for(var o in t)hasOwnProperty.call(t,o)&&(r[o]=t[o]);r.originalType=e,r.mdxType="string"==typeof e?e:s,i[1]=r;for(var p=2;p{a.d(t,{Z:()=>i});var n=a(67294),s=a(86010);const l="tabItem_Ymn6";function i(e){let{children:t,hidden:a,className:i}=e;return n.createElement("div",{role:"tabpanel",className:(0,s.Z)(l,i),hidden:a},t)}},34259:(e,t,a)=>{a.d(t,{Z:()=>c});var n=a(83117),s=a(67294),l=a(86010),i=a(51048),r=a(33609),o=a(1943),p=a(72957);const d="tabList__CuJ",u="tabItem_LNqP";function m(e){const{lazy:t,block:a,defaultValue:i,values:m,groupId:c,className:f}=e,g=s.Children.map(e.children,(e=>{if((0,s.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" 
prop.`)})),k=m??g.map((e=>{let{props:{value:t,label:a,attributes:n}}=e;return{value:t,label:a,attributes:n}})),h=(0,r.l)(k,((e,t)=>e.value===t.value));if(h.length>0)throw new Error(`Docusaurus error: Duplicate values "${h.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const N=null===i?i:i??g.find((e=>e.props.default))?.props.value??g[0].props.value;if(null!==N&&!k.some((e=>e.value===N)))throw new Error(`Docusaurus error: The has a defaultValue "${N}" but none of its children has the corresponding value. Available values are: ${k.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:b,setTabGroupChoices:y}=(0,o.U)(),[v,_]=(0,s.useState)(N),w=[],{blockElementScrollPositionUntilNextRender:q}=(0,p.o5)();if(null!=c){const e=b[c];null!=e&&e!==v&&k.some((t=>t.value===e))&&_(e)}const T=e=>{const t=e.currentTarget,a=w.indexOf(t),n=k[a].value;n!==v&&(q(t),_(n),null!=c&&y(c,String(n)))},D=e=>{let t=null;switch(e.key){case"Enter":T(e);break;case"ArrowRight":{const a=w.indexOf(e.currentTarget)+1;t=w[a]??w[0];break}case"ArrowLeft":{const a=w.indexOf(e.currentTarget)-1;t=w[a]??w[w.length-1];break}}t?.focus()};return s.createElement("div",{className:(0,l.Z)("tabs-container",d)},s.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,l.Z)("tabs",{"tabs--block":a},f)},k.map((e=>{let{value:t,label:a,attributes:i}=e;return s.createElement("li",(0,n.Z)({role:"tab",tabIndex:v===t?0:-1,"aria-selected":v===t,key:t,ref:e=>w.push(e),onKeyDown:D,onClick:T},i,{className:(0,l.Z)("tabs__item",u,i?.className,{"tabs__item--active":v===t})}),a??t)}))),t?(0,s.cloneElement)(g.filter((e=>e.props.value===v))[0],{className:"margin-top--md"}):s.createElement("div",{className:"margin-top--md"},g.map(((e,t)=>(0,s.cloneElement)(e,{key:t,hidden:e.props.value!==v})))))}function c(e){const t=(0,i.Z)();return 
s.createElement(m,(0,n.Z)({key:String(t)},e))}},96647:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>d,contentTitle:()=>o,default:()=>c,frontMatter:()=>r,metadata:()=>p,toc:()=>u});var n=a(83117),s=(a(67294),a(3905)),l=a(34259),i=a(18679);const r={sidebar_position:48,title:"SQL Queries",slug:"/generated/ingestion/sources/sql-queries",custom_edit_url:"https://github.com/datahub-project/datahub/blob/master/docs/generated/ingestion/sources/sql-queries.md"},o="SQL Queries",p={unversionedId:"docs/generated/ingestion/sources/sql-queries",id:"docs/generated/ingestion/sources/sql-queries",title:"SQL Queries",description:"Testing",source:"@site/genDocs/docs/generated/ingestion/sources/sql-queries.md",sourceDirName:"docs/generated/ingestion/sources",slug:"/generated/ingestion/sources/sql-queries",permalink:"/docs/generated/ingestion/sources/sql-queries",draft:!1,editUrl:"https://github.com/datahub-project/datahub/blob/master/docs/generated/ingestion/sources/sql-queries.md",tags:[],version:"current",sidebarPosition:48,frontMatter:{sidebar_position:48,title:"SQL Queries",slug:"/generated/ingestion/sources/sql-queries",custom_edit_url:"https://github.com/datahub-project/datahub/blob/master/docs/generated/ingestion/sources/sql-queries.md"},sidebar:"overviewSidebar",previous:{title:"Snowflake",permalink:"/docs/generated/ingestion/sources/snowflake"},next:{title:"SQLAlchemy",permalink:"/docs/generated/ingestion/sources/sqlalchemy"}},d={},u=[{value:"CLI based Ingestion",id:"cli-based-ingestion",level:3},{value:"Install the Plugin",id:"install-the-plugin",level:4},{value:"Config Details",id:"config-details",level:3},{value:"Code Coordinates",id:"code-coordinates",level:3}],m={toc:u};function c(e){let{components:t,...a}=e;return(0,s.kt)("wrapper",(0,n.Z)({},m,a,{components:t,mdxType:"MDXLayout"}),(0,s.kt)("h1",{id:"sql-queries"},"SQL 
Queries"),(0,s.kt)("p",null,(0,s.kt)("img",{parentName:"p",src:"https://img.shields.io/badge/support%20status-testing-lightgrey",alt:"Testing"})),(0,s.kt)("h3",{id:"cli-based-ingestion"},"CLI based Ingestion"),(0,s.kt)("h4",{id:"install-the-plugin"},"Install the Plugin"),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-shell"},"pip install 'acryl-datahub[sql-queries]'\n")),(0,s.kt)("h3",{id:"config-details"},"Config Details"),(0,s.kt)(l.Z,{mdxType:"Tabs"},(0,s.kt)(i.Z,{value:"options",label:"Options",default:!0,mdxType:"TabItem"},(0,s.kt)("p",null,"Note that a ",(0,s.kt)("inlineCode",{parentName:"p"},".")," is used to denote nested fields in the YAML recipe."),(0,s.kt)("div",{className:"config-table"},(0,s.kt)("table",null,(0,s.kt)("thead",{parentName:"table"},(0,s.kt)("tr",{parentName:"thead"},(0,s.kt)("th",{parentName:"tr",align:"left"},"Field"),(0,s.kt)("th",{parentName:"tr",align:"left"},"Description"))),(0,s.kt)("tbody",{parentName:"table"},(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"platform"),"\xa0",(0,s.kt)("abbr",{title:"Required"},"\u2705"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"The platform for which to generate data, e.g. 
snowflake")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"query_file"),"\xa0",(0,s.kt)("abbr",{title:"Required"},"\u2705"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Path to file to ingest")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"default_db"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"The default database to use for unqualified table names")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"default_schema"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"The default schema to use for unqualified table names")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"platform_instance"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"The instance of the platform that all assets produced by this recipe belong to")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"env"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"The environment that all assets produced by this connector belong to 
",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"PROD")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"usage"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"BaseUsageConfig"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"The usage config to use when generating usage statistics ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"{","'","bucket","_","duration","'",": ","'","DAY","'",", ","'","end","_","time","'",": ","'","2023-08-24...")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"bucket_duration"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"Enum"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Size of the time window to aggregate usage stats. ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"DAY")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"end_time"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string(date-time)"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Latest date of lineage/usage to consider. 
Default: Current time in UTC")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"format_sql_queries"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"boolean"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Whether to format sql queries ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"False")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"include_operational_stats"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"boolean"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Whether to display operational stats. ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"True")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"include_read_operational_stats"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"boolean"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Whether to report read operational stats. Experimental. 
",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"False")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"include_top_n_queries"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"boolean"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Whether to ingest the top_n_queries. ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"True")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"start_time"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string(date-time)"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on ",(0,s.kt)("inlineCode",{parentName:"td"},"bucket_duration"),"). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"top_n_queries"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"integer"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Number of top queries to save to each table. 
",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"10")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"user_email_pattern"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"regex patterns for user emails to filter in usage. ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage.user_email_pattern."),(0,s.kt)("span",{className:"path-main"},"allow"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"array(string)"))),(0,s.kt)("td",{parentName:"tr",align:"left"})),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage.user_email_pattern."),(0,s.kt)("span",{className:"path-main"},"deny"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"array(string)"))),(0,s.kt)("td",{parentName:"tr",align:"left"})),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage.user_email_pattern."),(0,s.kt)("span",{className:"path-main"},"ignoreCase"))," 
",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"boolean"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"True")))))))),(0,s.kt)(i.Z,{value:"schema",label:"Schema",mdxType:"TabItem"},(0,s.kt)("p",null,"The ",(0,s.kt)("a",{parentName:"p",href:"https://json-schema.org/"},"JSONSchema")," for this configuration is inlined below."),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-javascript"},'{\n "title": "SqlQueriesSourceConfig",\n "description": "Any source that connects to a platform should inherit this class",\n "type": "object",\n "properties": {\n "env": {\n "title": "Env",\n "description": "The environment that all assets produced by this connector belong to",\n "default": "PROD",\n "type": "string"\n },\n "platform_instance": {\n "title": "Platform Instance",\n "description": "The instance of the platform that all assets produced by this recipe belong to",\n "type": "string"\n },\n "query_file": {\n "title": "Query File",\n "description": "Path to file to ingest",\n "type": "string"\n },\n "platform": {\n "title": "Platform",\n "description": "The platform for which to generate data, e.g. 
snowflake",\n "type": "string"\n },\n "usage": {\n "title": "Usage",\n "description": "The usage config to use when generating usage statistics",\n "default": {\n "bucket_duration": "DAY",\n "end_time": "2023-08-24T21:05:51.278053+00:00",\n "start_time": "2023-08-23T00:00:00+00:00",\n "top_n_queries": 10,\n "user_email_pattern": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "include_operational_stats": true,\n "include_read_operational_stats": false,\n "format_sql_queries": false,\n "include_top_n_queries": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/BaseUsageConfig"\n }\n ]\n },\n "default_db": {\n "title": "Default Db",\n "description": "The default database to use for unqualified table names",\n "type": "string"\n },\n "default_schema": {\n "title": "Default Schema",\n "description": "The default schema to use for unqualified table names",\n "type": "string"\n }\n },\n "required": [\n "query_file",\n "platform"\n ],\n "additionalProperties": false,\n "definitions": {\n "BucketDuration": {\n "title": "BucketDuration",\n "description": "An enumeration.",\n "enum": [\n "DAY",\n "HOUR"\n ],\n "type": "string"\n },\n "AllowDenyPattern": {\n "title": "AllowDenyPattern",\n "description": "A class to store allow deny regexes",\n "type": "object",\n "properties": {\n "allow": {\n "title": "Allow",\n "description": "List of regex patterns to include in ingestion",\n "default": [\n ".*"\n ],\n "type": "array",\n "items": {\n "type": "string"\n }\n },\n "deny": {\n "title": "Deny",\n "description": "List of regex patterns to exclude from ingestion.",\n "default": [],\n "type": "array",\n "items": {\n "type": "string"\n }\n },\n "ignoreCase": {\n "title": "Ignorecase",\n "description": "Whether to ignore case sensitivity during pattern matching.",\n "default": true,\n "type": "boolean"\n }\n },\n "additionalProperties": false\n },\n "BaseUsageConfig": {\n "title": "BaseUsageConfig",\n "type": "object",\n "properties": {\n "bucket_duration": {\n 
"description": "Size of the time window to aggregate usage stats.",\n "default": "DAY",\n "allOf": [\n {\n "$ref": "#/definitions/BucketDuration"\n }\n ]\n },\n "end_time": {\n "title": "End Time",\n "description": "Latest date of lineage/usage to consider. Default: Current time in UTC",\n "type": "string",\n "format": "date-time"\n },\n "start_time": {\n "title": "Start Time",\n "description": "Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on `bucket_duration`). You can also specify relative time with respect to end_time such as \'-7 days\' Or \'-7d\'.",\n "type": "string",\n "format": "date-time"\n },\n "top_n_queries": {\n "title": "Top N Queries",\n "description": "Number of top queries to save to each table.",\n "default": 10,\n "exclusiveMinimum": 0,\n "type": "integer"\n },\n "user_email_pattern": {\n "title": "User Email Pattern",\n "description": "regex patterns for user emails to filter in usage.",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "include_operational_stats": {\n "title": "Include Operational Stats",\n "description": "Whether to display operational stats.",\n "default": true,\n "type": "boolean"\n },\n "include_read_operational_stats": {\n "title": "Include Read Operational Stats",\n "description": "Whether to report read operational stats. 
Experimental.",\n "default": false,\n "type": "boolean"\n },\n "format_sql_queries": {\n "title": "Format Sql Queries",\n "description": "Whether to format sql queries",\n "default": false,\n "type": "boolean"\n },\n "include_top_n_queries": {\n "title": "Include Top N Queries",\n "description": "Whether to ingest the top_n_queries.",\n "default": true,\n "type": "boolean"\n }\n },\n "additionalProperties": false\n }\n }\n}\n')))),(0,s.kt)("h3",{id:"code-coordinates"},"Code Coordinates"),(0,s.kt)("ul",null,(0,s.kt)("li",{parentName:"ul"},"Class Name: ",(0,s.kt)("inlineCode",{parentName:"li"},"datahub.ingestion.source.sql_queries.SqlQueriesSource")),(0,s.kt)("li",{parentName:"ul"},"Browse on ",(0,s.kt)("a",{parentName:"li",href:"https://github.com/datahub-project/datahub/blob/master/metadata-ingestion/src/datahub/ingestion/source/sql_queries.py"},"GitHub"))),(0,s.kt)("h2",null,"Questions"),(0,s.kt)("p",null,"If you've got any questions on configuring ingestion for SQL Queries, feel free to ping us on ",(0,s.kt)("a",{parentName:"p",href:"https://slack.datahubproject.io"},"our Slack"),"."))}c.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkdocs_website=self.webpackChunkdocs_website||[]).push([[8080],{3905:(e,t,a)=>{a.d(t,{Zo:()=>d,kt:()=>c});var n=a(67294);function s(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function l(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(s[a]=e[a]);return s}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(s[a]=e[a])}return s}var o=n.createContext({}),p=function(e){var t=n.useContext(o),a=t;return e&&(a="function"==typeof 
e?e(t):i(i({},t),e)),a},d=function(e){var t=p(e.components);return n.createElement(o.Provider,{value:t},e.children)},u={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},m=n.forwardRef((function(e,t){var a=e.components,s=e.mdxType,l=e.originalType,o=e.parentName,d=r(e,["components","mdxType","originalType","parentName"]),m=p(a),c=s,f=m["".concat(o,".").concat(c)]||m[c]||u[c]||l;return a?n.createElement(f,i(i({ref:t},d),{},{components:a})):n.createElement(f,i({ref:t},d))}));function c(e,t){var a=arguments,s=t&&t.mdxType;if("string"==typeof e||s){var l=a.length,i=new Array(l);i[0]=m;var r={};for(var o in t)hasOwnProperty.call(t,o)&&(r[o]=t[o]);r.originalType=e,r.mdxType="string"==typeof e?e:s,i[1]=r;for(var p=2;p{a.d(t,{Z:()=>i});var n=a(67294),s=a(86010);const l="tabItem_Ymn6";function i(e){let{children:t,hidden:a,className:i}=e;return n.createElement("div",{role:"tabpanel",className:(0,s.Z)(l,i),hidden:a},t)}},34259:(e,t,a)=>{a.d(t,{Z:()=>c});var n=a(83117),s=a(67294),l=a(86010),i=a(51048),r=a(33609),o=a(1943),p=a(72957);const d="tabList__CuJ",u="tabItem_LNqP";function m(e){const{lazy:t,block:a,defaultValue:i,values:m,groupId:c,className:f}=e,g=s.Children.map(e.children,(e=>{if((0,s.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),k=m??g.map((e=>{let{props:{value:t,label:a,attributes:n}}=e;return{value:t,label:a,attributes:n}})),h=(0,r.l)(k,((e,t)=>e.value===t.value));if(h.length>0)throw new Error(`Docusaurus error: Duplicate values "${h.map((e=>e.value)).join(", ")}" found in . 
Every value needs to be unique.`);const N=null===i?i:i??g.find((e=>e.props.default))?.props.value??g[0].props.value;if(null!==N&&!k.some((e=>e.value===N)))throw new Error(`Docusaurus error: The has a defaultValue "${N}" but none of its children has the corresponding value. Available values are: ${k.map((e=>e.value)).join(", ")}. If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:b,setTabGroupChoices:y}=(0,o.U)(),[v,_]=(0,s.useState)(N),w=[],{blockElementScrollPositionUntilNextRender:q}=(0,p.o5)();if(null!=c){const e=b[c];null!=e&&e!==v&&k.some((t=>t.value===e))&&_(e)}const T=e=>{const t=e.currentTarget,a=w.indexOf(t),n=k[a].value;n!==v&&(q(t),_(n),null!=c&&y(c,String(n)))},D=e=>{let t=null;switch(e.key){case"Enter":T(e);break;case"ArrowRight":{const a=w.indexOf(e.currentTarget)+1;t=w[a]??w[0];break}case"ArrowLeft":{const a=w.indexOf(e.currentTarget)-1;t=w[a]??w[w.length-1];break}}t?.focus()};return s.createElement("div",{className:(0,l.Z)("tabs-container",d)},s.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,l.Z)("tabs",{"tabs--block":a},f)},k.map((e=>{let{value:t,label:a,attributes:i}=e;return s.createElement("li",(0,n.Z)({role:"tab",tabIndex:v===t?0:-1,"aria-selected":v===t,key:t,ref:e=>w.push(e),onKeyDown:D,onClick:T},i,{className:(0,l.Z)("tabs__item",u,i?.className,{"tabs__item--active":v===t})}),a??t)}))),t?(0,s.cloneElement)(g.filter((e=>e.props.value===v))[0],{className:"margin-top--md"}):s.createElement("div",{className:"margin-top--md"},g.map(((e,t)=>(0,s.cloneElement)(e,{key:t,hidden:e.props.value!==v})))))}function c(e){const t=(0,i.Z)();return s.createElement(m,(0,n.Z)({key:String(t)},e))}},96647:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>d,contentTitle:()=>o,default:()=>c,frontMatter:()=>r,metadata:()=>p,toc:()=>u});var n=a(83117),s=(a(67294),a(3905)),l=a(34259),i=a(18679);const r={sidebar_position:48,title:"SQL 
Queries",slug:"/generated/ingestion/sources/sql-queries",custom_edit_url:"https://github.com/datahub-project/datahub/blob/master/docs/generated/ingestion/sources/sql-queries.md"},o="SQL Queries",p={unversionedId:"docs/generated/ingestion/sources/sql-queries",id:"docs/generated/ingestion/sources/sql-queries",title:"SQL Queries",description:"Testing",source:"@site/genDocs/docs/generated/ingestion/sources/sql-queries.md",sourceDirName:"docs/generated/ingestion/sources",slug:"/generated/ingestion/sources/sql-queries",permalink:"/docs/generated/ingestion/sources/sql-queries",draft:!1,editUrl:"https://github.com/datahub-project/datahub/blob/master/docs/generated/ingestion/sources/sql-queries.md",tags:[],version:"current",sidebarPosition:48,frontMatter:{sidebar_position:48,title:"SQL Queries",slug:"/generated/ingestion/sources/sql-queries",custom_edit_url:"https://github.com/datahub-project/datahub/blob/master/docs/generated/ingestion/sources/sql-queries.md"},sidebar:"overviewSidebar",previous:{title:"Snowflake",permalink:"/docs/generated/ingestion/sources/snowflake"},next:{title:"SQLAlchemy",permalink:"/docs/generated/ingestion/sources/sqlalchemy"}},d={},u=[{value:"CLI based Ingestion",id:"cli-based-ingestion",level:3},{value:"Install the Plugin",id:"install-the-plugin",level:4},{value:"Config Details",id:"config-details",level:3},{value:"Code Coordinates",id:"code-coordinates",level:3}],m={toc:u};function c(e){let{components:t,...a}=e;return(0,s.kt)("wrapper",(0,n.Z)({},m,a,{components:t,mdxType:"MDXLayout"}),(0,s.kt)("h1",{id:"sql-queries"},"SQL Queries"),(0,s.kt)("p",null,(0,s.kt)("img",{parentName:"p",src:"https://img.shields.io/badge/support%20status-testing-lightgrey",alt:"Testing"})),(0,s.kt)("h3",{id:"cli-based-ingestion"},"CLI based Ingestion"),(0,s.kt)("h4",{id:"install-the-plugin"},"Install the Plugin"),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-shell"},"pip install 
'acryl-datahub[sql-queries]'\n")),(0,s.kt)("h3",{id:"config-details"},"Config Details"),(0,s.kt)(l.Z,{mdxType:"Tabs"},(0,s.kt)(i.Z,{value:"options",label:"Options",default:!0,mdxType:"TabItem"},(0,s.kt)("p",null,"Note that a ",(0,s.kt)("inlineCode",{parentName:"p"},".")," is used to denote nested fields in the YAML recipe."),(0,s.kt)("div",{className:"config-table"},(0,s.kt)("table",null,(0,s.kt)("thead",{parentName:"table"},(0,s.kt)("tr",{parentName:"thead"},(0,s.kt)("th",{parentName:"tr",align:"left"},"Field"),(0,s.kt)("th",{parentName:"tr",align:"left"},"Description"))),(0,s.kt)("tbody",{parentName:"table"},(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"platform"),"\xa0",(0,s.kt)("abbr",{title:"Required"},"\u2705"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"The platform for which to generate data, e.g. 
snowflake")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"query_file"),"\xa0",(0,s.kt)("abbr",{title:"Required"},"\u2705"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Path to file to ingest")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"default_db"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"The default database to use for unqualified table names")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"default_schema"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"The default schema to use for unqualified table names")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"platform_instance"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"The instance of the platform that all assets produced by this recipe belong to")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"env"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"The environment that all assets produced by this connector belong to 
",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"PROD")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-main"},"usage"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"BaseUsageConfig"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"The usage config to use when generating usage statistics ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"{","'","bucket","_","duration","'",": ","'","DAY","'",", ","'","end","_","time","'",": ","'","2023-08-24...")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"bucket_duration"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"Enum"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Size of the time window to aggregate usage stats. ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"DAY")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"end_time"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string(date-time)"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Latest date of lineage/usage to consider. 
Default: Current time in UTC")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"format_sql_queries"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"boolean"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Whether to format sql queries ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"False")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"include_operational_stats"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"boolean"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Whether to display operational stats. ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"True")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"include_read_operational_stats"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"boolean"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Whether to report read operational stats. Experimental. 
",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"False")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"include_top_n_queries"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"boolean"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Whether to ingest the top_n_queries. ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"True")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"start_time"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"string(date-time)"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on ",(0,s.kt)("inlineCode",{parentName:"td"},"bucket_duration"),"). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.")),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"top_n_queries"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"integer"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Number of top queries to save to each table. 
",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"10")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage."),(0,s.kt)("span",{className:"path-main"},"user_email_pattern"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"regex patterns for user emails to filter in usage. ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage.user_email_pattern."),(0,s.kt)("span",{className:"path-main"},"allow"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"array(string)"))),(0,s.kt)("td",{parentName:"tr",align:"left"})),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage.user_email_pattern."),(0,s.kt)("span",{className:"path-main"},"deny"))," ",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"array(string)"))),(0,s.kt)("td",{parentName:"tr",align:"left"})),(0,s.kt)("tr",{parentName:"tbody"},(0,s.kt)("td",{parentName:"tr",align:"left"},(0,s.kt)("div",{className:"path-line"},(0,s.kt)("span",{className:"path-prefix"},"usage.user_email_pattern."),(0,s.kt)("span",{className:"path-main"},"ignoreCase"))," 
",(0,s.kt)("div",{className:"type-name-line"},(0,s.kt)("span",{className:"type-name"},"boolean"))),(0,s.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. ",(0,s.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,s.kt)("span",{className:"default-value"},"True")))))))),(0,s.kt)(i.Z,{value:"schema",label:"Schema",mdxType:"TabItem"},(0,s.kt)("p",null,"The ",(0,s.kt)("a",{parentName:"p",href:"https://json-schema.org/"},"JSONSchema")," for this configuration is inlined below."),(0,s.kt)("pre",null,(0,s.kt)("code",{parentName:"pre",className:"language-javascript"},'{\n "title": "SqlQueriesSourceConfig",\n "description": "Any source that connects to a platform should inherit this class",\n "type": "object",\n "properties": {\n "env": {\n "title": "Env",\n "description": "The environment that all assets produced by this connector belong to",\n "default": "PROD",\n "type": "string"\n },\n "platform_instance": {\n "title": "Platform Instance",\n "description": "The instance of the platform that all assets produced by this recipe belong to",\n "type": "string"\n },\n "query_file": {\n "title": "Query File",\n "description": "Path to file to ingest",\n "type": "string"\n },\n "platform": {\n "title": "Platform",\n "description": "The platform for which to generate data, e.g. 
snowflake",\n "type": "string"\n },\n "usage": {\n "title": "Usage",\n "description": "The usage config to use when generating usage statistics",\n "default": {\n "bucket_duration": "DAY",\n "end_time": "2023-08-24T22:32:38.872355+00:00",\n "start_time": "2023-08-23T00:00:00+00:00",\n "top_n_queries": 10,\n "user_email_pattern": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "include_operational_stats": true,\n "include_read_operational_stats": false,\n "format_sql_queries": false,\n "include_top_n_queries": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/BaseUsageConfig"\n }\n ]\n },\n "default_db": {\n "title": "Default Db",\n "description": "The default database to use for unqualified table names",\n "type": "string"\n },\n "default_schema": {\n "title": "Default Schema",\n "description": "The default schema to use for unqualified table names",\n "type": "string"\n }\n },\n "required": [\n "query_file",\n "platform"\n ],\n "additionalProperties": false,\n "definitions": {\n "BucketDuration": {\n "title": "BucketDuration",\n "description": "An enumeration.",\n "enum": [\n "DAY",\n "HOUR"\n ],\n "type": "string"\n },\n "AllowDenyPattern": {\n "title": "AllowDenyPattern",\n "description": "A class to store allow deny regexes",\n "type": "object",\n "properties": {\n "allow": {\n "title": "Allow",\n "description": "List of regex patterns to include in ingestion",\n "default": [\n ".*"\n ],\n "type": "array",\n "items": {\n "type": "string"\n }\n },\n "deny": {\n "title": "Deny",\n "description": "List of regex patterns to exclude from ingestion.",\n "default": [],\n "type": "array",\n "items": {\n "type": "string"\n }\n },\n "ignoreCase": {\n "title": "Ignorecase",\n "description": "Whether to ignore case sensitivity during pattern matching.",\n "default": true,\n "type": "boolean"\n }\n },\n "additionalProperties": false\n },\n "BaseUsageConfig": {\n "title": "BaseUsageConfig",\n "type": "object",\n "properties": {\n "bucket_duration": {\n 
"description": "Size of the time window to aggregate usage stats.",\n "default": "DAY",\n "allOf": [\n {\n "$ref": "#/definitions/BucketDuration"\n }\n ]\n },\n "end_time": {\n "title": "End Time",\n "description": "Latest date of lineage/usage to consider. Default: Current time in UTC",\n "type": "string",\n "format": "date-time"\n },\n "start_time": {\n "title": "Start Time",\n "description": "Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on `bucket_duration`). You can also specify relative time with respect to end_time such as \'-7 days\' Or \'-7d\'.",\n "type": "string",\n "format": "date-time"\n },\n "top_n_queries": {\n "title": "Top N Queries",\n "description": "Number of top queries to save to each table.",\n "default": 10,\n "exclusiveMinimum": 0,\n "type": "integer"\n },\n "user_email_pattern": {\n "title": "User Email Pattern",\n "description": "regex patterns for user emails to filter in usage.",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "include_operational_stats": {\n "title": "Include Operational Stats",\n "description": "Whether to display operational stats.",\n "default": true,\n "type": "boolean"\n },\n "include_read_operational_stats": {\n "title": "Include Read Operational Stats",\n "description": "Whether to report read operational stats. 
Experimental.",\n "default": false,\n "type": "boolean"\n },\n "format_sql_queries": {\n "title": "Format Sql Queries",\n "description": "Whether to format sql queries",\n "default": false,\n "type": "boolean"\n },\n "include_top_n_queries": {\n "title": "Include Top N Queries",\n "description": "Whether to ingest the top_n_queries.",\n "default": true,\n "type": "boolean"\n }\n },\n "additionalProperties": false\n }\n }\n}\n')))),(0,s.kt)("h3",{id:"code-coordinates"},"Code Coordinates"),(0,s.kt)("ul",null,(0,s.kt)("li",{parentName:"ul"},"Class Name: ",(0,s.kt)("inlineCode",{parentName:"li"},"datahub.ingestion.source.sql_queries.SqlQueriesSource")),(0,s.kt)("li",{parentName:"ul"},"Browse on ",(0,s.kt)("a",{parentName:"li",href:"https://github.com/datahub-project/datahub/blob/master/metadata-ingestion/src/datahub/ingestion/source/sql_queries.py"},"GitHub"))),(0,s.kt)("h2",null,"Questions"),(0,s.kt)("p",null,"If you've got any questions on configuring ingestion for SQL Queries, feel free to ping us on ",(0,s.kt)("a",{parentName:"p",href:"https://slack.datahubproject.io"},"our Slack"),"."))}c.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/27b2c5ed.567a55da.js b/assets/js/27b2c5ed.3a92e29b.js similarity index 85% rename from assets/js/27b2c5ed.567a55da.js rename to assets/js/27b2c5ed.3a92e29b.js index c57404e5cf29f..1af99de466f75 100644 --- a/assets/js/27b2c5ed.567a55da.js +++ b/assets/js/27b2c5ed.3a92e29b.js @@ -1 +1 @@ -"use strict";(self.webpackChunkdocs_website=self.webpackChunkdocs_website||[]).push([[6806],{3905:(t,l,e)=>{e.d(l,{Zo:()=>i,kt:()=>c});var n=e(67294);function r(t,l,e){return l in t?Object.defineProperty(t,l,{value:e,enumerable:!0,configurable:!0,writable:!0}):t[l]=e,t}function u(t,l){var e=Object.keys(t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(t);l&&(n=n.filter((function(l){return Object.getOwnPropertyDescriptor(t,l).enumerable}))),e.push.apply(e,n)}return e}function a(t){for(var 
l=1;l=0||(r[e]=t[e]);return r}(t,l);if(Object.getOwnPropertySymbols){var u=Object.getOwnPropertySymbols(t);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(t,e)&&(r[e]=t[e])}return r}var o=n.createContext({}),s=function(t){var l=n.useContext(o),e=l;return t&&(e="function"==typeof t?t(l):a(a({},l),t)),e},i=function(t){var l=s(t.components);return n.createElement(o.Provider,{value:l},t.children)},d={inlineCode:"code",wrapper:function(t){var l=t.children;return n.createElement(n.Fragment,{},l)}},p=n.forwardRef((function(t,l){var e=t.components,r=t.mdxType,u=t.originalType,o=t.parentName,i=k(t,["components","mdxType","originalType","parentName"]),p=s(e),c=r,h=p["".concat(o,".").concat(c)]||p[c]||d[c]||u;return e?n.createElement(h,a(a({ref:l},i),{},{components:e})):n.createElement(h,a({ref:l},i))}));function c(t,l){var e=arguments,r=l&&l.mdxType;if("string"==typeof t||r){var u=e.length,a=new Array(u);a[0]=p;var k={};for(var o in l)hasOwnProperty.call(l,o)&&(k[o]=l[o]);k.originalType=t,k.mdxType="string"==typeof t?t:r,a[1]=k;for(var s=2;s{e.r(l),e.d(l,{assets:()=>o,contentTitle:()=>a,default:()=>d,frontMatter:()=>u,metadata:()=>k,toc:()=>s});var n=e(83117),r=(e(67294),e(3905));const u={id:"inputObjects",title:"Input objects",slug:"inputObjects",sidebar_position:7},a=void 0,k={unversionedId:"graphql/inputObjects",id:"graphql/inputObjects",title:"Input objects",description:"AcceptRoleInput",source:"@site/genDocs/graphql/inputObjects.md",sourceDirName:"graphql",slug:"/graphql/inputObjects",permalink:"/docs/graphql/inputObjects",draft:!1,editUrl:"https://github.com/datahub-project/datahub/blob/master/genDocs/graphql/inputObjects.md",tags:[],version:"current",sidebarPosition:7,frontMatter:{id:"inputObjects",title:"Input 
objects",slug:"inputObjects",sidebar_position:7},sidebar:"overviewSidebar",previous:{title:"Objects",permalink:"/docs/graphql/objects"},next:{title:"Interfaces",permalink:"/docs/graphql/interfaces"}},o={},s=[{value:"AcceptRoleInput",id:"acceptroleinput",level:2},{value:"ActorFilterInput",id:"actorfilterinput",level:2},{value:"AddGroupMembersInput",id:"addgroupmembersinput",level:2},{value:"AddLinkInput",id:"addlinkinput",level:2},{value:"AddNativeGroupMembersInput",id:"addnativegroupmembersinput",level:2},{value:"AddOwnerInput",id:"addownerinput",level:2},{value:"AddOwnersInput",id:"addownersinput",level:2},{value:"AddTagsInput",id:"addtagsinput",level:2},{value:"AddTermsInput",id:"addtermsinput",level:2},{value:"AggregateAcrossEntitiesInput",id:"aggregateacrossentitiesinput",level:2},{value:"AndFilterInput",id:"andfilterinput",level:2},{value:"AspectParams",id:"aspectparams",level:2},{value:"AutoCompleteInput",id:"autocompleteinput",level:2},{value:"AutoCompleteMultipleInput",id:"autocompletemultipleinput",level:2},{value:"BatchAddOwnersInput",id:"batchaddownersinput",level:2},{value:"BatchAddTagsInput",id:"batchaddtagsinput",level:2},{value:"BatchAddTermsInput",id:"batchaddtermsinput",level:2},{value:"BatchAssignRoleInput",id:"batchassignroleinput",level:2},{value:"BatchDatasetUpdateInput",id:"batchdatasetupdateinput",level:2},{value:"BatchGetStepStatesInput",id:"batchgetstepstatesinput",level:2},{value:"BatchRemoveOwnersInput",id:"batchremoveownersinput",level:2},{value:"BatchRemoveTagsInput",id:"batchremovetagsinput",level:2},{value:"BatchRemoveTermsInput",id:"batchremovetermsinput",level:2},{value:"BatchSetDataProductInput",id:"batchsetdataproductinput",level:2},{value:"BatchSetDomainInput",id:"batchsetdomaininput",level:2},{value:"BatchUpdateDeprecationInput",id:"batchupdatedeprecationinput",level:2},{value:"BatchUpdateSoftDeletedInput",id:"batchupdatesoftdeletedinput",level:2},{value:"BatchUpdateStepStatesInput",id:"batchupdatestepstatesinput",level:2},{value
:"BrowseInput",id:"browseinput",level:2},{value:"BrowsePathsInput",id:"browsepathsinput",level:2},{value:"BrowseV2Input",id:"browsev2input",level:2},{value:"CancelIngestionExecutionRequestInput",id:"cancelingestionexecutionrequestinput",level:2},{value:"ChartEditablePropertiesUpdate",id:"charteditablepropertiesupdate",level:2},{value:"ChartUpdateInput",id:"chartupdateinput",level:2},{value:"ContainerEntitiesInput",id:"containerentitiesinput",level:2},{value:"CorpGroupUpdateInput",id:"corpgroupupdateinput",level:2},{value:"CorpUserUpdateInput",id:"corpuserupdateinput",level:2},{value:"CreateAccessTokenInput",id:"createaccesstokeninput",level:2},{value:"CreateDataProductInput",id:"createdataproductinput",level:2},{value:"CreateDataProductPropertiesInput",id:"createdataproductpropertiesinput",level:2},{value:"CreateDomainInput",id:"createdomaininput",level:2},{value:"CreateGlossaryEntityInput",id:"createglossaryentityinput",level:2},{value:"CreateGroupInput",id:"creategroupinput",level:2},{value:"CreateIngestionExecutionRequestInput",id:"createingestionexecutionrequestinput",level:2},{value:"CreateInviteTokenInput",id:"createinvitetokeninput",level:2},{value:"CreateNativeUserResetTokenInput",id:"createnativeuserresettokeninput",level:2},{value:"CreateOwnershipTypeInput",id:"createownershiptypeinput",level:2},{value:"CreatePostInput",id:"createpostinput",level:2},{value:"CreateQueryInput",id:"createqueryinput",level:2},{value:"CreateQueryPropertiesInput",id:"createquerypropertiesinput",level:2},{value:"CreateQuerySubjectInput",id:"createquerysubjectinput",level:2},{value:"CreateSecretInput",id:"createsecretinput",level:2},{value:"CreateTagInput",id:"createtaginput",level:2},{value:"CreateTestConnectionRequestInput",id:"createtestconnectionrequestinput",level:2},{value:"CreateTestInput",id:"createtestinput",level:2},{value:"CreateViewInput",id:"createviewinput",level:2},{value:"DashboardEditablePropertiesUpdate",id:"dashboardeditablepropertiesupdate",level:2},{value:"Das
hboardUpdateInput",id:"dashboardupdateinput",level:2},{value:"DataFlowEditablePropertiesUpdate",id:"datafloweditablepropertiesupdate",level:2},{value:"DataFlowUpdateInput",id:"dataflowupdateinput",level:2},{value:"DataHubViewDefinitionInput",id:"datahubviewdefinitioninput",level:2},{value:"DataHubViewFilterInput",id:"datahubviewfilterinput",level:2},{value:"DataJobEditablePropertiesUpdate",id:"datajobeditablepropertiesupdate",level:2},{value:"DataJobUpdateInput",id:"datajobupdateinput",level:2},{value:"DataProductEntitiesInput",id:"dataproductentitiesinput",level:2},{value:"DatasetDeprecationUpdate",id:"datasetdeprecationupdate",level:2},{value:"DatasetEditablePropertiesUpdate",id:"dataseteditablepropertiesupdate",level:2},{value:"DatasetUpdateInput",id:"datasetupdateinput",level:2},{value:"DescriptionUpdateInput",id:"descriptionupdateinput",level:2},{value:"DomainEntitiesInput",id:"domainentitiesinput",level:2},{value:"EditableSchemaFieldInfoUpdate",id:"editableschemafieldinfoupdate",level:2},{value:"EditableSchemaMetadataUpdate",id:"editableschemametadataupdate",level:2},{value:"EntityCountInput",id:"entitycountinput",level:2},{value:"EntityRequestContext",id:"entityrequestcontext",level:2},{value:"FacetFilterInput",id:"facetfilterinput",level:2},{value:"FilterInput",id:"filterinput",level:2},{value:"GetAccessTokenInput",id:"getaccesstokeninput",level:2},{value:"GetGrantedPrivilegesInput",id:"getgrantedprivilegesinput",level:2},{value:"GetInviteTokenInput",id:"getinvitetokeninput",level:2},{value:"GetQuickFiltersInput",id:"getquickfiltersinput",level:2},{value:"GetRootGlossaryEntitiesInput",id:"getrootglossaryentitiesinput",level:2},{value:"GetSchemaBlameInput",id:"getschemablameinput",level:2},{value:"GetSchemaVersionListInput",id:"getschemaversionlistinput",level:2},{value:"GetSecretValuesInput",id:"getsecretvaluesinput",level:2},{value:"GlobalTagsUpdate",id:"globaltagsupdate",level:2},{value:"InstitutionalMemoryMetadataUpdate",id:"institutionalmemorymetadataupd
ate",level:2},{value:"InstitutionalMemoryUpdate",id:"institutionalmemoryupdate",level:2},{value:"LineageEdge",id:"lineageedge",level:2},{value:"LineageInput",id:"lineageinput",level:2},{value:"ListAccessTokenInput",id:"listaccesstokeninput",level:2},{value:"ListDomainsInput",id:"listdomainsinput",level:2},{value:"ListGlobalViewsInput",id:"listglobalviewsinput",level:2},{value:"ListGroupsInput",id:"listgroupsinput",level:2},{value:"ListIngestionSourcesInput",id:"listingestionsourcesinput",level:2},{value:"ListMyViewsInput",id:"listmyviewsinput",level:2},{value:"ListOwnershipTypesInput",id:"listownershiptypesinput",level:2},{value:"ListPoliciesInput",id:"listpoliciesinput",level:2},{value:"ListPostsInput",id:"listpostsinput",level:2},{value:"ListQueriesInput",id:"listqueriesinput",level:2},{value:"ListRecommendationsInput",id:"listrecommendationsinput",level:2},{value:"ListRolesInput",id:"listrolesinput",level:2},{value:"ListSecretsInput",id:"listsecretsinput",level:2},{value:"ListTestsInput",id:"listtestsinput",level:2},{value:"ListUsersInput",id:"listusersinput",level:2},{value:"MetadataAnalyticsInput",id:"metadataanalyticsinput",level:2},{value:"NotebookEditablePropertiesUpdate",id:"notebookeditablepropertiesupdate",level:2},{value:"NotebookUpdateInput",id:"notebookupdateinput",level:2},{value:"OwnerInput",id:"ownerinput",level:2},{value:"OwnershipUpdate",id:"ownershipupdate",level:2},{value:"OwnerUpdate",id:"ownerupdate",level:2},{value:"PolicyMatchCriterionInput",id:"policymatchcriterioninput",level:2},{value:"PolicyMatchFilterInput",id:"policymatchfilterinput",level:2},{value:"PolicyUpdateInput",id:"policyupdateinput",level:2},{value:"QueryStatementInput",id:"querystatementinput",level:2},{value:"RecommendationRequestContext",id:"recommendationrequestcontext",level:2},{value:"RelatedTermsInput",id:"relatedtermsinput",level:2},{value:"RelationshipsInput",id:"relationshipsinput",level:2},{value:"RemoveGroupMembersInput",id:"removegroupmembersinput",level:2},{value
:"RemoveLinkInput",id:"removelinkinput",level:2},{value:"RemoveNativeGroupMembersInput",id:"removenativegroupmembersinput",level:2},{value:"RemoveOwnerInput",id:"removeownerinput",level:2},{value:"ReportOperationInput",id:"reportoperationinput",level:2},{value:"ResourceFilterInput",id:"resourcefilterinput",level:2},{value:"ResourceRefInput",id:"resourcerefinput",level:2},{value:"ResourceSpec",id:"resourcespec",level:2},{value:"RollbackIngestionInput",id:"rollbackingestioninput",level:2},{value:"ScrollAcrossEntitiesInput",id:"scrollacrossentitiesinput",level:2},{value:"ScrollAcrossLineageInput",id:"scrollacrosslineageinput",level:2},{value:"SearchAcrossEntitiesInput",id:"searchacrossentitiesinput",level:2},{value:"SearchAcrossLineageInput",id:"searchacrosslineageinput",level:2},{value:"SearchFlags",id:"searchflags",level:2},{value:"SearchInput",id:"searchinput",level:2},{value:"SearchRequestContext",id:"searchrequestcontext",level:2},{value:"SearchSortInput",id:"searchsortinput",level:2},{value:"SortCriterion",id:"sortcriterion",level:2},{value:"StepStateInput",id:"stepstateinput",level:2},{value:"StringMapEntryInput",id:"stringmapentryinput",level:2},{value:"TagAssociationInput",id:"tagassociationinput",level:2},{value:"TagAssociationUpdate",id:"tagassociationupdate",level:2},{value:"TagUpdateInput",id:"tagupdateinput",level:2},{value:"TermAssociationInput",id:"termassociationinput",level:2},{value:"TestDefinitionInput",id:"testdefinitioninput",level:2},{value:"UpdateCorpUserViewsSettingsInput",id:"updatecorpuserviewssettingsinput",level:2},{value:"UpdateDataProductInput",id:"updatedataproductinput",level:2},{value:"UpdateDeprecationInput",id:"updatedeprecationinput",level:2},{value:"UpdateEmbedInput",id:"updateembedinput",level:2},{value:"UpdateGlobalViewsSettingsInput",id:"updateglobalviewssettingsinput",level:2},{value:"UpdateIngestionSourceConfigInput",id:"updateingestionsourceconfiginput",level:2},{value:"UpdateIngestionSourceInput",id:"updateingestionsourceinp
ut",level:2},{value:"UpdateIngestionSourceScheduleInput",id:"updateingestionsourcescheduleinput",level:2},{value:"UpdateLineageInput",id:"updatelineageinput",level:2},{value:"UpdateMediaInput",id:"updatemediainput",level:2},{value:"UpdateNameInput",id:"updatenameinput",level:2},{value:"UpdateOwnershipTypeInput",id:"updateownershiptypeinput",level:2},{value:"UpdateParentNodeInput",id:"updateparentnodeinput",level:2},{value:"UpdatePostContentInput",id:"updatepostcontentinput",level:2},{value:"UpdateQueryInput",id:"updatequeryinput",level:2},{value:"UpdateQueryPropertiesInput",id:"updatequerypropertiesinput",level:2},{value:"UpdateQuerySubjectInput",id:"updatequerysubjectinput",level:2},{value:"UpdateTestInput",id:"updatetestinput",level:2},{value:"UpdateUserSettingInput",id:"updateusersettinginput",level:2},{value:"UpdateViewInput",id:"updateviewinput",level:2}],i={toc:s};function d(t){let{components:l,...e}=t;return(0,r.kt)("wrapper",(0,n.Z)({},i,e,{components:l,mdxType:"MDXLayout"}),(0,r.kt)("h2",{id:"acceptroleinput"},"AcceptRoleInput"),(0,r.kt)("p",null,"Input provided when accepting a DataHub role using an invite token"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"inviteToken",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The token needed to accept the role"))))),(0,r.kt)("h2",{id:"actorfilterinput"},"ActorFilterInput"),(0,r.kt)("p",null,"Input required when creating or updating an Access Policies Determines which actors the Policy applies 
to"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A disjunctive set of users to apply the policy to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A disjunctive set of groups to apply the policy to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceOwners",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the filter should return TRUE for owners of a particular resource Only applies to policies of type METADATA, which have a resource associated with them"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceOwnersTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Set of OwnershipTypes to apply the policy to (if resourceOwners field is set to True)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"allUsers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the filter should apply to all users"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"allGroups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the filter should apply to all groups"))))),(0,r.kt)("h2",{id:"addgroupmembersinput"},"AddGroupMembersInput"),(0,r.kt)("p",null,"Input required to add members to an 
external DataHub group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groupUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The group to add members to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"userUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The members to add to the group"))))),(0,r.kt)("h2",{id:"addlinkinput"},"AddLinkInput"),(0,r.kt)("p",null,"Input provided when adding the association between a Metadata Entity and a Link"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"linkUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The url of the link to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"label",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A label to attach to the link"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the resource or entity to attach the link to, for example a dataset urn"))))),(0,r.kt)("h2",{id:"addnativegroupmembersinput"},"AddNativeGroupMembersInput"),(0,r.kt)("p",null,"Input required to add members to a native DataHub 
group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groupUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The group to add members to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"userUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The members to add to the group"))))),(0,r.kt)("h2",{id:"addownerinput"},"AddOwnerInput"),(0,r.kt)("p",null,"Input provided when adding the association between a Metadata Entity and an user or group owner"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Owner to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerEntityType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownerentitytype"},(0,r.kt)("code",null,"OwnerEntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The owner type, either a user or group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownershiptype"},(0,r.kt)("code",null,"OwnershipType"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"The ownership type for the new owner. If none is provided, then a new NONE will be added. 
Deprecated - Use ownershipTypeUrn field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the ownership type entity."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the resource or entity to attach or remove the owner from, for example a dataset urn"))))),(0,r.kt)("h2",{id:"addownersinput"},"AddOwnersInput"),(0,r.kt)("p",null,"Input provided when adding multiple associations between a Metadata Entity and an user or group owner"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"owners",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownerinput"},(0,r.kt)("code",null,"[OwnerInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Owner to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the resource or entity to attach or remove the owner from, for example a dataset urn"))))),(0,r.kt)("h2",{id:"addtagsinput"},"AddTagsInput"),(0,r.kt)("p",null,"Input provided when adding tags to an 
asset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tagUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target Metadata Entity to add or remove the Tag to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#subresourcetype"},(0,r.kt)("code",null,"SubResourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional type of a sub resource to attach the Tag to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional sub resource identifier to attach the Tag to"))))),(0,r.kt)("h2",{id:"addtermsinput"},"AddTermsInput"),(0,r.kt)("p",null,"Input provided when adding Terms to an asset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"termUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Glossary Term to add or 
remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target Metadata Entity to add or remove the Glossary Term from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#subresourcetype"},(0,r.kt)("code",null,"SubResourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional type of a sub resource to attach the Glossary Term to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional sub resource identifier to attach the Glossary Term to"))))),(0,r.kt)("h2",{id:"aggregateacrossentitiesinput"},"AggregateAcrossEntitiesInput"),(0,r.kt)("p",null,"Input arguments for a full text search query across entities to get aggregations"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be searched. If this is not provided, all entities will be searched."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"facets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The list of facets to get aggregations for. 
If list is empty or null, get aggregations for all facets Sub-aggregations can be specified with the unicode character \u241e (U+241E) as a delimiter between the subtypes. e.g. _entityType\u241eowners"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. (or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - A View to apply when generating results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchflags"},(0,r.kt)("code",null,"SearchFlags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flags controlling search options"))))),(0,r.kt)("h2",{id:"andfilterinput"},"AndFilterInput"),(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"and",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of and criteria the filter applies to the query"))))),(0,r.kt)("h2",{id:"aspectparams"},"AspectParams"),(0,r.kt)("p",null,"Params to configure what list of aspects should be fetched by the aspects property"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"autoRenderOnly",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Only fetch auto render aspects"))))),(0,r.kt)("h2",{id:"autocompleteinput"},"AutoCompleteInput"),(0,r.kt)("p",null,"Input for performing an auto completion query against a single Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity type to be autocompleted against"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The raw query 
string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional entity field name to autocomplete on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of autocomplete results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Faceted filters applied to autocomplete results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))))),(0,r.kt)("h2",{id:"autocompletemultipleinput"},"AutoCompleteMultipleInput"),(0,r.kt)("p",null,"Input for performing an auto completion query against a a set of Metadata Entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be autocompleted against Optional, if none supplied, all searchable types will be autocompleted against"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The raw query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional field to autocomplete against"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of autocomplete results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Faceted filters applied to autocomplete results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - A View to apply when generating results"))))),(0,r.kt)("h2",{id:"batchaddownersinput"},"BatchAddOwnersInput"),(0,r.kt)("p",null,"Input provided when adding owners to a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"owners",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownerinput"},(0,r.kt)("code",null,"[OwnerInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the owners"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The ownership type to remove, optional. 
By default will remove regardless of ownership type."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to attach the owners to"))))),(0,r.kt)("h2",{id:"batchaddtagsinput"},"BatchAddTagsInput"),(0,r.kt)("p",null,"Input provided when adding tags to a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tagUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to attach the tags to"))))),(0,r.kt)("h2",{id:"batchaddtermsinput"},"BatchAddTermsInput"),(0,r.kt)("p",null,"Input provided when adding glossary terms to a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"termUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Glossary 
Terms"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to attach the glossary terms to"))))),(0,r.kt)("h2",{id:"batchassignroleinput"},"BatchAssignRoleInput"),(0,r.kt)("p",null,"Input provided when batch assigning a role to a list of users"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"roleUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the role to assign to the actors. If undefined, will remove the role."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urns of the actors to assign the role to"))))),(0,r.kt)("h2",{id:"batchdatasetupdateinput"},"BatchDatasetUpdateInput"),(0,r.kt)("p",null,"Arguments provided to batch update Dataset entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Primary key of the Dataset to which the update will be 
applied"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"update",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datasetupdateinput"},(0,r.kt)("code",null,"DatasetUpdateInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Arguments provided to update the Dataset"))))),(0,r.kt)("h2",{id:"batchgetstepstatesinput"},"BatchGetStepStatesInput"),(0,r.kt)("p",null,"Input arguments required for fetching step states"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ids",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique ids for the steps to retrieve"))))),(0,r.kt)("h2",{id:"batchremoveownersinput"},"BatchRemoveOwnersInput"),(0,r.kt)("p",null,"Input provided when removing owners from a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the owners"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The ownership type to remove, optional. 
By default will remove regardless of ownership type."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to remove the owners from"))))),(0,r.kt)("h2",{id:"batchremovetagsinput"},"BatchRemoveTagsInput"),(0,r.kt)("p",null,"Input provided when removing tags from a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tagUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to remove the tags from"))))),(0,r.kt)("h2",{id:"batchremovetermsinput"},"BatchRemoveTermsInput"),(0,r.kt)("p",null,"Input provided when removing glossary terms from a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"termUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Glossary 
Terms"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to remove the glossary terms from"))))),(0,r.kt)("h2",{id:"batchsetdataproductinput"},"BatchSetDataProductInput"),(0,r.kt)("p",null,"Input properties required for batch setting a DataProduct on other entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataProductUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the data product you are setting on a group of resources. If this is null, the Data Product will be unset for the given resources."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urns of the entities the given data product should be set on"))))),(0,r.kt)("h2",{id:"batchsetdomaininput"},"BatchSetDomainInput"),(0,r.kt)("p",null,"Input provided when adding tags to a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"domainUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Domain, or null if the domain will be 
unset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to attach the Domain"))))),(0,r.kt)("h2",{id:"batchupdatedeprecationinput"},"BatchUpdateDeprecationInput"),(0,r.kt)("p",null,"Input provided when updating the deprecation status for a batch of assets."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the Entity is marked as deprecated."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"decommissionTime",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - The time user plan to decommission this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"note",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - Additional information about the entity deprecation plan"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to attach the tags to"))))),(0,r.kt)("h2",{id:"batchupdatesoftdeletedinput"},"BatchUpdateSoftDeletedInput"),(0,r.kt)("p",null,"Input provided when updating the soft-deleted status for a batch of 
assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urns of the assets to soft delete"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deleted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether to mark the asset as soft-deleted (hidden)"))))),(0,r.kt)("h2",{id:"batchupdatestepstatesinput"},"BatchUpdateStepStatesInput"),(0,r.kt)("p",null,"Input arguments required for updating step states"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"states",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#stepstateinput"},(0,r.kt)("code",null,"[StepStateInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Set of step states. 
If the id does not exist, it will be created."))))),(0,r.kt)("h2",{id:"browseinput"},"BrowseInput"),(0,r.kt)("p",null,"Input required for browse queries"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse entity type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: Use `orFilters`- they are more expressive"),(0,r.kt)("p",null,"Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))))),(0,r.kt)("h2",{id:"browsepathsinput"},"BrowsePathsInput"),(0,r.kt)("p",null,"Inputs for fetching the browse paths for a Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse entity type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The entity urn"))))),(0,r.kt)("h2",{id:"browsev2input"},"BrowseV2Input"),(0,r.kt)("p",null,"Input required for browse queries"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse entity type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 - a list with each entry being part of the browse path V2"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated 
results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. (or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - A View to apply when generating results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The search query string"))))),(0,r.kt)("h2",{id:"cancelingestionexecutionrequestinput"},"CancelIngestionExecutionRequestInput"),(0,r.kt)("p",null,"Input for cancelling an execution request input"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ingestionSourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the ingestion source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionRequestUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the specific execution request to 
cancel"))))),(0,r.kt)("h2",{id:"charteditablepropertiesupdate"},"ChartEditablePropertiesUpdate"),(0,r.kt)("p",null,"Update to writable Chart fields"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Writable description aka documentation for a Chart"))))),(0,r.kt)("h2",{id:"chartupdateinput"},"ChartUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a Chart Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use tags field instead Update to global tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#charteditablepropertiesupdate"},(0,r.kt)("code",null,"ChartEditablePropertiesUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to 
editable properties"))))),(0,r.kt)("h2",{id:"containerentitiesinput"},"ContainerEntitiesInput"),(0,r.kt)("p",null,"Input required to fetch the entities inside of a container."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional query filter for particular entities inside the container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities to include in result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional Facet filters to apply to the result set"))))),(0,r.kt)("h2",{id:"corpgroupupdateinput"},"CorpGroupUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a CorpGroup Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"DataHub description of the 
group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"slack",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Slack handle for the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Email address for the group"))))),(0,r.kt)("h2",{id:"corpuserupdateinput"},"CorpUserUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a CorpUser Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name to show on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title to show on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aboutMe",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"About me section of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"teams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Teams that the user belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"skills",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Skills that the user 
possesses"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"pictureLink",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A URL which points to a picture which user wants to set as a profile photo"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"slack",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The slack handle of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"phone",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Phone number for the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Email address for the user"))))),(0,r.kt)("h2",{id:"createaccesstokeninput"},"CreateAccessTokenInput"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesstokentype"},(0,r.kt)("code",null,"AccessTokenType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the Access Token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actorUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actor associated with the Access Token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"duration",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesstokenduration"},(0,r.kt)("code",null,"AccessTokenDuration!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The duration for which the Access Token is 
valid."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the token to be generated."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the token if defined."))))),(0,r.kt)("h2",{id:"createdataproductinput"},"CreateDataProductInput"),(0,r.kt)("p",null,"Input required for creating a DataProduct."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#createdataproductpropertiesinput"},(0,r.kt)("code",null,"CreateDataProductPropertiesInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties about the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domainUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Domain"))))),(0,r.kt)("h2",{id:"createdataproductpropertiesinput"},"CreateDataProductPropertiesInput"),(0,r.kt)("p",null,"Input properties required for creating a DataProduct"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A display name for the 
DataProduct"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description for the DataProduct"))))),(0,r.kt)("h2",{id:"createdomaininput"},"CreateDomainInput"),(0,r.kt)("p",null,"Input required to create a new Domain."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional! A custom id to use as the primary key identifier for the domain. If not provided, a random UUID will be generated as the id."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name for the Domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional description for the Domain"))))),(0,r.kt)("h2",{id:"createglossaryentityinput"},"CreateGlossaryEntityInput"),(0,r.kt)("p",null,"Input required to create a new Glossary Entity - a Node or a Term."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional! 
A custom id to use as the primary key identifier for the domain. If not provided, a random UUID will be generated as the id."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name for the Node or Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description for the Node or Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentNode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional parent node urn for the Glossary Node or Term"))))),(0,r.kt)("h2",{id:"creategroupinput"},"CreateGroupInput"),(0,r.kt)("p",null,"Input for creating a new group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional! A custom id to use as the primary key identifier for the group. 
If not provided, a random UUID will be generated as the id."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name of the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the group"))))),(0,r.kt)("h2",{id:"createingestionexecutionrequestinput"},"CreateIngestionExecutionRequestInput"),(0,r.kt)("p",null,"Input for creating an execution request input"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ingestionSourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the ingestion source to execute"))))),(0,r.kt)("h2",{id:"createinvitetokeninput"},"CreateInviteTokenInput"),(0,r.kt)("p",null,"Input provided when creating an invite token"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"roleUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the role to create the invite token for"))))),(0,r.kt)("h2",{id:"createnativeuserresettokeninput"},"CreateNativeUserResetTokenInput"),(0,r.kt)("p",null,"Input required to generate a password reset token for a native 
user."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"userUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the user to reset the password of"))))),(0,r.kt)("h2",{id:"createownershiptypeinput"},"CreateOwnershipTypeInput"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Custom Ownership Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the Custom Ownership Type"))))),(0,r.kt)("h2",{id:"createpostinput"},"CreatePostInput"),(0,r.kt)("p",null,"Input provided when creating a Post"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"postType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#posttype"},(0,r.kt)("code",null,"PostType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of 
post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"content",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#updatepostcontentinput"},(0,r.kt)("code",null,"UpdatePostContentInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The content of the post"))))),(0,r.kt)("h2",{id:"createqueryinput"},"CreateQueryInput"),(0,r.kt)("p",null,"Input required for creating a Query. Requires the 'Edit Queries' privilege for all query subjects."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#createquerypropertiesinput"},(0,r.kt)("code",null,"CreateQueryPropertiesInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties about the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subjects",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#createquerysubjectinput"},(0,r.kt)("code",null,"[CreateQuerySubjectInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Subjects for the query"))))),(0,r.kt)("h2",{id:"createquerypropertiesinput"},"CreateQueryPropertiesInput"),(0,r.kt)("p",null,"Input properties required for creating a Query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional display name for the 
Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description for the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"statement",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#querystatementinput"},(0,r.kt)("code",null,"QueryStatementInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Query contents"))))),(0,r.kt)("h2",{id:"createquerysubjectinput"},"CreateQuerySubjectInput"),(0,r.kt)("p",null,"Input required for creating a Query. For now, only datasets are supported."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the dataset that is the subject of the query"))))),(0,r.kt)("h2",{id:"createsecretinput"},"CreateSecretInput"),(0,r.kt)("p",null,"Input arguments for creating a new Secret"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the secret for reference in ingestion recipes"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The value of the secret, to be encrypted and 
stored"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description for the secret"))))),(0,r.kt)("h2",{id:"createtaginput"},"CreateTagInput"),(0,r.kt)("p",null,"Input required to create a new Tag"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional! A custom id to use as the primary key identifier for the Tag. If not provided, a random UUID will be generated as the id."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name for the Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional description for the Tag"))))),(0,r.kt)("h2",{id:"createtestconnectionrequestinput"},"CreateTestConnectionRequestInput"),(0,r.kt)("p",null,"Input for creating a test connection request"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"recipe",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A JSON-encoded 
recipe"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Advanced: The version of the ingestion framework to use"))))),(0,r.kt)("h2",{id:"createtestinput"},"CreateTestInput"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Advanced: a custom id for the test."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"category",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The category of the Test (user defined)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#testdefinitioninput"},(0,r.kt)("code",null,"TestDefinitionInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The test definition"))))),(0,r.kt)("h2",{id:"createviewinput"},"CreateViewInput"),(0,r.kt)("p",null,"Input provided when creating a DataHub 
View"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#datahubviewtype"},(0,r.kt)("code",null,"DataHubViewType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datahubviewdefinitioninput"},(0,r.kt)("code",null,"DataHubViewDefinitionInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The view definition itself"))))),(0,r.kt)("h2",{id:"dashboardeditablepropertiesupdate"},"DashboardEditablePropertiesUpdate"),(0,r.kt)("p",null,"Update to writable Dashboard fields"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Writable description aka documentation for a Dashboard"))))),(0,r.kt)("h2",{id:"dashboardupdateinput"},"DashboardUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a Dashboard 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use tags field instead Update to global tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#dashboardeditablepropertiesupdate"},(0,r.kt)("code",null,"DashboardEditablePropertiesUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to editable properties"))))),(0,r.kt)("h2",{id:"datafloweditablepropertiesupdate"},"DataFlowEditablePropertiesUpdate"),(0,r.kt)("p",null,"Update to writable Data Flow fields"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Writable description aka documentation for a Data 
Flow"))))),(0,r.kt)("h2",{id:"dataflowupdateinput"},"DataFlowUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a Data Flow aka Pipeline Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use tags field instead Update to global tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datafloweditablepropertiesupdate"},(0,r.kt)("code",null,"DataFlowEditablePropertiesUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to editable properties"))))),(0,r.kt)("h2",{id:"datahubviewdefinitioninput"},"DataHubViewDefinitionInput"),(0,r.kt)("p",null,"Input required for creating a DataHub View 
Definition"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of entity types that the view applies for. If left empty, then ALL entities will be in scope."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datahubviewfilterinput"},(0,r.kt)("code",null,"DataHubViewFilterInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of filters to apply."))))),(0,r.kt)("h2",{id:"datahubviewfilterinput"},"DataHubViewFilterInput"),(0,r.kt)("p",null,"Input required for creating a DataHub View Definition"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"operator",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#logicaloperator"},(0,r.kt)("code",null,"LogicalOperator!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The operator used to combine the filters."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of filters combined via an operator. 
If left empty, then no filters will be applied."))))),(0,r.kt)("h2",{id:"datajobeditablepropertiesupdate"},"DataJobEditablePropertiesUpdate"),(0,r.kt)("p",null,"Update to writable Data Job fields"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Writable description aka documentation for a Data Job"))))),(0,r.kt)("h2",{id:"datajobupdateinput"},"DataJobUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a Data Job aka Task Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use tags field instead Update to global tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to 
tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datajobeditablepropertiesupdate"},(0,r.kt)("code",null,"DataJobEditablePropertiesUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to editable properties"))))),(0,r.kt)("h2",{id:"dataproductentitiesinput"},"DataProductEntitiesInput"),(0,r.kt)("p",null,"Input required to fetch the entities inside of a Data Product."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional query filter for particular entities inside the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities to include in result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional Facet filters to apply to the result set"))))),(0,r.kt)("h2",{id:"datasetdeprecationupdate"},"DatasetDeprecationUpdate"),(0,r.kt)("p",null,"An update for the deprecation information for a Metadata 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the dataset is deprecated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"decommissionTime",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time user plan to decommission this dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"note",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional information about the dataset deprecation plan"))))),(0,r.kt)("h2",{id:"dataseteditablepropertiesupdate"},"DatasetEditablePropertiesUpdate"),(0,r.kt)("p",null,"Update to writable Dataset fields"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Writable description aka documentation for a Dataset"))))),(0,r.kt)("h2",{id:"datasetupdateinput"},"DatasetUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a Dataset 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datasetdeprecationupdate"},(0,r.kt)("code",null,"DatasetDeprecationUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to deprecation status"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#institutionalmemoryupdate"},(0,r.kt)("code",null,"InstitutionalMemoryUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to institutional memory, ie documentation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use tags field instead Update to global tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableSchemaMetadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#editableschemametadataupdate"},(0,r.kt)("code",null,"EditableSchemaMetadataUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to editable schema metadata of the 
dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#dataseteditablepropertiesupdate"},(0,r.kt)("code",null,"DatasetEditablePropertiesUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to editable properties"))))),(0,r.kt)("h2",{id:"descriptionupdateinput"},"DescriptionUpdateInput"),(0,r.kt)("p",null,"Incubating. Updates the description of a resource. Currently supports DatasetField descriptions only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The new description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the resource to attach the description to, eg dataset urn"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#subresourcetype"},(0,r.kt)("code",null,"SubResourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional sub resource type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A sub resource identifier, eg dataset field path"))))),(0,r.kt)("h2",{id:"domainentitiesinput"},"DomainEntitiesInput"),(0,r.kt)("p",null,"Input required to fetch the entities inside of a 
Domain."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional query filter for particular entities inside the domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities to include in result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional Facet filters to apply to the result set"))))),(0,r.kt)("h2",{id:"editableschemafieldinfoupdate"},"EditableSchemaFieldInfoUpdate"),(0,r.kt)("p",null,"Update to writable schema field metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flattened name of a field identifying the field the editable info is applied 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edited description of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags associated with the field"))))),(0,r.kt)("h2",{id:"editableschemametadataupdate"},"EditableSchemaMetadataUpdate"),(0,r.kt)("p",null,"Update to editable schema metadata of the dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableSchemaFieldInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#editableschemafieldinfoupdate"},(0,r.kt)("code",null,"[EditableSchemaFieldInfoUpdate!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to writable schema field metadata"))))),(0,r.kt)("h2",{id:"entitycountinput"},"EntityCountInput"),(0,r.kt)("p",null,"Input for the get entity counts endpoint"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"entityrequestcontext"},"EntityRequestContext"),(0,r.kt)("p",null,"Context that defines an entity page requesting 
recommendations"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Type of the enity being displayed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the entity being displayed"))))),(0,r.kt)("h2",{id:"facetfilterinput"},"FacetFilterInput"),(0,r.kt)("p",null,"Facet filters to apply to search results"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of field to filter by"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: Prefer `values` for single elements"),(0,r.kt)("p",null,"Value of the field to filter by. 
Deprecated in favor of ",(0,r.kt)("code",null,"values"),", which should accept a single element array for a value"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"values",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Values, one of which the intended field should match."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"negated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"If the filter should or should not be matched"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"condition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#filteroperator"},(0,r.kt)("code",null,"FilterOperator"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Condition for the values. How to If unset, assumed to be equality"))))),(0,r.kt)("h2",{id:"filterinput"},"FilterInput"),(0,r.kt)("p",null,"A set of filter criteria"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"and",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of conjunctive filters"))))),(0,r.kt)("h2",{id:"getaccesstokeninput"},"GetAccessTokenInput"),(0,r.kt)("p",null,"Input required to fetch a new Access 
Token."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesstokentype"},(0,r.kt)("code",null,"AccessTokenType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the Access Token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actorUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actor associated with the Access Token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"duration",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesstokenduration"},(0,r.kt)("code",null,"AccessTokenDuration!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The duration for which the Access Token is valid."))))),(0,r.kt)("h2",{id:"getgrantedprivilegesinput"},"GetGrantedPrivilegesInput"),(0,r.kt)("p",null,"Input for getting granted privileges"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"actorUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the actor"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceSpec",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcespec"},(0,r.kt)("code",null,"ResourceSpec"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Spec to identify resource. 
If empty, gets privileges granted to the actor"))))),(0,r.kt)("h2",{id:"getinvitetokeninput"},"GetInviteTokenInput"),(0,r.kt)("p",null,"Input provided when getting an invite token"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"roleUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the role to get the invite token for"))))),(0,r.kt)("h2",{id:"getquickfiltersinput"},"GetQuickFiltersInput"),(0,r.kt)("p",null,"Input for getting Quick Filters"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - A View to apply when generating results"))))),(0,r.kt)("h2",{id:"getrootglossaryentitiesinput"},"GetRootGlossaryEntitiesInput"),(0,r.kt)("p",null,"Input required when getting Business Glossary entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set 
returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Glossary Entities in the returned result set"))))),(0,r.kt)("h2",{id:"getschemablameinput"},"GetSchemaBlameInput"),(0,r.kt)("p",null,"Input for getting schema changes computed at a specific version."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The dataset urn"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Changes after this version are not shown. 
If not provided, this is the latestVersion."))))),(0,r.kt)("h2",{id:"getschemaversionlistinput"},"GetSchemaVersionListInput"),(0,r.kt)("p",null,"Input for getting list of schema versions."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The dataset urn"))))),(0,r.kt)("h2",{id:"getsecretvaluesinput"},"GetSecretValuesInput"),(0,r.kt)("p",null,"Input arguments for retrieving the plaintext values of a set of secrets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"secrets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of secret names"))))),(0,r.kt)("h2",{id:"globaltagsupdate"},"GlobalTagsUpdate"),(0,r.kt)("p",null,"Deprecated, use addTag or removeTag mutation instead\nUpdate to the Tags associated with a Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#tagassociationupdate"},(0,r.kt)("code",null,"[TagAssociationUpdate!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The new set of 
tags"))))),(0,r.kt)("h2",{id:"institutionalmemorymetadataupdate"},"InstitutionalMemoryMetadataUpdate"),(0,r.kt)("p",null,"An institutional memory to add to a Metadata Entity\nTODO Add a USER or GROUP actor enum"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"url",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Link to a document or wiki page or another internal resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"author",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The corp user urn of the author of the metadata"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which this metadata was created"))))),(0,r.kt)("h2",{id:"institutionalmemoryupdate"},"InstitutionalMemoryUpdate"),(0,r.kt)("p",null,"An update for the institutional memory information for a Metadata 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"elements",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#institutionalmemorymetadataupdate"},(0,r.kt)("code",null,"[InstitutionalMemoryMetadataUpdate!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The individual references in the institutional memory"))))),(0,r.kt)("h2",{id:"lineageedge"},"LineageEdge"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"downstreamUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the source entity. This urn is downstream of the destinationUrn."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"upstreamUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the destination entity. 
This urn is upstream of the destinationUrn"))))),(0,r.kt)("h2",{id:"lineageinput"},"LineageInput"),(0,r.kt)("p",null,"Input for the list lineage property of an Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"direction",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#lineagedirection"},(0,r.kt)("code",null,"LineageDirection!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The direction of the relationship, either incoming or outgoing from the source entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"separateSiblings",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional flag to not merge siblings in the response. 
They are merged by default."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional starting time to filter on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional ending time to filter on"))))),(0,r.kt)("h2",{id:"listaccesstokeninput"},"ListAccessTokenInput"),(0,r.kt)("p",null,"Input arguments for listing access tokens"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Facet filters to apply to search results"))))),(0,r.kt)("h2",{id:"listdomainsinput"},"ListDomainsInput"),(0,r.kt)("p",null,"Input required when listing DataHub 
Domains"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Domains to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))))),(0,r.kt)("h2",{id:"listglobalviewsinput"},"ListGlobalViewsInput"),(0,r.kt)("p",null,"Input provided when listing DataHub Global Views"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Views to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search 
query"))))),(0,r.kt)("h2",{id:"listgroupsinput"},"ListGroupsInput"),(0,r.kt)("p",null,"Input required when listing DataHub Groups"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Policies to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))))),(0,r.kt)("h2",{id:"listingestionsourcesinput"},"ListIngestionSourcesInput"),(0,r.kt)("p",null,"Input arguments for listing Ingestion Sources"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be 
returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional Facet filters to apply to the result set"))))),(0,r.kt)("h2",{id:"listmyviewsinput"},"ListMyViewsInput"),(0,r.kt)("p",null,"Input provided when listing DataHub Views"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Views to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#datahubviewtype"},(0,r.kt)("code",null,"DataHubViewType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - List the type of View to filter for."))))),(0,r.kt)("h2",{id:"listownershiptypesinput"},"ListOwnershipTypesInput"),(0,r.kt)("p",null,"Input required for listing custom ownership types 
entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned, default is 0"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Custom Ownership Types to be returned in the result set, default is 20"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional Facet filters to apply to the result set"))))),(0,r.kt)("h2",{id:"listpoliciesinput"},"ListPoliciesInput"),(0,r.kt)("p",null,"Input required when listing DataHub Access Policies"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set 
returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Policies to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))))),(0,r.kt)("h2",{id:"listpostsinput"},"ListPostsInput"),(0,r.kt)("p",null,"Input provided when listing existing posts"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Roles to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))))),(0,r.kt)("h2",{id:"listqueriesinput"},"ListQueriesInput"),(0,r.kt)("p",null,"Input required for listing query 
entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Queries to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A raw search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"source",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#querysource"},(0,r.kt)("code",null,"QuerySource"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional source for the query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional Urn for the parent dataset that the query is associated with."))))),(0,r.kt)("h2",{id:"listrecommendationsinput"},"ListRecommendationsInput"),(0,r.kt)("p",null,"Input arguments for fetching UI 
recommendations"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"userUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the actor requesting recommendations"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"requestContext",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#recommendationrequestcontext"},(0,r.kt)("code",null,"RecommendationRequestContext"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Context provider by the caller requesting recommendations"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Max number of modules to return"))))),(0,r.kt)("h2",{id:"listrolesinput"},"ListRolesInput"),(0,r.kt)("p",null,"Input provided when listing existing roles"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Roles to be returned in the result 
set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))))),(0,r.kt)("h2",{id:"listsecretsinput"},"ListSecretsInput"),(0,r.kt)("p",null,"Input for listing DataHub Secrets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional search query"))))),(0,r.kt)("h2",{id:"listtestsinput"},"ListTestsInput"),(0,r.kt)("p",null,"Input required when listing DataHub Tests"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The 
maximum number of Domains to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional query string to match on"))))),(0,r.kt)("h2",{id:"listusersinput"},"ListUsersInput"),(0,r.kt)("p",null,"Input required when listing DataHub Users"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Policies to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))))),(0,r.kt)("h2",{id:"metadataanalyticsinput"},"MetadataAnalyticsInput"),(0,r.kt)("p",null,"Input to fetch metadata analytics charts"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity type to fetch analytics for (If empty, queries across all 
entities)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the domain to fetch analytics for (If empty or GLOBAL, queries across all domains)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Search query to filter down result (If empty, does not apply any search query)"))))),(0,r.kt)("h2",{id:"notebookeditablepropertiesupdate"},"NotebookEditablePropertiesUpdate"),(0,r.kt)("p",null,"Update to writable Notebook fields"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Writable description aka documentation for a Notebook"))))),(0,r.kt)("h2",{id:"notebookupdateinput"},"NotebookUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a Notebook Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to 
ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#notebookeditablepropertiesupdate"},(0,r.kt)("code",null,"NotebookEditablePropertiesUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to editable properties"))))),(0,r.kt)("h2",{id:"ownerinput"},"OwnerInput"),(0,r.kt)("p",null,"Input provided when adding an owner to an asset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Owner to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerEntityType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownerentitytype"},(0,r.kt)("code",null,"OwnerEntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The owner type, either a user or group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownershiptype"},(0,r.kt)("code",null,"OwnershipType"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"The ownership type for the new owner. If none is provided, then a new NONE will be added. 
Deprecated - Use ownershipTypeUrn field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the ownership type entity."))))),(0,r.kt)("h2",{id:"ownershipupdate"},"OwnershipUpdate"),(0,r.kt)("p",null,"An update for the ownership information for a Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"owners",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownerupdate"},(0,r.kt)("code",null,"[OwnerUpdate!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The updated list of owners"))))),(0,r.kt)("h2",{id:"ownerupdate"},"OwnerUpdate"),(0,r.kt)("p",null,"An owner to add to a Metadata Entity\nTODO Add a USER or GROUP actor enum"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"owner",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The owner URN, either a corpGroup or corpuser"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownershiptype"},(0,r.kt)("code",null,"OwnershipType"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"The owner type. 
Deprecated - Use ownershipTypeUrn field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the ownership type entity."))))),(0,r.kt)("h2",{id:"policymatchcriterioninput"},"PolicyMatchCriterionInput"),(0,r.kt)("p",null,"Criterion to define relationship between field and values"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the field that the criterion refers to e.g. entity_type, entity_urn, domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"values",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Values. 
Matches criterion if any one of the values matches condition (OR-relationship)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"condition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policymatchcondition"},(0,r.kt)("code",null,"PolicyMatchCondition!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the field that the criterion refers to"))))),(0,r.kt)("h2",{id:"policymatchfilterinput"},"PolicyMatchFilterInput"),(0,r.kt)("p",null,"Filter object that encodes a complex filter logic with OR + AND"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"criteria",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#policymatchcriterioninput"},(0,r.kt)("code",null,"[PolicyMatchCriterionInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of criteria to apply"))))),(0,r.kt)("h2",{id:"policyupdateinput"},"PolicyUpdateInput"),(0,r.kt)("p",null,"Input provided when creating or updating an Access Policy"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policytype"},(0,r.kt)("code",null,"PolicyType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Policy Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Policy 
name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"state",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policystate"},(0,r.kt)("code",null,"PolicyState!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Policy state"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A Policy description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcefilterinput"},(0,r.kt)("code",null,"ResourceFilterInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The set of resources that the Policy privileges apply to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The set of privileges that the Policy grants"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#actorfilterinput"},(0,r.kt)("code",null,"ActorFilterInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The set of actors that the Policy privileges are granted to"))))),(0,r.kt)("h2",{id:"querystatementinput"},"QueryStatementInput"),(0,r.kt)("p",null,"Input required for creating a Query Statement"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query 
text"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"language",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#querylanguage"},(0,r.kt)("code",null,"QueryLanguage!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query language"))))),(0,r.kt)("h2",{id:"recommendationrequestcontext"},"RecommendationRequestContext"),(0,r.kt)("p",null,"Context that defines the page requesting recommendations\ni.e. for search pages, the query/filters. for entity pages, the entity urn and tab"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"scenario",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#scenariotype"},(0,r.kt)("code",null,"ScenarioType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Scenario in which the recommendations will be displayed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchRequestContext",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchrequestcontext"},(0,r.kt)("code",null,"SearchRequestContext"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional context for defining the search page requesting recommendations"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityRequestContext",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#entityrequestcontext"},(0,r.kt)("code",null,"EntityRequestContext"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional context for defining the entity page requesting recommendations"))))),(0,r.kt)("h2",{id:"relatedtermsinput"},"RelatedTermsInput"),(0,r.kt)("p",null,"Input provided when adding Terms to an 
asset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Glossary Term urn to add or remove this relationship to/from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"termUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Glossary Term to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationshipType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#termrelationshiptype"},(0,r.kt)("code",null,"TermRelationshipType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of relationship we're adding or removing to/from for a Glossary Term"))))),(0,r.kt)("h2",{id:"relationshipsinput"},"RelationshipsInput"),(0,r.kt)("p",null,"Input for the list relationships field of an Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The types of relationships to query, representing an OR"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"direction",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#relationshipdirection"},(0,r.kt)("code",null,"RelationshipDirection!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The direction of the relationship, either incoming or outgoing from the source 
entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))))),(0,r.kt)("h2",{id:"removegroupmembersinput"},"RemoveGroupMembersInput"),(0,r.kt)("p",null,"Input required to remove members from an external DataHub group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groupUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The group to remove members from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"userUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The members to remove from the group"))))),(0,r.kt)("h2",{id:"removelinkinput"},"RemoveLinkInput"),(0,r.kt)("p",null,"Input provided when removing the association between a Metadata Entity and a Link"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"linkUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The url of the link to add or remove, which uniquely identifies the 
Link"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the resource or entity to attach the link to, for example a dataset urn"))))),(0,r.kt)("h2",{id:"removenativegroupmembersinput"},"RemoveNativeGroupMembersInput"),(0,r.kt)("p",null,"Input required to remove members from a native DataHub group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groupUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The group to remove members from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"userUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The members to remove from the group"))))),(0,r.kt)("h2",{id:"removeownerinput"},"RemoveOwnerInput"),(0,r.kt)("p",null,"Input provided when removing the association between a Metadata Entity and an user or group owner"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Owner to add or 
remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The ownership type to remove, optional. By default will remove regardless of ownership type."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the resource or entity to attach or remove the owner from, for example a dataset urn"))))),(0,r.kt)("h2",{id:"reportoperationinput"},"ReportOperationInput"),(0,r.kt)("p",null,"Input provided to report an asset operation"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the asset (e.g. dataset) to report the operation for"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"operationType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#operationtype"},(0,r.kt)("code",null,"OperationType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of operation that was performed. Required"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customOperationType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A custom type of operation. 
Required if operation type is CUSTOM."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#operationsourcetype"},(0,r.kt)("code",null,"OperationSourceType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The source or reporter of the operation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#stringmapentryinput"},(0,r.kt)("code",null,"[StringMapEntryInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of key-value parameters to include"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"partition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional partition identifier"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"numAffectedRows",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional: The number of affected rows"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional: Provide a timestamp associated with the operation. 
If not provided, one will be generated for you based on the current time."))))),(0,r.kt)("h2",{id:"resourcefilterinput"},"ResourceFilterInput"),(0,r.kt)("p",null,"Input required when creating or updating an Access Policies Determines which resources the Policy applies to"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the resource the policy should apply to Not required because in the future we want to support filtering by type OR by domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of specific resource urns to apply the filter to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"allResources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether of not to apply the filter to all resources of the type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#policymatchfilterinput"},(0,r.kt)("code",null,"PolicyMatchFilterInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether of not to apply the filter to all resources of the type"))))),(0,r.kt)("h2",{id:"resourcerefinput"},"ResourceRefInput"),(0,r.kt)("p",null,"Reference to a resource to apply an action 
to"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the resource being referenced"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#subresourcetype"},(0,r.kt)("code",null,"SubResourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional type of a sub resource to attach the Tag to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional sub resource identifier to attach the Tag to"))))),(0,r.kt)("h2",{id:"resourcespec"},"ResourceSpec"),(0,r.kt)("p",null,"Spec to identify resource"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Resource type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Resource urn"))))),(0,r.kt)("h2",{id:"rollbackingestioninput"},"RollbackIngestionInput"),(0,r.kt)("p",null,"Input for rolling back an ingestion 
execution"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"runId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An ingestion run ID"))))),(0,r.kt)("h2",{id:"scrollacrossentitiesinput"},"ScrollAcrossEntitiesInput"),(0,r.kt)("p",null,"Input arguments for a full text search query across entities, specifying a starting pointer. Allows paging beyond 10k results"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be searched. 
If this is not provided, all entities will be searched."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"scrollId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results, an opaque ID the backend understands as a pointer"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"keepAlive",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The amount of time to keep the point in time snapshot alive, takes a time unit based string ex: 5m or 30s"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - A View to apply when generating results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchflags"},(0,r.kt)("code",null,"SearchFlags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flags controlling search options"))))),(0,r.kt)("h2",{id:"scrollacrosslineageinput"},"ScrollAcrossLineageInput"),(0,r.kt)("p",null,"Input arguments for a search query over the results of a multi-hop graph query, uses scroll API"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the source node"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"direction",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#lineagedirection"},(0,r.kt)("code",null,"LineageDirection!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The direction of the relationship, either incoming or outgoing from the source entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be searched. 
If this is not provided, all entities will be searched."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"scrollId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results, an opaque ID the backend understands as a pointer"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"keepAlive",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The amount of time to keep the point in time snapshot alive, takes a time unit based string ex: 5m or 30s"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional starting time to filter on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional ending time to filter on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchflags"},(0,r.kt)("code",null,"SearchFlags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flags controlling search options"))))),(0,r.kt)("h2",{id:"searchacrossentitiesinput"},"SearchAcrossEntitiesInput"),(0,r.kt)("p",null,"Input arguments for a full text search query across entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be searched. 
If this is not provided, all entities will be searched."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: Use `orFilters`- they are more expressive"),(0,r.kt)("p",null,"Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - A View to apply when generating results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchflags"},(0,r.kt)("code",null,"SearchFlags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flags controlling search options"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sortInput",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchsortinput"},(0,r.kt)("code",null,"SearchSortInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - Information on how to sort this search result"))))),(0,r.kt)("h2",{id:"searchacrosslineageinput"},"SearchAcrossLineageInput"),(0,r.kt)("p",null,"Input arguments for a search query over the results of a multi-hop graph query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the source node"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"direction",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#lineagedirection"},(0,r.kt)("code",null,"LineageDirection!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The direction of the relationship, either incoming or outgoing from the source entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be searched. 
If this is not provided, all entities will be searched."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: Use `orFilters`- they are more expressive"),(0,r.kt)("p",null,"Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional starting time to filter on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional ending time to filter on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchflags"},(0,r.kt)("code",null,"SearchFlags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flags controlling search options"))))),(0,r.kt)("h2",{id:"searchflags"},"SearchFlags"),(0,r.kt)("p",null,"Set of flags to control search behavior"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"skipCache",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether to skip cache"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"maxAggValues",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of values in an facet aggregation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fulltext",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Structured or unstructured fulltext 
query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"skipHighlighting",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether to skip highlighting"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"skipAggregates",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether to skip aggregates/facets"))))),(0,r.kt)("h2",{id:"searchinput"},"SearchInput"),(0,r.kt)("p",null,"Input arguments for a full text search query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Metadata Entity type to be searched against"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The raw query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities to include in result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: Use `orFilters`- they are 
more expressive"),(0,r.kt)("p",null,"Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. (or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchflags"},(0,r.kt)("code",null,"SearchFlags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flags controlling search options"))))),(0,r.kt)("h2",{id:"searchrequestcontext"},"SearchRequestContext"),(0,r.kt)("p",null,"Context that defines a search page requesting recommendatinos"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Faceted filters applied to search results"))))),(0,r.kt)("h2",{id:"searchsortinput"},"SearchSortInput"),(0,r.kt)("p",null,"Input required in order to sort search 
results"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"sortCriterion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#sortcriterion"},(0,r.kt)("code",null,"SortCriterion!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A criterion to sort search results on"))))),(0,r.kt)("h2",{id:"sortcriterion"},"SortCriterion"),(0,r.kt)("p",null,"A single sorting criterion for sorting search."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A field upon which we'll do sorting on."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sortOrder",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#sortorder"},(0,r.kt)("code",null,"SortOrder!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The order in which we will be sorting"))))),(0,r.kt)("h2",{id:"stepstateinput"},"StepStateInput"),(0,r.kt)("p",null,"The input required to update the state of a step"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The globally unique id for the 
step"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#stringmapentryinput"},(0,r.kt)("code",null,"[StringMapEntryInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The new properties for the step"))))),(0,r.kt)("h2",{id:"stringmapentryinput"},"StringMapEntryInput"),(0,r.kt)("p",null,"String map entry input"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"key",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The key of the map entry"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The value fo the map entry"))))),(0,r.kt)("h2",{id:"tagassociationinput"},"TagAssociationInput"),(0,r.kt)("p",null,"Input provided when updating the association between a Metadata Entity and a Tag"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tagUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Tag to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target Metadata Entity to add or remove the Tag 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#subresourcetype"},(0,r.kt)("code",null,"SubResourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional type of a sub resource to attach the Tag to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional sub resource identifier to attach the Tag to"))))),(0,r.kt)("h2",{id:"tagassociationupdate"},"TagAssociationUpdate"),(0,r.kt)("p",null,"Deprecated, use addTag or removeTag mutation instead\nA tag update to be applied"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tag",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#tagupdateinput"},(0,r.kt)("code",null,"TagUpdateInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tag being applied"))))),(0,r.kt)("h2",{id:"tagupdateinput"},"TagUpdateInput"),(0,r.kt)("p",null,"Deprecated, use addTag or removeTag mutations instead\nAn update for a particular Tag entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name of a 
Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the tag"))))),(0,r.kt)("h2",{id:"termassociationinput"},"TermAssociationInput"),(0,r.kt)("p",null,"Input provided when updating the association between a Metadata Entity and a Glossary Term"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"termUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Glossary Term to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target Metadata Entity to add or remove the Glossary Term from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#subresourcetype"},(0,r.kt)("code",null,"SubResourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional type of a sub resource to attach the Glossary Term to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional sub resource identifier to attach the Glossary Term 
to"))))),(0,r.kt)("h2",{id:"testdefinitioninput"},"TestDefinitionInput"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"json",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The string representation of the Test"))))),(0,r.kt)("h2",{id:"updatecorpuserviewssettingsinput"},"UpdateCorpUserViewsSettingsInput"),(0,r.kt)("p",null,"Input required to update a users settings."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"defaultView",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The URN of the View that serves as this user's personal default. 
If not provided, any existing default view will be removed."))))),(0,r.kt)("h2",{id:"updatedataproductinput"},"UpdateDataProductInput"),(0,r.kt)("p",null,"Input properties required for update a DataProduct"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A display name for the DataProduct"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description for the DataProduct"))))),(0,r.kt)("h2",{id:"updatedeprecationinput"},"UpdateDeprecationInput"),(0,r.kt)("p",null,"Input provided when setting the Deprecation status for an Entity."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the Entity to set deprecation for."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the Entity is marked as deprecated."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"decommissionTime",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - The time user plan 
to decommission this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"note",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - Additional information about the entity deprecation plan"))))),(0,r.kt)("h2",{id:"updateembedinput"},"UpdateEmbedInput"),(0,r.kt)("p",null,"Input required to set or clear information related to rendering a Data Asset inside of DataHub."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The URN associated with the Data Asset to update. Only dataset, dashboard, and chart urns are currently supported."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"renderUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Set or clear a URL used to render an embedded asset."))))),(0,r.kt)("h2",{id:"updateglobalviewssettingsinput"},"UpdateGlobalViewsSettingsInput"),(0,r.kt)("p",null,"Input required to update Global View Settings."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"defaultView",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The URN of the View that serves as the Global, or organization-wide, default. 
If this field is not provided, the existing Global Default will be cleared."))))),(0,r.kt)("h2",{id:"updateingestionsourceconfiginput"},"UpdateIngestionSourceConfigInput"),(0,r.kt)("p",null,"Input parameters for creating / updating an Ingestion Source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"recipe",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A JSON-encoded recipe"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The version of DataHub Ingestion Framework to use when executing the recipe."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executorId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The id of the executor to use for executing the recipe"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"debugMode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not to run ingestion in debug mode"))))),(0,r.kt)("h2",{id:"updateingestionsourceinput"},"UpdateIngestionSourceInput"),(0,r.kt)("p",null,"Input arguments for creating / updating an Ingestion 
Source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A name associated with the ingestion source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the source itself, e.g. mysql, bigquery, bigquery-usage. Should match the recipe."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description associated with the ingestion source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schedule",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#updateingestionsourcescheduleinput"},(0,r.kt)("code",null,"UpdateIngestionSourceScheduleInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional schedule for the ingestion source. 
If not provided, the source is only available for run on-demand."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"config",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#updateingestionsourceconfiginput"},(0,r.kt)("code",null,"UpdateIngestionSourceConfigInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of type-specific ingestion source configurations"))))),(0,r.kt)("h2",{id:"updateingestionsourcescheduleinput"},"UpdateIngestionSourceScheduleInput"),(0,r.kt)("p",null,"Input arguments for creating / updating the schedule of an Ingestion Source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"interval",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The cron-formatted interval describing when the job should be executed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"timezone",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the timezone in which the cron interval should be scheduled (e.g. 
America/Los Angeles)"))))),(0,r.kt)("h2",{id:"updatelineageinput"},"UpdateLineageInput"),(0,r.kt)("p",null,"Input required in order to upsert lineage edges"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"edgesToAdd",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageedge"},(0,r.kt)("code",null,"[LineageEdge]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"New lineage edges to upsert"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"edgesToRemove",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageedge"},(0,r.kt)("code",null,"[LineageEdge]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Lineage edges to remove. Takes precedence over edgesToAdd - so edges existing both edgesToAdd and edgesToRemove will be removed."))))),(0,r.kt)("h2",{id:"updatemediainput"},"UpdateMediaInput"),(0,r.kt)("p",null,"Input provided for filling in a post content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#mediatype"},(0,r.kt)("code",null,"MediaType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of media"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"location",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The location of the media (a URL)"))))),(0,r.kt)("h2",{id:"updatenameinput"},"UpdateNameInput"),(0,r.kt)("p",null,"Input for updating the name of an 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The new name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the resource to update the name for"))))),(0,r.kt)("h2",{id:"updateownershiptypeinput"},"UpdateOwnershipTypeInput"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Custom Ownership Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the Custom Ownership Type"))))),(0,r.kt)("h2",{id:"updateparentnodeinput"},"UpdateParentNodeInput"),(0,r.kt)("p",null,"Input for updating the parent node of a resource. 
Currently only GlossaryNodes and GlossaryTerms have parentNodes."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentNode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The new parent node urn. If parentNode is null, this will remove the parent from this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the resource to update the parent node for"))))),(0,r.kt)("h2",{id:"updatepostcontentinput"},"UpdatePostContentInput"),(0,r.kt)("p",null,"Input provided for filling in a post content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"contentType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#postcontenttype"},(0,r.kt)("code",null,"PostContentType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of post content"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The title of the post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional content of the 
post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"link",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional link that the post is associated with"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"media",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#updatemediainput"},(0,r.kt)("code",null,"UpdateMediaInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional media contained in the post"))))),(0,r.kt)("h2",{id:"updatequeryinput"},"UpdateQueryInput"),(0,r.kt)("p",null,"Input required for updating an existing Query. Requires the 'Edit Queries' privilege for all query subjects."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#updatequerypropertiesinput"},(0,r.kt)("code",null,"UpdateQueryPropertiesInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties about the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subjects",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#updatequerysubjectinput"},(0,r.kt)("code",null,"[UpdateQuerySubjectInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Subjects for the query"))))),(0,r.kt)("h2",{id:"updatequerypropertiesinput"},"UpdateQueryPropertiesInput"),(0,r.kt)("p",null,"Input properties required for creating a Query. 
Any non-null fields will be updated if provided."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional display name for the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description for the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"statement",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#querystatementinput"},(0,r.kt)("code",null,"QueryStatementInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Query contents"))))),(0,r.kt)("h2",{id:"updatequerysubjectinput"},"UpdateQuerySubjectInput"),(0,r.kt)("p",null,"Input required for creating a Query. 
For now, only datasets are supported."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the dataset that is the subject of the query"))))),(0,r.kt)("h2",{id:"updatetestinput"},"UpdateTestInput"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"category",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The category of the Test (user defined)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#testdefinitioninput"},(0,r.kt)("code",null,"TestDefinitionInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The test definition"))))),(0,r.kt)("h2",{id:"updateusersettinginput"},"UpdateUserSettingInput"),(0,r.kt)("p",null,"Input for updating a user 
setting"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#usersetting"},(0,r.kt)("code",null,"UserSetting!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the setting"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The new value of the setting"))))),(0,r.kt)("h2",{id:"updateviewinput"},"UpdateViewInput"),(0,r.kt)("p",null,"Input provided when updating a DataHub View"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datahubviewdefinitioninput"},(0,r.kt)("code",null,"DataHubViewDefinitionInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The view definition itself"))))))}d.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkdocs_website=self.webpackChunkdocs_website||[]).push([[6806],{3905:(t,l,e)=>{e.d(l,{Zo:()=>i,kt:()=>c});var n=e(67294);function r(t,l,e){return l in 
t?Object.defineProperty(t,l,{value:e,enumerable:!0,configurable:!0,writable:!0}):t[l]=e,t}function u(t,l){var e=Object.keys(t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(t);l&&(n=n.filter((function(l){return Object.getOwnPropertyDescriptor(t,l).enumerable}))),e.push.apply(e,n)}return e}function a(t){for(var l=1;l=0||(r[e]=t[e]);return r}(t,l);if(Object.getOwnPropertySymbols){var u=Object.getOwnPropertySymbols(t);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(t,e)&&(r[e]=t[e])}return r}var o=n.createContext({}),s=function(t){var l=n.useContext(o),e=l;return t&&(e="function"==typeof t?t(l):a(a({},l),t)),e},i=function(t){var l=s(t.components);return n.createElement(o.Provider,{value:l},t.children)},d={inlineCode:"code",wrapper:function(t){var l=t.children;return n.createElement(n.Fragment,{},l)}},p=n.forwardRef((function(t,l){var e=t.components,r=t.mdxType,u=t.originalType,o=t.parentName,i=k(t,["components","mdxType","originalType","parentName"]),p=s(e),c=r,h=p["".concat(o,".").concat(c)]||p[c]||d[c]||u;return e?n.createElement(h,a(a({ref:l},i),{},{components:e})):n.createElement(h,a({ref:l},i))}));function c(t,l){var e=arguments,r=l&&l.mdxType;if("string"==typeof t||r){var u=e.length,a=new Array(u);a[0]=p;var k={};for(var o in l)hasOwnProperty.call(l,o)&&(k[o]=l[o]);k.originalType=t,k.mdxType="string"==typeof t?t:r,a[1]=k;for(var s=2;s{e.r(l),e.d(l,{assets:()=>o,contentTitle:()=>a,default:()=>d,frontMatter:()=>u,metadata:()=>k,toc:()=>s});var n=e(83117),r=(e(67294),e(3905));const u={id:"inputObjects",title:"Input objects",slug:"inputObjects",sidebar_position:7},a=void 0,k={unversionedId:"graphql/inputObjects",id:"graphql/inputObjects",title:"Input 
objects",description:"AcceptRoleInput",source:"@site/genDocs/graphql/inputObjects.md",sourceDirName:"graphql",slug:"/graphql/inputObjects",permalink:"/docs/graphql/inputObjects",draft:!1,editUrl:"https://github.com/datahub-project/datahub/blob/master/genDocs/graphql/inputObjects.md",tags:[],version:"current",sidebarPosition:7,frontMatter:{id:"inputObjects",title:"Input objects",slug:"inputObjects",sidebar_position:7},sidebar:"overviewSidebar",previous:{title:"Objects",permalink:"/docs/graphql/objects"},next:{title:"Interfaces",permalink:"/docs/graphql/interfaces"}},o={},s=[{value:"AcceptRoleInput",id:"acceptroleinput",level:2},{value:"ActorFilterInput",id:"actorfilterinput",level:2},{value:"AddGroupMembersInput",id:"addgroupmembersinput",level:2},{value:"AddLinkInput",id:"addlinkinput",level:2},{value:"AddNativeGroupMembersInput",id:"addnativegroupmembersinput",level:2},{value:"AddOwnerInput",id:"addownerinput",level:2},{value:"AddOwnersInput",id:"addownersinput",level:2},{value:"AddTagsInput",id:"addtagsinput",level:2},{value:"AddTermsInput",id:"addtermsinput",level:2},{value:"AggregateAcrossEntitiesInput",id:"aggregateacrossentitiesinput",level:2},{value:"AndFilterInput",id:"andfilterinput",level:2},{value:"AspectParams",id:"aspectparams",level:2},{value:"AutoCompleteInput",id:"autocompleteinput",level:2},{value:"AutoCompleteMultipleInput",id:"autocompletemultipleinput",level:2},{value:"BatchAddOwnersInput",id:"batchaddownersinput",level:2},{value:"BatchAddTagsInput",id:"batchaddtagsinput",level:2},{value:"BatchAddTermsInput",id:"batchaddtermsinput",level:2},{value:"BatchAssignRoleInput",id:"batchassignroleinput",level:2},{value:"BatchDatasetUpdateInput",id:"batchdatasetupdateinput",level:2},{value:"BatchGetStepStatesInput",id:"batchgetstepstatesinput",level:2},{value:"BatchRemoveOwnersInput",id:"batchremoveownersinput",level:2},{value:"BatchRemoveTagsInput",id:"batchremovetagsinput",level:2},{value:"BatchRemoveTermsInput",id:"batchremovetermsinput",level:2},{valu
e:"BatchSetDataProductInput",id:"batchsetdataproductinput",level:2},{value:"BatchSetDomainInput",id:"batchsetdomaininput",level:2},{value:"BatchUpdateDeprecationInput",id:"batchupdatedeprecationinput",level:2},{value:"BatchUpdateSoftDeletedInput",id:"batchupdatesoftdeletedinput",level:2},{value:"BatchUpdateStepStatesInput",id:"batchupdatestepstatesinput",level:2},{value:"BrowseInput",id:"browseinput",level:2},{value:"BrowsePathsInput",id:"browsepathsinput",level:2},{value:"BrowseV2Input",id:"browsev2input",level:2},{value:"CancelIngestionExecutionRequestInput",id:"cancelingestionexecutionrequestinput",level:2},{value:"ChartEditablePropertiesUpdate",id:"charteditablepropertiesupdate",level:2},{value:"ChartUpdateInput",id:"chartupdateinput",level:2},{value:"ContainerEntitiesInput",id:"containerentitiesinput",level:2},{value:"CorpGroupUpdateInput",id:"corpgroupupdateinput",level:2},{value:"CorpUserUpdateInput",id:"corpuserupdateinput",level:2},{value:"CreateAccessTokenInput",id:"createaccesstokeninput",level:2},{value:"CreateDataProductInput",id:"createdataproductinput",level:2},{value:"CreateDataProductPropertiesInput",id:"createdataproductpropertiesinput",level:2},{value:"CreateDomainInput",id:"createdomaininput",level:2},{value:"CreateGlossaryEntityInput",id:"createglossaryentityinput",level:2},{value:"CreateGroupInput",id:"creategroupinput",level:2},{value:"CreateIngestionExecutionRequestInput",id:"createingestionexecutionrequestinput",level:2},{value:"CreateInviteTokenInput",id:"createinvitetokeninput",level:2},{value:"CreateNativeUserResetTokenInput",id:"createnativeuserresettokeninput",level:2},{value:"CreateOwnershipTypeInput",id:"createownershiptypeinput",level:2},{value:"CreatePostInput",id:"createpostinput",level:2},{value:"CreateQueryInput",id:"createqueryinput",level:2},{value:"CreateQueryPropertiesInput",id:"createquerypropertiesinput",level:2},{value:"CreateQuerySubjectInput",id:"createquerysubjectinput",level:2},{value:"CreateSecretInput",id:"createsecr
etinput",level:2},{value:"CreateTagInput",id:"createtaginput",level:2},{value:"CreateTestConnectionRequestInput",id:"createtestconnectionrequestinput",level:2},{value:"CreateTestInput",id:"createtestinput",level:2},{value:"CreateViewInput",id:"createviewinput",level:2},{value:"DashboardEditablePropertiesUpdate",id:"dashboardeditablepropertiesupdate",level:2},{value:"DashboardUpdateInput",id:"dashboardupdateinput",level:2},{value:"DataFlowEditablePropertiesUpdate",id:"datafloweditablepropertiesupdate",level:2},{value:"DataFlowUpdateInput",id:"dataflowupdateinput",level:2},{value:"DataHubViewDefinitionInput",id:"datahubviewdefinitioninput",level:2},{value:"DataHubViewFilterInput",id:"datahubviewfilterinput",level:2},{value:"DataJobEditablePropertiesUpdate",id:"datajobeditablepropertiesupdate",level:2},{value:"DataJobUpdateInput",id:"datajobupdateinput",level:2},{value:"DataProductEntitiesInput",id:"dataproductentitiesinput",level:2},{value:"DatasetDeprecationUpdate",id:"datasetdeprecationupdate",level:2},{value:"DatasetEditablePropertiesUpdate",id:"dataseteditablepropertiesupdate",level:2},{value:"DatasetUpdateInput",id:"datasetupdateinput",level:2},{value:"DescriptionUpdateInput",id:"descriptionupdateinput",level:2},{value:"DomainEntitiesInput",id:"domainentitiesinput",level:2},{value:"EditableSchemaFieldInfoUpdate",id:"editableschemafieldinfoupdate",level:2},{value:"EditableSchemaMetadataUpdate",id:"editableschemametadataupdate",level:2},{value:"EntityCountInput",id:"entitycountinput",level:2},{value:"EntityRequestContext",id:"entityrequestcontext",level:2},{value:"FacetFilterInput",id:"facetfilterinput",level:2},{value:"FilterInput",id:"filterinput",level:2},{value:"GetAccessTokenInput",id:"getaccesstokeninput",level:2},{value:"GetGrantedPrivilegesInput",id:"getgrantedprivilegesinput",level:2},{value:"GetInviteTokenInput",id:"getinvitetokeninput",level:2},{value:"GetQuickFiltersInput",id:"getquickfiltersinput",level:2},{value:"GetRootGlossaryEntitiesInput",id:"getr
ootglossaryentitiesinput",level:2},{value:"GetSchemaBlameInput",id:"getschemablameinput",level:2},{value:"GetSchemaVersionListInput",id:"getschemaversionlistinput",level:2},{value:"GetSecretValuesInput",id:"getsecretvaluesinput",level:2},{value:"GlobalTagsUpdate",id:"globaltagsupdate",level:2},{value:"InstitutionalMemoryMetadataUpdate",id:"institutionalmemorymetadataupdate",level:2},{value:"InstitutionalMemoryUpdate",id:"institutionalmemoryupdate",level:2},{value:"LineageEdge",id:"lineageedge",level:2},{value:"LineageInput",id:"lineageinput",level:2},{value:"ListAccessTokenInput",id:"listaccesstokeninput",level:2},{value:"ListDomainsInput",id:"listdomainsinput",level:2},{value:"ListGlobalViewsInput",id:"listglobalviewsinput",level:2},{value:"ListGroupsInput",id:"listgroupsinput",level:2},{value:"ListIngestionSourcesInput",id:"listingestionsourcesinput",level:2},{value:"ListMyViewsInput",id:"listmyviewsinput",level:2},{value:"ListOwnershipTypesInput",id:"listownershiptypesinput",level:2},{value:"ListPoliciesInput",id:"listpoliciesinput",level:2},{value:"ListPostsInput",id:"listpostsinput",level:2},{value:"ListQueriesInput",id:"listqueriesinput",level:2},{value:"ListRecommendationsInput",id:"listrecommendationsinput",level:2},{value:"ListRolesInput",id:"listrolesinput",level:2},{value:"ListSecretsInput",id:"listsecretsinput",level:2},{value:"ListTestsInput",id:"listtestsinput",level:2},{value:"ListUsersInput",id:"listusersinput",level:2},{value:"MetadataAnalyticsInput",id:"metadataanalyticsinput",level:2},{value:"NotebookEditablePropertiesUpdate",id:"notebookeditablepropertiesupdate",level:2},{value:"NotebookUpdateInput",id:"notebookupdateinput",level:2},{value:"OwnerInput",id:"ownerinput",level:2},{value:"OwnershipUpdate",id:"ownershipupdate",level:2},{value:"OwnerUpdate",id:"ownerupdate",level:2},{value:"PolicyMatchCriterionInput",id:"policymatchcriterioninput",level:2},{value:"PolicyMatchFilterInput",id:"policymatchfilterinput",level:2},{value:"PolicyUpdateInput",i
d:"policyupdateinput",level:2},{value:"QueryStatementInput",id:"querystatementinput",level:2},{value:"RecommendationRequestContext",id:"recommendationrequestcontext",level:2},{value:"RelatedTermsInput",id:"relatedtermsinput",level:2},{value:"RelationshipsInput",id:"relationshipsinput",level:2},{value:"RemoveGroupMembersInput",id:"removegroupmembersinput",level:2},{value:"RemoveLinkInput",id:"removelinkinput",level:2},{value:"RemoveNativeGroupMembersInput",id:"removenativegroupmembersinput",level:2},{value:"RemoveOwnerInput",id:"removeownerinput",level:2},{value:"ReportOperationInput",id:"reportoperationinput",level:2},{value:"ResourceFilterInput",id:"resourcefilterinput",level:2},{value:"ResourceRefInput",id:"resourcerefinput",level:2},{value:"ResourceSpec",id:"resourcespec",level:2},{value:"RollbackIngestionInput",id:"rollbackingestioninput",level:2},{value:"ScrollAcrossEntitiesInput",id:"scrollacrossentitiesinput",level:2},{value:"ScrollAcrossLineageInput",id:"scrollacrosslineageinput",level:2},{value:"SearchAcrossEntitiesInput",id:"searchacrossentitiesinput",level:2},{value:"SearchAcrossLineageInput",id:"searchacrosslineageinput",level:2},{value:"SearchFlags",id:"searchflags",level:2},{value:"SearchInput",id:"searchinput",level:2},{value:"SearchRequestContext",id:"searchrequestcontext",level:2},{value:"SearchSortInput",id:"searchsortinput",level:2},{value:"SortCriterion",id:"sortcriterion",level:2},{value:"StepStateInput",id:"stepstateinput",level:2},{value:"StringMapEntryInput",id:"stringmapentryinput",level:2},{value:"TagAssociationInput",id:"tagassociationinput",level:2},{value:"TagAssociationUpdate",id:"tagassociationupdate",level:2},{value:"TagUpdateInput",id:"tagupdateinput",level:2},{value:"TermAssociationInput",id:"termassociationinput",level:2},{value:"TestDefinitionInput",id:"testdefinitioninput",level:2},{value:"UpdateCorpUserViewsSettingsInput",id:"updatecorpuserviewssettingsinput",level:2},{value:"UpdateDataProductInput",id:"updatedataproductinput",l
evel:2},{value:"UpdateDeprecationInput",id:"updatedeprecationinput",level:2},{value:"UpdateEmbedInput",id:"updateembedinput",level:2},{value:"UpdateGlobalViewsSettingsInput",id:"updateglobalviewssettingsinput",level:2},{value:"UpdateIngestionSourceConfigInput",id:"updateingestionsourceconfiginput",level:2},{value:"UpdateIngestionSourceInput",id:"updateingestionsourceinput",level:2},{value:"UpdateIngestionSourceScheduleInput",id:"updateingestionsourcescheduleinput",level:2},{value:"UpdateLineageInput",id:"updatelineageinput",level:2},{value:"UpdateMediaInput",id:"updatemediainput",level:2},{value:"UpdateNameInput",id:"updatenameinput",level:2},{value:"UpdateOwnershipTypeInput",id:"updateownershiptypeinput",level:2},{value:"UpdateParentNodeInput",id:"updateparentnodeinput",level:2},{value:"UpdatePostContentInput",id:"updatepostcontentinput",level:2},{value:"UpdateQueryInput",id:"updatequeryinput",level:2},{value:"UpdateQueryPropertiesInput",id:"updatequerypropertiesinput",level:2},{value:"UpdateQuerySubjectInput",id:"updatequerysubjectinput",level:2},{value:"UpdateTestInput",id:"updatetestinput",level:2},{value:"UpdateUserSettingInput",id:"updateusersettinginput",level:2},{value:"UpdateViewInput",id:"updateviewinput",level:2}],i={toc:s};function d(t){let{components:l,...e}=t;return(0,r.kt)("wrapper",(0,n.Z)({},i,e,{components:l,mdxType:"MDXLayout"}),(0,r.kt)("h2",{id:"acceptroleinput"},"AcceptRoleInput"),(0,r.kt)("p",null,"Input provided when accepting a DataHub role using an invite token"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"inviteToken",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The token needed to accept the 
role"))))),(0,r.kt)("h2",{id:"actorfilterinput"},"ActorFilterInput"),(0,r.kt)("p",null,"Input required when creating or updating an Access Policies Determines which actors the Policy applies to"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A disjunctive set of users to apply the policy to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A disjunctive set of groups to apply the policy to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceOwners",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the filter should return TRUE for owners of a particular resource Only applies to policies of type METADATA, which have a resource associated with them"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceOwnersTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Set of OwnershipTypes to apply the policy to (if resourceOwners field is set to True)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"allUsers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the filter should apply to all 
users"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"allGroups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the filter should apply to all groups"))))),(0,r.kt)("h2",{id:"addgroupmembersinput"},"AddGroupMembersInput"),(0,r.kt)("p",null,"Input required to add members to an external DataHub group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groupUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The group to add members to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"userUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The members to add to the group"))))),(0,r.kt)("h2",{id:"addlinkinput"},"AddLinkInput"),(0,r.kt)("p",null,"Input provided when adding the association between a Metadata Entity and a Link"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"linkUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The url of the link to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"label",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A label to attach to the 
link"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the resource or entity to attach the link to, for example a dataset urn"))))),(0,r.kt)("h2",{id:"addnativegroupmembersinput"},"AddNativeGroupMembersInput"),(0,r.kt)("p",null,"Input required to add members to a native DataHub group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groupUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The group to add members to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"userUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The members to add to the group"))))),(0,r.kt)("h2",{id:"addownerinput"},"AddOwnerInput"),(0,r.kt)("p",null,"Input provided when adding the association between a Metadata Entity and an user or group owner"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Owner to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerEntityType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownerentitytype"},(0,r.kt)("code",null,"OwnerEntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The owner 
type, either a user or group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownershiptype"},(0,r.kt)("code",null,"OwnershipType"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"The ownership type for the new owner. If none is provided, then a new NONE will be added. Deprecated - Use ownershipTypeUrn field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the ownership type entity."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the resource or entity to attach or remove the owner from, for example a dataset urn"))))),(0,r.kt)("h2",{id:"addownersinput"},"AddOwnersInput"),(0,r.kt)("p",null,"Input provided when adding multiple associations between a Metadata Entity and an user or group owner"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"owners",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownerinput"},(0,r.kt)("code",null,"[OwnerInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Owner to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the resource or entity to attach or remove the owner from, for example a dataset 
urn"))))),(0,r.kt)("h2",{id:"addtagsinput"},"AddTagsInput"),(0,r.kt)("p",null,"Input provided when adding tags to an asset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tagUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target Metadata Entity to add or remove the Tag to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#subresourcetype"},(0,r.kt)("code",null,"SubResourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional type of a sub resource to attach the Tag to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional sub resource identifier to attach the Tag to"))))),(0,r.kt)("h2",{id:"addtermsinput"},"AddTermsInput"),(0,r.kt)("p",null,"Input provided when adding Terms to an asset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"termUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Glossary Term to add or 
remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target Metadata Entity to add or remove the Glossary Term from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#subresourcetype"},(0,r.kt)("code",null,"SubResourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional type of a sub resource to attach the Glossary Term to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional sub resource identifier to attach the Glossary Term to"))))),(0,r.kt)("h2",{id:"aggregateacrossentitiesinput"},"AggregateAcrossEntitiesInput"),(0,r.kt)("p",null,"Input arguments for a full text search query across entities to get aggregations"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be searched. If this is not provided, all entities will be searched."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"facets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The list of facets to get aggregations for. 
If list is empty or null, get aggregations for all facets Sub-aggregations can be specified with the unicode character \u241e (U+241E) as a delimiter between the subtypes. e.g. _entityType\u241eowners"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. (or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - A View to apply when generating results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchflags"},(0,r.kt)("code",null,"SearchFlags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flags controlling search options"))))),(0,r.kt)("h2",{id:"andfilterinput"},"AndFilterInput"),(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"and",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of and criteria the filter applies to the query"))))),(0,r.kt)("h2",{id:"aspectparams"},"AspectParams"),(0,r.kt)("p",null,"Params to configure what list of aspects should be fetched by the aspects property"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"autoRenderOnly",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Only fetch auto render aspects"))))),(0,r.kt)("h2",{id:"autocompleteinput"},"AutoCompleteInput"),(0,r.kt)("p",null,"Input for performing an auto completion query against a single Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity type to be autocompleted against"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The raw query 
string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional entity field name to autocomplete on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of autocomplete results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Faceted filters applied to autocomplete results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))))),(0,r.kt)("h2",{id:"autocompletemultipleinput"},"AutoCompleteMultipleInput"),(0,r.kt)("p",null,"Input for performing an auto completion query against a a set of Metadata Entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be autocompleted against Optional, if none supplied, all searchable types will be autocompleted against"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The raw query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional field to autocomplete against"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of autocomplete results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Faceted filters applied to autocomplete results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - A View to apply when generating results"))))),(0,r.kt)("h2",{id:"batchaddownersinput"},"BatchAddOwnersInput"),(0,r.kt)("p",null,"Input provided when adding owners to a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"owners",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownerinput"},(0,r.kt)("code",null,"[OwnerInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the owners"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The ownership type to remove, optional. 
By default will remove regardless of ownership type."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to attach the owners to"))))),(0,r.kt)("h2",{id:"batchaddtagsinput"},"BatchAddTagsInput"),(0,r.kt)("p",null,"Input provided when adding tags to a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tagUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to attach the tags to"))))),(0,r.kt)("h2",{id:"batchaddtermsinput"},"BatchAddTermsInput"),(0,r.kt)("p",null,"Input provided when adding glossary terms to a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"termUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Glossary 
Terms"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to attach the glossary terms to"))))),(0,r.kt)("h2",{id:"batchassignroleinput"},"BatchAssignRoleInput"),(0,r.kt)("p",null,"Input provided when batch assigning a role to a list of users"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"roleUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the role to assign to the actors. If undefined, will remove the role."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urns of the actors to assign the role to"))))),(0,r.kt)("h2",{id:"batchdatasetupdateinput"},"BatchDatasetUpdateInput"),(0,r.kt)("p",null,"Arguments provided to batch update Dataset entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Primary key of the Dataset to which the update will be 
applied"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"update",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datasetupdateinput"},(0,r.kt)("code",null,"DatasetUpdateInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Arguments provided to update the Dataset"))))),(0,r.kt)("h2",{id:"batchgetstepstatesinput"},"BatchGetStepStatesInput"),(0,r.kt)("p",null,"Input arguments required for fetching step states"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ids",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique ids for the steps to retrieve"))))),(0,r.kt)("h2",{id:"batchremoveownersinput"},"BatchRemoveOwnersInput"),(0,r.kt)("p",null,"Input provided when removing owners from a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the owners"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The ownership type to remove, optional. 
By default will remove regardless of ownership type."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to remove the owners from"))))),(0,r.kt)("h2",{id:"batchremovetagsinput"},"BatchRemoveTagsInput"),(0,r.kt)("p",null,"Input provided when removing tags from a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tagUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to remove the tags from"))))),(0,r.kt)("h2",{id:"batchremovetermsinput"},"BatchRemoveTermsInput"),(0,r.kt)("p",null,"Input provided when removing glossary terms from a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"termUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Glossary 
Terms"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to remove the glossary terms from"))))),(0,r.kt)("h2",{id:"batchsetdataproductinput"},"BatchSetDataProductInput"),(0,r.kt)("p",null,"Input properties required for batch setting a DataProduct on other entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataProductUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the data product you are setting on a group of resources. If this is null, the Data Product will be unset for the given resources."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urns of the entities the given data product should be set on"))))),(0,r.kt)("h2",{id:"batchsetdomaininput"},"BatchSetDomainInput"),(0,r.kt)("p",null,"Input provided when adding tags to a batch of assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"domainUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Domain, or null if the domain will be 
unset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to attach the Domain"))))),(0,r.kt)("h2",{id:"batchupdatedeprecationinput"},"BatchUpdateDeprecationInput"),(0,r.kt)("p",null,"Input provided when updating the deprecation status for a batch of assets."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the Entity is marked as deprecated."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"decommissionTime",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - The time user plan to decommission this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"note",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - Additional information about the entity deprecation plan"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcerefinput"},(0,r.kt)("code",null,"[ResourceRefInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target assets to attach the tags to"))))),(0,r.kt)("h2",{id:"batchupdatesoftdeletedinput"},"BatchUpdateSoftDeletedInput"),(0,r.kt)("p",null,"Input provided when updating the soft-deleted status for a batch of 
assets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urns of the assets to soft delete"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deleted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether to mark the asset as soft-deleted (hidden)"))))),(0,r.kt)("h2",{id:"batchupdatestepstatesinput"},"BatchUpdateStepStatesInput"),(0,r.kt)("p",null,"Input arguments required for updating step states"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"states",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#stepstateinput"},(0,r.kt)("code",null,"[StepStateInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Set of step states. 
If the id does not exist, it will be created."))))),(0,r.kt)("h2",{id:"browseinput"},"BrowseInput"),(0,r.kt)("p",null,"Input required for browse queries"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse entity type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: Use `orFilters`- they are more expressive"),(0,r.kt)("p",null,"Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))))),(0,r.kt)("h2",{id:"browsepathsinput"},"BrowsePathsInput"),(0,r.kt)("p",null,"Inputs for fetching the browse paths for a Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse entity type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The entity urn"))))),(0,r.kt)("h2",{id:"browsev2input"},"BrowseV2Input"),(0,r.kt)("p",null,"Input required for browse queries"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse entity type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 - a list with each entry being part of the browse path V2"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated 
results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. (or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - A View to apply when generating results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The search query string"))))),(0,r.kt)("h2",{id:"cancelingestionexecutionrequestinput"},"CancelIngestionExecutionRequestInput"),(0,r.kt)("p",null,"Input for cancelling an execution request input"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ingestionSourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the ingestion source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionRequestUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the specific execution request to 
cancel"))))),(0,r.kt)("h2",{id:"charteditablepropertiesupdate"},"ChartEditablePropertiesUpdate"),(0,r.kt)("p",null,"Update to writable Chart fields"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Writable description aka documentation for a Chart"))))),(0,r.kt)("h2",{id:"chartupdateinput"},"ChartUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a Chart Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use tags field instead Update to global tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#charteditablepropertiesupdate"},(0,r.kt)("code",null,"ChartEditablePropertiesUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to 
editable properties"))))),(0,r.kt)("h2",{id:"containerentitiesinput"},"ContainerEntitiesInput"),(0,r.kt)("p",null,"Input required to fetch the entities inside of a container."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional query filter for particular entities inside the container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities to include in result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional Facet filters to apply to the result set"))))),(0,r.kt)("h2",{id:"corpgroupupdateinput"},"CorpGroupUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a CorpGroup Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"DataHub description of the 
group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"slack",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Slack handle for the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Email address for the group"))))),(0,r.kt)("h2",{id:"corpuserupdateinput"},"CorpUserUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a CorpUser Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name to show on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title to show on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aboutMe",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"About me section of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"teams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Teams that the user belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"skills",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Skills that the user 
possesses"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"pictureLink",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A URL which points to a picture which user wants to set as a profile photo"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"slack",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The slack handle of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"phone",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Phone number for the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Email address for the user"))))),(0,r.kt)("h2",{id:"createaccesstokeninput"},"CreateAccessTokenInput"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesstokentype"},(0,r.kt)("code",null,"AccessTokenType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the Access Token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actorUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actor associated with the Access Token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"duration",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesstokenduration"},(0,r.kt)("code",null,"AccessTokenDuration!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The duration for which the Access Token is 
valid."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the token to be generated."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the token if defined."))))),(0,r.kt)("h2",{id:"createdataproductinput"},"CreateDataProductInput"),(0,r.kt)("p",null,"Input required for creating a DataProduct."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#createdataproductpropertiesinput"},(0,r.kt)("code",null,"CreateDataProductPropertiesInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties about the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domainUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Domain"))))),(0,r.kt)("h2",{id:"createdataproductpropertiesinput"},"CreateDataProductPropertiesInput"),(0,r.kt)("p",null,"Input properties required for creating a DataProduct"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A display name for the 
DataProduct"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description for the DataProduct"))))),(0,r.kt)("h2",{id:"createdomaininput"},"CreateDomainInput"),(0,r.kt)("p",null,"Input required to create a new Domain."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional! A custom id to use as the primary key identifier for the domain. If not provided, a random UUID will be generated as the id."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name for the Domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional description for the Domain"))))),(0,r.kt)("h2",{id:"createglossaryentityinput"},"CreateGlossaryEntityInput"),(0,r.kt)("p",null,"Input required to create a new Glossary Entity - a Node or a Term."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional! 
A custom id to use as the primary key identifier for the domain. If not provided, a random UUID will be generated as the id."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name for the Node or Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description for the Node or Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentNode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional parent node urn for the Glossary Node or Term"))))),(0,r.kt)("h2",{id:"creategroupinput"},"CreateGroupInput"),(0,r.kt)("p",null,"Input for creating a new group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional! A custom id to use as the primary key identifier for the group. 
If not provided, a random UUID will be generated as the id."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name of the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the group"))))),(0,r.kt)("h2",{id:"createingestionexecutionrequestinput"},"CreateIngestionExecutionRequestInput"),(0,r.kt)("p",null,"Input for creating an execution request input"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ingestionSourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the ingestion source to execute"))))),(0,r.kt)("h2",{id:"createinvitetokeninput"},"CreateInviteTokenInput"),(0,r.kt)("p",null,"Input provided when creating an invite token"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"roleUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the role to create the invite token for"))))),(0,r.kt)("h2",{id:"createnativeuserresettokeninput"},"CreateNativeUserResetTokenInput"),(0,r.kt)("p",null,"Input required to generate a password reset token for a native 
user."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"userUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the user to reset the password of"))))),(0,r.kt)("h2",{id:"createownershiptypeinput"},"CreateOwnershipTypeInput"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Custom Ownership Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the Custom Ownership Type"))))),(0,r.kt)("h2",{id:"createpostinput"},"CreatePostInput"),(0,r.kt)("p",null,"Input provided when creating a Post"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"postType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#posttype"},(0,r.kt)("code",null,"PostType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of 
post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"content",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#updatepostcontentinput"},(0,r.kt)("code",null,"UpdatePostContentInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The content of the post"))))),(0,r.kt)("h2",{id:"createqueryinput"},"CreateQueryInput"),(0,r.kt)("p",null,"Input required for creating a Query. Requires the 'Edit Queries' privilege for all query subjects."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#createquerypropertiesinput"},(0,r.kt)("code",null,"CreateQueryPropertiesInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties about the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subjects",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#createquerysubjectinput"},(0,r.kt)("code",null,"[CreateQuerySubjectInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Subjects for the query"))))),(0,r.kt)("h2",{id:"createquerypropertiesinput"},"CreateQueryPropertiesInput"),(0,r.kt)("p",null,"Input properties required for creating a Query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional display name for the 
Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description for the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"statement",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#querystatementinput"},(0,r.kt)("code",null,"QueryStatementInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Query contents"))))),(0,r.kt)("h2",{id:"createquerysubjectinput"},"CreateQuerySubjectInput"),(0,r.kt)("p",null,"Input required for creating a Query. For now, only datasets are supported."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the dataset that is the subject of the query"))))),(0,r.kt)("h2",{id:"createsecretinput"},"CreateSecretInput"),(0,r.kt)("p",null,"Input arguments for creating a new Secret"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the secret for reference in ingestion recipes"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The value of the secret, to be encrypted and 
stored"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description for the secret"))))),(0,r.kt)("h2",{id:"createtaginput"},"CreateTagInput"),(0,r.kt)("p",null,"Input required to create a new Tag"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional! A custom id to use as the primary key identifier for the Tag. If not provided, a random UUID will be generated as the id."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name for the Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional description for the Tag"))))),(0,r.kt)("h2",{id:"createtestconnectionrequestinput"},"CreateTestConnectionRequestInput"),(0,r.kt)("p",null,"Input for creating a test connection request"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"recipe",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A JSON-encoded 
recipe"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Advanced: The version of the ingestion framework to use"))))),(0,r.kt)("h2",{id:"createtestinput"},"CreateTestInput"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Advanced: a custom id for the test."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"category",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The category of the Test (user defined)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#testdefinitioninput"},(0,r.kt)("code",null,"TestDefinitionInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The test definition"))))),(0,r.kt)("h2",{id:"createviewinput"},"CreateViewInput"),(0,r.kt)("p",null,"Input provided when creating a DataHub 
View"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#datahubviewtype"},(0,r.kt)("code",null,"DataHubViewType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datahubviewdefinitioninput"},(0,r.kt)("code",null,"DataHubViewDefinitionInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The view definition itself"))))),(0,r.kt)("h2",{id:"dashboardeditablepropertiesupdate"},"DashboardEditablePropertiesUpdate"),(0,r.kt)("p",null,"Update to writable Dashboard fields"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Writable description aka documentation for a Dashboard"))))),(0,r.kt)("h2",{id:"dashboardupdateinput"},"DashboardUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a Dashboard 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use tags field instead Update to global tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#dashboardeditablepropertiesupdate"},(0,r.kt)("code",null,"DashboardEditablePropertiesUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to editable properties"))))),(0,r.kt)("h2",{id:"datafloweditablepropertiesupdate"},"DataFlowEditablePropertiesUpdate"),(0,r.kt)("p",null,"Update to writable Data Flow fields"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Writable description aka documentation for a Data 
Flow"))))),(0,r.kt)("h2",{id:"dataflowupdateinput"},"DataFlowUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a Data Flow aka Pipeline Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use tags field instead Update to global tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datafloweditablepropertiesupdate"},(0,r.kt)("code",null,"DataFlowEditablePropertiesUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to editable properties"))))),(0,r.kt)("h2",{id:"datahubviewdefinitioninput"},"DataHubViewDefinitionInput"),(0,r.kt)("p",null,"Input required for creating a DataHub View 
Definition"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of entity types that the view applies for. If left empty, then ALL entities will be in scope."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datahubviewfilterinput"},(0,r.kt)("code",null,"DataHubViewFilterInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of filters to apply."))))),(0,r.kt)("h2",{id:"datahubviewfilterinput"},"DataHubViewFilterInput"),(0,r.kt)("p",null,"Input required for creating a DataHub View Definition"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"operator",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#logicaloperator"},(0,r.kt)("code",null,"LogicalOperator!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The operator used to combine the filters."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of filters combined via an operator. 
If left empty, then no filters will be applied."))))),(0,r.kt)("h2",{id:"datajobeditablepropertiesupdate"},"DataJobEditablePropertiesUpdate"),(0,r.kt)("p",null,"Update to writable Data Job fields"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Writable description aka documentation for a Data Job"))))),(0,r.kt)("h2",{id:"datajobupdateinput"},"DataJobUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a Data Job aka Task Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use tags field instead Update to global tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to 
tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datajobeditablepropertiesupdate"},(0,r.kt)("code",null,"DataJobEditablePropertiesUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to editable properties"))))),(0,r.kt)("h2",{id:"dataproductentitiesinput"},"DataProductEntitiesInput"),(0,r.kt)("p",null,"Input required to fetch the entities inside of a Data Product."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional query filter for particular entities inside the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities to include in result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional Facet filters to apply to the result set"))))),(0,r.kt)("h2",{id:"datasetdeprecationupdate"},"DatasetDeprecationUpdate"),(0,r.kt)("p",null,"An update for the deprecation information for a Metadata 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the dataset is deprecated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"decommissionTime",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time user plan to decommission this dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"note",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional information about the dataset deprecation plan"))))),(0,r.kt)("h2",{id:"dataseteditablepropertiesupdate"},"DatasetEditablePropertiesUpdate"),(0,r.kt)("p",null,"Update to writable Dataset fields"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Writable description aka documentation for a Dataset"))))),(0,r.kt)("h2",{id:"datasetupdateinput"},"DatasetUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a Dataset 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datasetdeprecationupdate"},(0,r.kt)("code",null,"DatasetDeprecationUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to deprecation status"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#institutionalmemoryupdate"},(0,r.kt)("code",null,"InstitutionalMemoryUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to institutional memory, ie documentation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use tags field instead Update to global tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableSchemaMetadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#editableschemametadataupdate"},(0,r.kt)("code",null,"EditableSchemaMetadataUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to editable schema metadata of the 
dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#dataseteditablepropertiesupdate"},(0,r.kt)("code",null,"DatasetEditablePropertiesUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to editable properties"))))),(0,r.kt)("h2",{id:"descriptionupdateinput"},"DescriptionUpdateInput"),(0,r.kt)("p",null,"Incubating. Updates the description of a resource. Currently supports DatasetField descriptions only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The new description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the resource to attach the description to, eg dataset urn"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#subresourcetype"},(0,r.kt)("code",null,"SubResourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional sub resource type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A sub resource identifier, eg dataset field path"))))),(0,r.kt)("h2",{id:"domainentitiesinput"},"DomainEntitiesInput"),(0,r.kt)("p",null,"Input required to fetch the entities inside of a 
Domain."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional query filter for particular entities inside the domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities to include in result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional Facet filters to apply to the result set"))))),(0,r.kt)("h2",{id:"editableschemafieldinfoupdate"},"EditableSchemaFieldInfoUpdate"),(0,r.kt)("p",null,"Update to writable schema field metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flattened name of a field identifying the field the editable info is applied 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edited description of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags associated with the field"))))),(0,r.kt)("h2",{id:"editableschemametadataupdate"},"EditableSchemaMetadataUpdate"),(0,r.kt)("p",null,"Update to editable schema metadata of the dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableSchemaFieldInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#editableschemafieldinfoupdate"},(0,r.kt)("code",null,"[EditableSchemaFieldInfoUpdate!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to writable schema field metadata"))))),(0,r.kt)("h2",{id:"entitycountinput"},"EntityCountInput"),(0,r.kt)("p",null,"Input for the get entity counts endpoint"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"entityrequestcontext"},"EntityRequestContext"),(0,r.kt)("p",null,"Context that defines an entity page requesting 
recommendations"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Type of the enity being displayed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the entity being displayed"))))),(0,r.kt)("h2",{id:"facetfilterinput"},"FacetFilterInput"),(0,r.kt)("p",null,"Facet filters to apply to search results"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of field to filter by"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: Prefer `values` for single elements"),(0,r.kt)("p",null,"Value of the field to filter by. 
Deprecated in favor of ",(0,r.kt)("code",null,"values"),", which should accept a single element array for a value"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"values",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Values, one of which the intended field should match."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"negated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"If the filter should or should not be matched"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"condition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#filteroperator"},(0,r.kt)("code",null,"FilterOperator"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Condition for the values. How to If unset, assumed to be equality"))))),(0,r.kt)("h2",{id:"filterinput"},"FilterInput"),(0,r.kt)("p",null,"A set of filter criteria"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"and",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of conjunctive filters"))))),(0,r.kt)("h2",{id:"getaccesstokeninput"},"GetAccessTokenInput"),(0,r.kt)("p",null,"Input required to fetch a new Access 
Token."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesstokentype"},(0,r.kt)("code",null,"AccessTokenType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the Access Token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actorUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actor associated with the Access Token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"duration",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesstokenduration"},(0,r.kt)("code",null,"AccessTokenDuration!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The duration for which the Access Token is valid."))))),(0,r.kt)("h2",{id:"getgrantedprivilegesinput"},"GetGrantedPrivilegesInput"),(0,r.kt)("p",null,"Input for getting granted privileges"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"actorUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the actor"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceSpec",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcespec"},(0,r.kt)("code",null,"ResourceSpec"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Spec to identify resource. 
If empty, gets privileges granted to the actor"))))),(0,r.kt)("h2",{id:"getinvitetokeninput"},"GetInviteTokenInput"),(0,r.kt)("p",null,"Input provided when getting an invite token"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"roleUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the role to get the invite token for"))))),(0,r.kt)("h2",{id:"getquickfiltersinput"},"GetQuickFiltersInput"),(0,r.kt)("p",null,"Input for getting Quick Filters"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - A View to apply when generating results"))))),(0,r.kt)("h2",{id:"getrootglossaryentitiesinput"},"GetRootGlossaryEntitiesInput"),(0,r.kt)("p",null,"Input required when getting Business Glossary entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set 
returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Glossary Entities in the returned result set"))))),(0,r.kt)("h2",{id:"getschemablameinput"},"GetSchemaBlameInput"),(0,r.kt)("p",null,"Input for getting schema changes computed at a specific version."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The dataset urn"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Changes after this version are not shown. 
If not provided, this is the latestVersion."))))),(0,r.kt)("h2",{id:"getschemaversionlistinput"},"GetSchemaVersionListInput"),(0,r.kt)("p",null,"Input for getting list of schema versions."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The dataset urn"))))),(0,r.kt)("h2",{id:"getsecretvaluesinput"},"GetSecretValuesInput"),(0,r.kt)("p",null,"Input arguments for retrieving the plaintext values of a set of secrets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"secrets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of secret names"))))),(0,r.kt)("h2",{id:"globaltagsupdate"},"GlobalTagsUpdate"),(0,r.kt)("p",null,"Deprecated, use addTag or removeTag mutation instead\nUpdate to the Tags associated with a Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#tagassociationupdate"},(0,r.kt)("code",null,"[TagAssociationUpdate!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The new set of 
tags"))))),(0,r.kt)("h2",{id:"institutionalmemorymetadataupdate"},"InstitutionalMemoryMetadataUpdate"),(0,r.kt)("p",null,"An institutional memory to add to a Metadata Entity\nTODO Add a USER or GROUP actor enum"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"url",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Link to a document or wiki page or another internal resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"author",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The corp user urn of the author of the metadata"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which this metadata was created"))))),(0,r.kt)("h2",{id:"institutionalmemoryupdate"},"InstitutionalMemoryUpdate"),(0,r.kt)("p",null,"An update for the institutional memory information for a Metadata 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"elements",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#institutionalmemorymetadataupdate"},(0,r.kt)("code",null,"[InstitutionalMemoryMetadataUpdate!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The individual references in the institutional memory"))))),(0,r.kt)("h2",{id:"lineageedge"},"LineageEdge"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"downstreamUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the source entity. This urn is downstream of the destinationUrn."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"upstreamUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the destination entity. 
This urn is upstream of the destinationUrn"))))),(0,r.kt)("h2",{id:"lineageinput"},"LineageInput"),(0,r.kt)("p",null,"Input for the list lineage property of an Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"direction",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#lineagedirection"},(0,r.kt)("code",null,"LineageDirection!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The direction of the relationship, either incoming or outgoing from the source entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"separateSiblings",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional flag to not merge siblings in the response. 
They are merged by default."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional starting time to filter on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional ending time to filter on"))))),(0,r.kt)("h2",{id:"listaccesstokeninput"},"ListAccessTokenInput"),(0,r.kt)("p",null,"Input arguments for listing access tokens"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Facet filters to apply to search results"))))),(0,r.kt)("h2",{id:"listdomainsinput"},"ListDomainsInput"),(0,r.kt)("p",null,"Input required when listing DataHub 
Domains"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Domains to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))))),(0,r.kt)("h2",{id:"listglobalviewsinput"},"ListGlobalViewsInput"),(0,r.kt)("p",null,"Input provided when listing DataHub Global Views"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Views to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search 
query"))))),(0,r.kt)("h2",{id:"listgroupsinput"},"ListGroupsInput"),(0,r.kt)("p",null,"Input required when listing DataHub Groups"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Policies to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))))),(0,r.kt)("h2",{id:"listingestionsourcesinput"},"ListIngestionSourcesInput"),(0,r.kt)("p",null,"Input arguments for listing Ingestion Sources"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be 
returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional Facet filters to apply to the result set"))))),(0,r.kt)("h2",{id:"listmyviewsinput"},"ListMyViewsInput"),(0,r.kt)("p",null,"Input provided when listing DataHub Views"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Views to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#datahubviewtype"},(0,r.kt)("code",null,"DataHubViewType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - List the type of View to filter for."))))),(0,r.kt)("h2",{id:"listownershiptypesinput"},"ListOwnershipTypesInput"),(0,r.kt)("p",null,"Input required for listing custom ownership types 
entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned, default is 0"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Custom Ownership Types to be returned in the result set, default is 20"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional Facet filters to apply to the result set"))))),(0,r.kt)("h2",{id:"listpoliciesinput"},"ListPoliciesInput"),(0,r.kt)("p",null,"Input required when listing DataHub Access Policies"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set 
returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Policies to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))))),(0,r.kt)("h2",{id:"listpostsinput"},"ListPostsInput"),(0,r.kt)("p",null,"Input provided when listing existing posts"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Roles to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))))),(0,r.kt)("h2",{id:"listqueriesinput"},"ListQueriesInput"),(0,r.kt)("p",null,"Input required for listing query 
entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Queries to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A raw search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"source",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#querysource"},(0,r.kt)("code",null,"QuerySource"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional source for the query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional Urn for the parent dataset that the query is associated with."))))),(0,r.kt)("h2",{id:"listrecommendationsinput"},"ListRecommendationsInput"),(0,r.kt)("p",null,"Input arguments for fetching UI 
recommendations"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"userUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the actor requesting recommendations"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"requestContext",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#recommendationrequestcontext"},(0,r.kt)("code",null,"RecommendationRequestContext"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Context provider by the caller requesting recommendations"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Max number of modules to return"))))),(0,r.kt)("h2",{id:"listrolesinput"},"ListRolesInput"),(0,r.kt)("p",null,"Input provided when listing existing roles"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Roles to be returned in the result 
set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))))),(0,r.kt)("h2",{id:"listsecretsinput"},"ListSecretsInput"),(0,r.kt)("p",null,"Input for listing DataHub Secrets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional search query"))))),(0,r.kt)("h2",{id:"listtestsinput"},"ListTestsInput"),(0,r.kt)("p",null,"Input required when listing DataHub Tests"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The 
maximum number of Domains to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional query string to match on"))))),(0,r.kt)("h2",{id:"listusersinput"},"ListUsersInput"),(0,r.kt)("p",null,"Input required when listing DataHub Users"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of Policies to be returned in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional search query"))))),(0,r.kt)("h2",{id:"metadataanalyticsinput"},"MetadataAnalyticsInput"),(0,r.kt)("p",null,"Input to fetch metadata analytics charts"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity type to fetch analytics for (If empty, queries across all 
entities)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the domain to fetch analytics for (If empty or GLOBAL, queries across all domains)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Search query to filter down result (If empty, does not apply any search query)"))))),(0,r.kt)("h2",{id:"notebookeditablepropertiesupdate"},"NotebookEditablePropertiesUpdate"),(0,r.kt)("p",null,"Update to writable Notebook fields"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Writable description aka documentation for a Notebook"))))),(0,r.kt)("h2",{id:"notebookupdateinput"},"NotebookUpdateInput"),(0,r.kt)("p",null,"Arguments provided to update a Notebook Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to 
ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#globaltagsupdate"},(0,r.kt)("code",null,"GlobalTagsUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#notebookeditablepropertiesupdate"},(0,r.kt)("code",null,"NotebookEditablePropertiesUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Update to editable properties"))))),(0,r.kt)("h2",{id:"ownerinput"},"OwnerInput"),(0,r.kt)("p",null,"Input provided when adding an owner to an asset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Owner to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerEntityType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownerentitytype"},(0,r.kt)("code",null,"OwnerEntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The owner type, either a user or group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownershiptype"},(0,r.kt)("code",null,"OwnershipType"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"The ownership type for the new owner. If none is provided, then a new NONE will be added. 
Deprecated - Use ownershipTypeUrn field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the ownership type entity."))))),(0,r.kt)("h2",{id:"ownershipupdate"},"OwnershipUpdate"),(0,r.kt)("p",null,"An update for the ownership information for a Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"owners",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownerupdate"},(0,r.kt)("code",null,"[OwnerUpdate!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The updated list of owners"))))),(0,r.kt)("h2",{id:"ownerupdate"},"OwnerUpdate"),(0,r.kt)("p",null,"An owner to add to a Metadata Entity\nTODO Add a USER or GROUP actor enum"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"owner",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The owner URN, either a corpGroup or corpuser"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownershiptype"},(0,r.kt)("code",null,"OwnershipType"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"The owner type. 
Deprecated - Use ownershipTypeUrn field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the ownership type entity."))))),(0,r.kt)("h2",{id:"policymatchcriterioninput"},"PolicyMatchCriterionInput"),(0,r.kt)("p",null,"Criterion to define relationship between field and values"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the field that the criterion refers to e.g. entity_type, entity_urn, domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"values",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Values. 
Matches criterion if any one of the values matches condition (OR-relationship)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"condition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policymatchcondition"},(0,r.kt)("code",null,"PolicyMatchCondition!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the field that the criterion refers to"))))),(0,r.kt)("h2",{id:"policymatchfilterinput"},"PolicyMatchFilterInput"),(0,r.kt)("p",null,"Filter object that encodes a complex filter logic with OR + AND"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"criteria",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#policymatchcriterioninput"},(0,r.kt)("code",null,"[PolicyMatchCriterionInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of criteria to apply"))))),(0,r.kt)("h2",{id:"policyupdateinput"},"PolicyUpdateInput"),(0,r.kt)("p",null,"Input provided when creating or updating an Access Policy"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policytype"},(0,r.kt)("code",null,"PolicyType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Policy Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Policy 
name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"state",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policystate"},(0,r.kt)("code",null,"PolicyState!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Policy state"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A Policy description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#resourcefilterinput"},(0,r.kt)("code",null,"ResourceFilterInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The set of resources that the Policy privileges apply to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The set of privileges that the Policy grants"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#actorfilterinput"},(0,r.kt)("code",null,"ActorFilterInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The set of actors that the Policy privileges are granted to"))))),(0,r.kt)("h2",{id:"querystatementinput"},"QueryStatementInput"),(0,r.kt)("p",null,"Input required for creating a Query Statement"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query 
text"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"language",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#querylanguage"},(0,r.kt)("code",null,"QueryLanguage!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query language"))))),(0,r.kt)("h2",{id:"recommendationrequestcontext"},"RecommendationRequestContext"),(0,r.kt)("p",null,"Context that defines the page requesting recommendations\ni.e. for search pages, the query/filters. for entity pages, the entity urn and tab"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"scenario",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#scenariotype"},(0,r.kt)("code",null,"ScenarioType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Scenario in which the recommendations will be displayed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchRequestContext",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchrequestcontext"},(0,r.kt)("code",null,"SearchRequestContext"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional context for defining the search page requesting recommendations"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityRequestContext",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#entityrequestcontext"},(0,r.kt)("code",null,"EntityRequestContext"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional context for defining the entity page requesting recommendations"))))),(0,r.kt)("h2",{id:"relatedtermsinput"},"RelatedTermsInput"),(0,r.kt)("p",null,"Input provided when adding Terms to an 
asset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Glossary Term urn to add or remove this relationship to/from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"termUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Glossary Term to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationshipType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#termrelationshiptype"},(0,r.kt)("code",null,"TermRelationshipType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of relationship we're adding or removing to/from for a Glossary Term"))))),(0,r.kt)("h2",{id:"relationshipsinput"},"RelationshipsInput"),(0,r.kt)("p",null,"Input for the list relationships field of an Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The types of relationships to query, representing an OR"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"direction",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#relationshipdirection"},(0,r.kt)("code",null,"RelationshipDirection!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The direction of the relationship, either incoming or outgoing from the source 
entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))))),(0,r.kt)("h2",{id:"removegroupmembersinput"},"RemoveGroupMembersInput"),(0,r.kt)("p",null,"Input required to remove members from an external DataHub group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groupUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The group to remove members from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"userUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The members to remove from the group"))))),(0,r.kt)("h2",{id:"removelinkinput"},"RemoveLinkInput"),(0,r.kt)("p",null,"Input provided when removing the association between a Metadata Entity and a Link"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"linkUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The url of the link to add or remove, which uniquely identifies the 
Link"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the resource or entity to attach the link to, for example a dataset urn"))))),(0,r.kt)("h2",{id:"removenativegroupmembersinput"},"RemoveNativeGroupMembersInput"),(0,r.kt)("p",null,"Input required to remove members from a native DataHub group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groupUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The group to remove members from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"userUrns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The members to remove from the group"))))),(0,r.kt)("h2",{id:"removeownerinput"},"RemoveOwnerInput"),(0,r.kt)("p",null,"Input provided when removing the association between a Metadata Entity and an user or group owner"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Owner to add or 
remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The ownership type to remove, optional. By default will remove regardless of ownership type."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the resource or entity to attach or remove the owner from, for example a dataset urn"))))),(0,r.kt)("h2",{id:"reportoperationinput"},"ReportOperationInput"),(0,r.kt)("p",null,"Input provided to report an asset operation"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the asset (e.g. dataset) to report the operation for"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"operationType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#operationtype"},(0,r.kt)("code",null,"OperationType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of operation that was performed. Required"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customOperationType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A custom type of operation. 
Required if operation type is CUSTOM."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#operationsourcetype"},(0,r.kt)("code",null,"OperationSourceType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The source or reporter of the operation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#stringmapentryinput"},(0,r.kt)("code",null,"[StringMapEntryInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of key-value parameters to include"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"partition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional partition identifier"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"numAffectedRows",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional: The number of affected rows"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional: Provide a timestamp associated with the operation. 
If not provided, one will be generated for you based on the current time."))))),(0,r.kt)("h2",{id:"resourcefilterinput"},"ResourceFilterInput"),(0,r.kt)("p",null,"Input required when creating or updating an Access Policies Determines which resources the Policy applies to"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the resource the policy should apply to Not required because in the future we want to support filtering by type OR by domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of specific resource urns to apply the filter to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"allResources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether of not to apply the filter to all resources of the type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#policymatchfilterinput"},(0,r.kt)("code",null,"PolicyMatchFilterInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether of not to apply the filter to all resources of the type"))))),(0,r.kt)("h2",{id:"resourcerefinput"},"ResourceRefInput"),(0,r.kt)("p",null,"Reference to a resource to apply an action 
to"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the resource being referenced"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#subresourcetype"},(0,r.kt)("code",null,"SubResourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional type of a sub resource to attach the Tag to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional sub resource identifier to attach the Tag to"))))),(0,r.kt)("h2",{id:"resourcespec"},"ResourceSpec"),(0,r.kt)("p",null,"Spec to identify resource"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Resource type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Resource urn"))))),(0,r.kt)("h2",{id:"rollbackingestioninput"},"RollbackIngestionInput"),(0,r.kt)("p",null,"Input for rolling back an ingestion 
execution"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"runId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An ingestion run ID"))))),(0,r.kt)("h2",{id:"scrollacrossentitiesinput"},"ScrollAcrossEntitiesInput"),(0,r.kt)("p",null,"Input arguments for a full text search query across entities, specifying a starting pointer. Allows paging beyond 10k results"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be searched. 
If this is not provided, all entities will be searched."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"scrollId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results, an opaque ID the backend understands as a pointer"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"keepAlive",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The amount of time to keep the point in time snapshot alive, takes a time unit based string ex: 5m or 30s"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - A View to apply when generating results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchflags"},(0,r.kt)("code",null,"SearchFlags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flags controlling search options"))))),(0,r.kt)("h2",{id:"scrollacrosslineageinput"},"ScrollAcrossLineageInput"),(0,r.kt)("p",null,"Input arguments for a search query over the results of a multi-hop graph query, uses scroll API"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the source node"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"direction",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#lineagedirection"},(0,r.kt)("code",null,"LineageDirection!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The direction of the relationship, either incoming or outgoing from the source entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be searched. 
If this is not provided, all entities will be searched."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"scrollId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results, an opaque ID the backend understands as a pointer"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"keepAlive",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The amount of time to keep the point in time snapshot alive, takes a time unit based string ex: 5m or 30s"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional starting time to filter on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional ending time to filter on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchflags"},(0,r.kt)("code",null,"SearchFlags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flags controlling search options"))))),(0,r.kt)("h2",{id:"searchacrossentitiesinput"},"SearchAcrossEntitiesInput"),(0,r.kt)("p",null,"Input arguments for a full text search query across entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be searched. 
If this is not provided, all entities will be searched."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: Use `orFilters`- they are more expressive"),(0,r.kt)("p",null,"Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - A View to apply when generating results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchflags"},(0,r.kt)("code",null,"SearchFlags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flags controlling search options"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sortInput",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchsortinput"},(0,r.kt)("code",null,"SearchSortInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - Information on how to sort this search result"))))),(0,r.kt)("h2",{id:"searchacrosslineageinput"},"SearchAcrossLineageInput"),(0,r.kt)("p",null,"Input arguments for a search query over the results of a multi-hop graph query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the source node"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"direction",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#lineagedirection"},(0,r.kt)("code",null,"LineageDirection!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The direction of the relationship, either incoming or outgoing from the source entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be searched. 
If this is not provided, all entities will be searched."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: Use `orFilters`- they are more expressive"),(0,r.kt)("p",null,"Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. 
(or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional starting time to filter on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional ending time to filter on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchflags"},(0,r.kt)("code",null,"SearchFlags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flags controlling search options"))))),(0,r.kt)("h2",{id:"searchflags"},"SearchFlags"),(0,r.kt)("p",null,"Set of flags to control search behavior"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"skipCache",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether to skip cache"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"maxAggValues",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maximum number of values in an facet aggregation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fulltext",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Structured or unstructured fulltext 
query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"skipHighlighting",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether to skip highlighting"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"skipAggregates",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether to skip aggregates/facets"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"getSuggestions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether to request for search suggestions on the _entityName virtualized field"))))),(0,r.kt)("h2",{id:"searchinput"},"SearchInput"),(0,r.kt)("p",null,"Input arguments for a full text search query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Metadata Entity type to be searched against"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The raw query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities to include 
in result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: Use `orFilters`- they are more expressive"),(0,r.kt)("p",null,"Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#andfilterinput"},(0,r.kt)("code",null,"[AndFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of disjunctive criterion for the filter. (or operation to combine filters)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchflags"},(0,r.kt)("code",null,"SearchFlags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flags controlling search options"))))),(0,r.kt)("h2",{id:"searchrequestcontext"},"SearchRequestContext"),(0,r.kt)("p",null,"Context that defines a search page requesting recommendatinos"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#facetfilterinput"},(0,r.kt)("code",null,"[FacetFilterInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Faceted filters applied to search results"))))),(0,r.kt)("h2",{id:"searchsortinput"},"SearchSortInput"),(0,r.kt)("p",null,"Input required in order to sort search 
results"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"sortCriterion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#sortcriterion"},(0,r.kt)("code",null,"SortCriterion!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A criterion to sort search results on"))))),(0,r.kt)("h2",{id:"sortcriterion"},"SortCriterion"),(0,r.kt)("p",null,"A single sorting criterion for sorting search."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A field upon which we'll do sorting on."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sortOrder",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#sortorder"},(0,r.kt)("code",null,"SortOrder!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The order in which we will be sorting"))))),(0,r.kt)("h2",{id:"stepstateinput"},"StepStateInput"),(0,r.kt)("p",null,"The input required to update the state of a step"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The globally unique id for the 
step"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#stringmapentryinput"},(0,r.kt)("code",null,"[StringMapEntryInput]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The new properties for the step"))))),(0,r.kt)("h2",{id:"stringmapentryinput"},"StringMapEntryInput"),(0,r.kt)("p",null,"String map entry input"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"key",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The key of the map entry"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The value fo the map entry"))))),(0,r.kt)("h2",{id:"tagassociationinput"},"TagAssociationInput"),(0,r.kt)("p",null,"Input provided when updating the association between a Metadata Entity and a Tag"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tagUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Tag to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target Metadata Entity to add or remove the Tag 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#subresourcetype"},(0,r.kt)("code",null,"SubResourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional type of a sub resource to attach the Tag to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional sub resource identifier to attach the Tag to"))))),(0,r.kt)("h2",{id:"tagassociationupdate"},"TagAssociationUpdate"),(0,r.kt)("p",null,"Deprecated, use addTag or removeTag mutation instead\nA tag update to be applied"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tag",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#tagupdateinput"},(0,r.kt)("code",null,"TagUpdateInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tag being applied"))))),(0,r.kt)("h2",{id:"tagupdateinput"},"TagUpdateInput"),(0,r.kt)("p",null,"Deprecated, use addTag or removeTag mutations instead\nAn update for a particular Tag entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name of a 
Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#ownershipupdate"},(0,r.kt)("code",null,"OwnershipUpdate"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the tag"))))),(0,r.kt)("h2",{id:"termassociationinput"},"TermAssociationInput"),(0,r.kt)("p",null,"Input provided when updating the association between a Metadata Entity and a Glossary Term"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"termUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Glossary Term to add or remove"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The target Metadata Entity to add or remove the Glossary Term from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#subresourcetype"},(0,r.kt)("code",null,"SubResourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional type of a sub resource to attach the Glossary Term to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subResource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional sub resource identifier to attach the Glossary Term 
to"))))),(0,r.kt)("h2",{id:"testdefinitioninput"},"TestDefinitionInput"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"json",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The string representation of the Test"))))),(0,r.kt)("h2",{id:"updatecorpuserviewssettingsinput"},"UpdateCorpUserViewsSettingsInput"),(0,r.kt)("p",null,"Input required to update a users settings."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"defaultView",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The URN of the View that serves as this user's personal default. 
If not provided, any existing default view will be removed."))))),(0,r.kt)("h2",{id:"updatedataproductinput"},"UpdateDataProductInput"),(0,r.kt)("p",null,"Input properties required for update a DataProduct"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A display name for the DataProduct"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description for the DataProduct"))))),(0,r.kt)("h2",{id:"updatedeprecationinput"},"UpdateDeprecationInput"),(0,r.kt)("p",null,"Input provided when setting the Deprecation status for an Entity."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the Entity to set deprecation for."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the Entity is marked as deprecated."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"decommissionTime",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - The time user plan 
to decommission this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"note",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional - Additional information about the entity deprecation plan"))))),(0,r.kt)("h2",{id:"updateembedinput"},"UpdateEmbedInput"),(0,r.kt)("p",null,"Input required to set or clear information related to rendering a Data Asset inside of DataHub."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The URN associated with the Data Asset to update. Only dataset, dashboard, and chart urns are currently supported."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"renderUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Set or clear a URL used to render an embedded asset."))))),(0,r.kt)("h2",{id:"updateglobalviewssettingsinput"},"UpdateGlobalViewsSettingsInput"),(0,r.kt)("p",null,"Input required to update Global View Settings."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"defaultView",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The URN of the View that serves as the Global, or organization-wide, default. 
If this field is not provided, the existing Global Default will be cleared."))))),(0,r.kt)("h2",{id:"updateingestionsourceconfiginput"},"UpdateIngestionSourceConfigInput"),(0,r.kt)("p",null,"Input parameters for creating / updating an Ingestion Source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"recipe",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A JSON-encoded recipe"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The version of DataHub Ingestion Framework to use when executing the recipe."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executorId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The id of the executor to use for executing the recipe"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"debugMode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not to run ingestion in debug mode"))))),(0,r.kt)("h2",{id:"updateingestionsourceinput"},"UpdateIngestionSourceInput"),(0,r.kt)("p",null,"Input arguments for creating / updating an Ingestion 
Source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A name associated with the ingestion source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the source itself, e.g. mysql, bigquery, bigquery-usage. Should match the recipe."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description associated with the ingestion source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schedule",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#updateingestionsourcescheduleinput"},(0,r.kt)("code",null,"UpdateIngestionSourceScheduleInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional schedule for the ingestion source. 
If not provided, the source is only available for run on-demand."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"config",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#updateingestionsourceconfiginput"},(0,r.kt)("code",null,"UpdateIngestionSourceConfigInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of type-specific ingestion source configurations"))))),(0,r.kt)("h2",{id:"updateingestionsourcescheduleinput"},"UpdateIngestionSourceScheduleInput"),(0,r.kt)("p",null,"Input arguments for creating / updating the schedule of an Ingestion Source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"interval",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The cron-formatted interval describing when the job should be executed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"timezone",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the timezone in which the cron interval should be scheduled (e.g. 
America/Los Angeles)"))))),(0,r.kt)("h2",{id:"updatelineageinput"},"UpdateLineageInput"),(0,r.kt)("p",null,"Input required in order to upsert lineage edges"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"edgesToAdd",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageedge"},(0,r.kt)("code",null,"[LineageEdge]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"New lineage edges to upsert"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"edgesToRemove",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageedge"},(0,r.kt)("code",null,"[LineageEdge]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Lineage edges to remove. Takes precedence over edgesToAdd - so edges existing both edgesToAdd and edgesToRemove will be removed."))))),(0,r.kt)("h2",{id:"updatemediainput"},"UpdateMediaInput"),(0,r.kt)("p",null,"Input provided for filling in a post content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#mediatype"},(0,r.kt)("code",null,"MediaType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of media"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"location",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The location of the media (a URL)"))))),(0,r.kt)("h2",{id:"updatenameinput"},"UpdateNameInput"),(0,r.kt)("p",null,"Input for updating the name of an 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The new name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the resource to update the name for"))))),(0,r.kt)("h2",{id:"updateownershiptypeinput"},"UpdateOwnershipTypeInput"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Custom Ownership Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the Custom Ownership Type"))))),(0,r.kt)("h2",{id:"updateparentnodeinput"},"UpdateParentNodeInput"),(0,r.kt)("p",null,"Input for updating the parent node of a resource. 
Currently only GlossaryNodes and GlossaryTerms have parentNodes."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentNode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The new parent node urn. If parentNode is null, this will remove the parent from this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the resource to update the parent node for"))))),(0,r.kt)("h2",{id:"updatepostcontentinput"},"UpdatePostContentInput"),(0,r.kt)("p",null,"Input provided for filling in a post content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"contentType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#postcontenttype"},(0,r.kt)("code",null,"PostContentType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of post content"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The title of the post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional content of the 
post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"link",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional link that the post is associated with"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"media",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#updatemediainput"},(0,r.kt)("code",null,"UpdateMediaInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional media contained in the post"))))),(0,r.kt)("h2",{id:"updatequeryinput"},"UpdateQueryInput"),(0,r.kt)("p",null,"Input required for updating an existing Query. Requires the 'Edit Queries' privilege for all query subjects."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#updatequerypropertiesinput"},(0,r.kt)("code",null,"UpdateQueryPropertiesInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties about the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subjects",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#updatequerysubjectinput"},(0,r.kt)("code",null,"[UpdateQuerySubjectInput!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Subjects for the query"))))),(0,r.kt)("h2",{id:"updatequerypropertiesinput"},"UpdateQueryPropertiesInput"),(0,r.kt)("p",null,"Input properties required for creating a Query. 
Any non-null fields will be updated if provided."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional display name for the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description for the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"statement",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#querystatementinput"},(0,r.kt)("code",null,"QueryStatementInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Query contents"))))),(0,r.kt)("h2",{id:"updatequerysubjectinput"},"UpdateQuerySubjectInput"),(0,r.kt)("p",null,"Input required for creating a Query. 
For now, only datasets are supported."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the dataset that is the subject of the query"))))),(0,r.kt)("h2",{id:"updatetestinput"},"UpdateTestInput"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"category",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The category of the Test (user defined)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#testdefinitioninput"},(0,r.kt)("code",null,"TestDefinitionInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The test definition"))))),(0,r.kt)("h2",{id:"updateusersettinginput"},"UpdateUserSettingInput"),(0,r.kt)("p",null,"Input for updating a user 
setting"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#usersetting"},(0,r.kt)("code",null,"UserSetting!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the setting"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The new value of the setting"))))),(0,r.kt)("h2",{id:"updateviewinput"},"UpdateViewInput"),(0,r.kt)("p",null,"Input provided when updating a DataHub View"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#datahubviewdefinitioninput"},(0,r.kt)("code",null,"DataHubViewDefinitionInput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The view definition itself"))))))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/2ae2f03c.2b36dabe.js b/assets/js/2ae2f03c.2b36dabe.js deleted file mode 100644 index 21b9ab5c0aa99..0000000000000 --- a/assets/js/2ae2f03c.2b36dabe.js +++ /dev/null 
@@ -1 +0,0 @@ -"use strict";(self.webpackChunkdocs_website=self.webpackChunkdocs_website||[]).push([[135],{3905:(t,l,e)=>{e.d(l,{Zo:()=>i,kt:()=>c});var n=e(67294);function r(t,l,e){return l in t?Object.defineProperty(t,l,{value:e,enumerable:!0,configurable:!0,writable:!0}):t[l]=e,t}function a(t,l){var e=Object.keys(t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(t);l&&(n=n.filter((function(l){return Object.getOwnPropertyDescriptor(t,l).enumerable}))),e.push.apply(e,n)}return e}function u(t){for(var l=1;l=0||(r[e]=t[e]);return r}(t,l);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(t);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(t,e)&&(r[e]=t[e])}return r}var k=n.createContext({}),o=function(t){var l=n.useContext(k),e=l;return t&&(e="function"==typeof t?t(l):u(u({},l),t)),e},i=function(t){var l=o(t.components);return n.createElement(k.Provider,{value:l},t.children)},d={inlineCode:"code",wrapper:function(t){var l=t.children;return n.createElement(n.Fragment,{},l)}},h=n.forwardRef((function(t,l){var e=t.components,r=t.mdxType,a=t.originalType,k=t.parentName,i=s(t,["components","mdxType","originalType","parentName"]),h=o(e),c=r,p=h["".concat(k,".").concat(c)]||h[c]||d[c]||a;return e?n.createElement(p,u(u({ref:l},i),{},{components:e})):n.createElement(p,u({ref:l},i))}));function c(t,l){var e=arguments,r=l&&l.mdxType;if("string"==typeof t||r){var a=e.length,u=new Array(a);u[0]=h;var s={};for(var k in l)hasOwnProperty.call(l,k)&&(s[k]=l[k]);s.originalType=t,s.mdxType="string"==typeof t?t:r,u[1]=s;for(var o=2;o{e.r(l),e.d(l,{assets:()=>k,contentTitle:()=>u,default:()=>d,frontMatter:()=>a,metadata:()=>s,toc:()=>o});var n=e(83117),r=(e(67294),e(3905));const a={id:"objects",title:"Objects",slug:"objects",sidebar_position:3},u=void 
0,s={unversionedId:"graphql/objects",id:"graphql/objects",title:"Objects",description:"Access",source:"@site/genDocs/graphql/objects.md",sourceDirName:"graphql",slug:"/graphql/objects",permalink:"/docs/graphql/objects",draft:!1,editUrl:"https://github.com/datahub-project/datahub/blob/master/genDocs/graphql/objects.md",tags:[],version:"current",sidebarPosition:3,frontMatter:{id:"objects",title:"Objects",slug:"objects",sidebar_position:3},sidebar:"overviewSidebar",previous:{title:"Mutations",permalink:"/docs/graphql/mutations"},next:{title:"Input objects",permalink:"/docs/graphql/inputObjects"}},k={},o=[{value:"Access",id:"access",level:2},{value:"AccessToken",id:"accesstoken",level:2},{value:"AccessTokenMetadata",id:"accesstokenmetadata",level:2},{value:"Actor",id:"actor",level:2},{value:"ActorFilter",id:"actorfilter",level:2},{value:"AggregateResults",id:"aggregateresults",level:2},{value:"AggregationMetadata",id:"aggregationmetadata",level:2},{value:"AnalyticsChartGroup",id:"analyticschartgroup",level:2},{value:"AnalyticsConfig",id:"analyticsconfig",level:2},{value:"AppConfig",id:"appconfig",level:2},{value:"AspectRenderSpec",id:"aspectrenderspec",level:2},{value:"Assertion",id:"assertion",level:2},{value:"AssertionInfo",id:"assertioninfo",level:2},{value:"AssertionResult",id:"assertionresult",level:2},{value:"AssertionRunEvent",id:"assertionrunevent",level:2},{value:"AssertionRunEventsResult",id:"assertionruneventsresult",level:2},{value:"AssertionStdParameter",id:"assertionstdparameter",level:2},{value:"AssertionStdParameters",id:"assertionstdparameters",level:2},{value:"AuditStamp",id:"auditstamp",level:2},{value:"AuthConfig",id:"authconfig",level:2},{value:"AuthenticatedUser",id:"authenticateduser",level:2},{value:"AutoCompleteMultipleResults",id:"autocompletemultipleresults",level:2},{value:"AutoCompleteResultForEntity",id:"autocompleteresultforentity",level:2},{value:"AutoCompleteResults",id:"autocompleteresults",level:2},{value:"BarChart",id:"barchart",level
:2},{value:"BarSegment",id:"barsegment",level:2},{value:"BaseData",id:"basedata",level:2},{value:"BatchGetStepStatesResult",id:"batchgetstepstatesresult",level:2},{value:"BatchSpec",id:"batchspec",level:2},{value:"BatchUpdateStepStatesResult",id:"batchupdatestepstatesresult",level:2},{value:"BooleanBox",id:"booleanbox",level:2},{value:"BrowsePath",id:"browsepath",level:2},{value:"BrowsePathEntry",id:"browsepathentry",level:2},{value:"BrowsePathV2",id:"browsepathv2",level:2},{value:"BrowseResultGroup",id:"browseresultgroup",level:2},{value:"BrowseResultGroupV2",id:"browseresultgroupv2",level:2},{value:"BrowseResultMetadata",id:"browseresultmetadata",level:2},{value:"BrowseResults",id:"browseresults",level:2},{value:"BrowseResultsV2",id:"browseresultsv2",level:2},{value:"CaveatDetails",id:"caveatdetails",level:2},{value:"CaveatsAndRecommendations",id:"caveatsandrecommendations",level:2},{value:"Cell",id:"cell",level:2},{value:"ChangeAuditStamps",id:"changeauditstamps",level:2},{value:"Chart",id:"chart",level:2},{value:"ChartCell",id:"chartcell",level:2},{value:"ChartEditableProperties",id:"charteditableproperties",level:2},{value:"ChartInfo",id:"chartinfo",level:2},{value:"ChartProperties",id:"chartproperties",level:2},{value:"ChartQuery",id:"chartquery",level:2},{value:"ChartStatsSummary",id:"chartstatssummary",level:2},{value:"Container",id:"container",level:2},{value:"ContainerEditableProperties",id:"containereditableproperties",level:2},{value:"ContainerProperties",id:"containerproperties",level:2},{value:"ContentParams",id:"contentparams",level:2},{value:"CorpGroup",id:"corpgroup",level:2},{value:"CorpGroupEditableProperties",id:"corpgroupeditableproperties",level:2},{value:"CorpGroupInfo",id:"corpgroupinfo",level:2},{value:"CorpGroupProperties",id:"corpgroupproperties",level:2},{value:"CorpUser",id:"corpuser",level:2},{value:"CorpUserAppearanceSettings",id:"corpuserappearancesettings",level:2},{value:"CorpUserEditableInfo",id:"corpusereditableinfo",level:2},{val
ue:"CorpUserEditableProperties",id:"corpusereditableproperties",level:2},{value:"CorpUserInfo",id:"corpuserinfo",level:2},{value:"CorpUserProperties",id:"corpuserproperties",level:2},{value:"CorpUserSettings",id:"corpusersettings",level:2},{value:"CorpUserViewsSettings",id:"corpuserviewssettings",level:2},{value:"Cost",id:"cost",level:2},{value:"CostValue",id:"costvalue",level:2},{value:"CustomPropertiesEntry",id:"custompropertiesentry",level:2},{value:"Dashboard",id:"dashboard",level:2},{value:"DashboardEditableProperties",id:"dashboardeditableproperties",level:2},{value:"DashboardInfo",id:"dashboardinfo",level:2},{value:"DashboardProperties",id:"dashboardproperties",level:2},{value:"DashboardStatsSummary",id:"dashboardstatssummary",level:2},{value:"DashboardUsageAggregation",id:"dashboardusageaggregation",level:2},{value:"DashboardUsageAggregationMetrics",id:"dashboardusageaggregationmetrics",level:2},{value:"DashboardUsageMetrics",id:"dashboardusagemetrics",level:2},{value:"DashboardUsageQueryResult",id:"dashboardusagequeryresult",level:2},{value:"DashboardUsageQueryResultAggregations",id:"dashboardusagequeryresultaggregations",level:2},{value:"DashboardUserUsageCounts",id:"dashboarduserusagecounts",level:2},{value:"DataFlow",id:"dataflow",level:2},{value:"DataFlowDataJobsRelationships",id:"dataflowdatajobsrelationships",level:2},{value:"DataFlowEditableProperties",id:"datafloweditableproperties",level:2},{value:"DataFlowInfo",id:"dataflowinfo",level:2},{value:"DataFlowProperties",id:"dataflowproperties",level:2},{value:"DataHubPolicy",id:"datahubpolicy",level:2},{value:"DataHubRole",id:"datahubrole",level:2},{value:"DataHubView",id:"datahubview",level:2},{value:"DataHubViewDefinition",id:"datahubviewdefinition",level:2},{value:"DataHubViewFilter",id:"datahubviewfilter",level:2},{value:"DataJob",id:"datajob",level:2},{value:"DataJobEditableProperties",id:"datajobeditableproperties",level:2},{value:"DataJobInfo",id:"datajobinfo",level:2},{value:"DataJobInputOutput
",id:"datajobinputoutput",level:2},{value:"DataJobProperties",id:"datajobproperties",level:2},{value:"DataPlatform",id:"dataplatform",level:2},{value:"DataPlatformInfo",id:"dataplatforminfo",level:2},{value:"DataPlatformInstance",id:"dataplatforminstance",level:2},{value:"DataPlatformInstanceProperties",id:"dataplatforminstanceproperties",level:2},{value:"DataPlatformProperties",id:"dataplatformproperties",level:2},{value:"DataProcessInstance",id:"dataprocessinstance",level:2},{value:"DataProcessInstanceResult",id:"dataprocessinstanceresult",level:2},{value:"DataProcessInstanceRunResult",id:"dataprocessinstancerunresult",level:2},{value:"DataProcessRunEvent",id:"dataprocessrunevent",level:2},{value:"DataProduct",id:"dataproduct",level:2},{value:"DataProductProperties",id:"dataproductproperties",level:2},{value:"Dataset",id:"dataset",level:2},{value:"DatasetAssertionInfo",id:"datasetassertioninfo",level:2},{value:"DatasetDeprecation",id:"datasetdeprecation",level:2},{value:"DatasetEditableProperties",id:"dataseteditableproperties",level:2},{value:"DatasetFieldProfile",id:"datasetfieldprofile",level:2},{value:"DatasetProfile",id:"datasetprofile",level:2},{value:"DatasetProperties",id:"datasetproperties",level:2},{value:"DatasetStatsSummary",id:"datasetstatssummary",level:2},{value:"DateRange",id:"daterange",level:2},{value:"Deprecation",id:"deprecation",level:2},{value:"Domain",id:"domain",level:2},{value:"DomainAssociation",id:"domainassociation",level:2},{value:"DomainProperties",id:"domainproperties",level:2},{value:"DownstreamEntityRelationships",id:"downstreamentityrelationships",level:2},{value:"EditableSchemaFieldInfo",id:"editableschemafieldinfo",level:2},{value:"EditableSchemaMetadata",id:"editableschemametadata",level:2},{value:"EditableTagProperties",id:"editabletagproperties",level:2},{value:"Embed",id:"embed",level:2},{value:"EntityAssertionsResult",id:"entityassertionsresult",level:2},{value:"EntityCountResult",id:"entitycountresult",level:2},{value:"Ent
ityCountResults",id:"entitycountresults",level:2},{value:"EntityLineageResult",id:"entitylineageresult",level:2},{value:"EntityPath",id:"entitypath",level:2},{value:"EntityPrivileges",id:"entityprivileges",level:2},{value:"EntityProfileConfig",id:"entityprofileconfig",level:2},{value:"EntityProfileParams",id:"entityprofileparams",level:2},{value:"EntityProfilesConfig",id:"entityprofilesconfig",level:2},{value:"EntityRelationship",id:"entityrelationship",level:2},{value:"EntityRelationshipLegacy",id:"entityrelationshiplegacy",level:2},{value:"EntityRelationshipsResult",id:"entityrelationshipsresult",level:2},{value:"EthicalConsiderations",id:"ethicalconsiderations",level:2},{value:"ExecutionRequest",id:"executionrequest",level:2},{value:"ExecutionRequestInput",id:"executionrequestinput",level:2},{value:"ExecutionRequestResult",id:"executionrequestresult",level:2},{value:"ExecutionRequestSource",id:"executionrequestsource",level:2},{value:"FacetFilter",id:"facetfilter",level:2},{value:"FacetMetadata",id:"facetmetadata",level:2},{value:"FeatureFlagsConfig",id:"featureflagsconfig",level:2},{value:"FieldUsageCounts",id:"fieldusagecounts",level:2},{value:"FineGrainedLineage",id:"finegrainedlineage",level:2},{value:"FloatBox",id:"floatbox",level:2},{value:"ForeignKeyConstraint",id:"foreignkeyconstraint",level:2},{value:"FreshnessStats",id:"freshnessstats",level:2},{value:"GetQuickFiltersResult",id:"getquickfiltersresult",level:2},{value:"GetRootGlossaryNodesResult",id:"getrootglossarynodesresult",level:2},{value:"GetRootGlossaryTermsResult",id:"getrootglossarytermsresult",level:2},{value:"GetSchemaBlameResult",id:"getschemablameresult",level:2},{value:"GetSchemaVersionListResult",id:"getschemaversionlistresult",level:2},{value:"GlobalTags",id:"globaltags",level:2},{value:"GlobalViewsSettings",id:"globalviewssettings",level:2},{value:"GlossaryNode",id:"glossarynode",level:2},{value:"GlossaryNodeProperties",id:"glossarynodeproperties",level:2},{value:"GlossaryTerm",id:"gloss
aryterm",level:2},{value:"GlossaryTermAssociation",id:"glossarytermassociation",level:2},{value:"GlossaryTermInfo",id:"glossaryterminfo",level:2},{value:"GlossaryTermProperties",id:"glossarytermproperties",level:2},{value:"GlossaryTerms",id:"glossaryterms",level:2},{value:"Health",id:"health",level:2},{value:"Highlight",id:"highlight",level:2},{value:"HyperParameterMap",id:"hyperparametermap",level:2},{value:"IdentityManagementConfig",id:"identitymanagementconfig",level:2},{value:"IngestionConfig",id:"ingestionconfig",level:2},{value:"IngestionRun",id:"ingestionrun",level:2},{value:"IngestionSchedule",id:"ingestionschedule",level:2},{value:"IngestionSource",id:"ingestionsource",level:2},{value:"IngestionSourceExecutionRequests",id:"ingestionsourceexecutionrequests",level:2},{value:"InputField",id:"inputfield",level:2},{value:"InputFields",id:"inputfields",level:2},{value:"InstitutionalMemory",id:"institutionalmemory",level:2},{value:"InstitutionalMemoryMetadata",id:"institutionalmemorymetadata",level:2},{value:"IntBox",id:"intbox",level:2},{value:"IntendedUse",id:"intendeduse",level:2},{value:"InviteToken",id:"invitetoken",level:2},{value:"KeyValueSchema",id:"keyvalueschema",level:2},{value:"LineageConfig",id:"lineageconfig",level:2},{value:"LineageRelationship",id:"lineagerelationship",level:2},{value:"LinkParams",id:"linkparams",level:2},{value:"ListAccessTokenResult",id:"listaccesstokenresult",level:2},{value:"ListDomainsResult",id:"listdomainsresult",level:2},{value:"ListGroupsResult",id:"listgroupsresult",level:2},{value:"ListIngestionSourcesResult",id:"listingestionsourcesresult",level:2},{value:"ListOwnershipTypesResult",id:"listownershiptypesresult",level:2},{value:"ListPoliciesResult",id:"listpoliciesresult",level:2},{value:"ListPostsResult",id:"listpostsresult",level:2},{value:"ListQueriesResult",id:"listqueriesresult",level:2},{value:"ListRecommendationsResult",id:"listrecommendationsresult",level:2},{value:"ListRolesResult",id:"listrolesresult",level:2},
{value:"ListSecretsResult",id:"listsecretsresult",level:2},{value:"ListTestsResult",id:"listtestsresult",level:2},{value:"ListUsersResult",id:"listusersresult",level:2},{value:"ListViewsResult",id:"listviewsresult",level:2},{value:"ManagedIngestionConfig",id:"managedingestionconfig",level:2},{value:"MatchedField",id:"matchedfield",level:2},{value:"Media",id:"media",level:2},{value:"Metrics",id:"metrics",level:2},{value:"MLFeature",id:"mlfeature",level:2},{value:"MLFeatureEditableProperties",id:"mlfeatureeditableproperties",level:2},{value:"MLFeatureProperties",id:"mlfeatureproperties",level:2},{value:"MLFeatureTable",id:"mlfeaturetable",level:2},{value:"MLFeatureTableEditableProperties",id:"mlfeaturetableeditableproperties",level:2},{value:"MLFeatureTableProperties",id:"mlfeaturetableproperties",level:2},{value:"MLHyperParam",id:"mlhyperparam",level:2},{value:"MLMetric",id:"mlmetric",level:2},{value:"MLModel",id:"mlmodel",level:2},{value:"MLModelEditableProperties",id:"mlmodeleditableproperties",level:2},{value:"MLModelFactorPrompts",id:"mlmodelfactorprompts",level:2},{value:"MLModelFactors",id:"mlmodelfactors",level:2},{value:"MLModelGroup",id:"mlmodelgroup",level:2},{value:"MLModelGroupEditableProperties",id:"mlmodelgroupeditableproperties",level:2},{value:"MLModelGroupProperties",id:"mlmodelgroupproperties",level:2},{value:"MLModelProperties",id:"mlmodelproperties",level:2},{value:"MLPrimaryKey",id:"mlprimarykey",level:2},{value:"MLPrimaryKeyEditableProperties",id:"mlprimarykeyeditableproperties",level:2},{value:"MLPrimaryKeyProperties",id:"mlprimarykeyproperties",level:2},{value:"NamedBar",id:"namedbar",level:2},{value:"NamedLine",id:"namedline",level:2},{value:"Notebook",id:"notebook",level:2},{value:"NotebookCell",id:"notebookcell",level:2},{value:"NotebookContent",id:"notebookcontent",level:2},{value:"NotebookEditableProperties",id:"notebookeditableproperties",level:2},{value:"NotebookInfo",id:"notebookinfo",level:2},{value:"NumericDataPoint",id:"numericdatap
oint",level:2},{value:"Operation",id:"operation",level:2},{value:"Origin",id:"origin",level:2},{value:"Owner",id:"owner",level:2},{value:"Ownership",id:"ownership",level:2},{value:"OwnershipSource",id:"ownershipsource",level:2},{value:"OwnershipTypeEntity",id:"ownershiptypeentity",level:2},{value:"OwnershipTypeInfo",id:"ownershiptypeinfo",level:2},{value:"ParentContainersResult",id:"parentcontainersresult",level:2},{value:"ParentNodesResult",id:"parentnodesresult",level:2},{value:"PartitionSpec",id:"partitionspec",level:2},{value:"PlatformPrivileges",id:"platformprivileges",level:2},{value:"PoliciesConfig",id:"policiesconfig",level:2},{value:"Policy",id:"policy",level:2},{value:"PolicyMatchCriterion",id:"policymatchcriterion",level:2},{value:"PolicyMatchCriterionValue",id:"policymatchcriterionvalue",level:2},{value:"PolicyMatchFilter",id:"policymatchfilter",level:2},{value:"Post",id:"post",level:2},{value:"PostContent",id:"postcontent",level:2},{value:"Privilege",id:"privilege",level:2},{value:"Privileges",id:"privileges",level:2},{value:"QuantitativeAnalyses",id:"quantitativeanalyses",level:2},{value:"QueriesTabConfig",id:"queriestabconfig",level:2},{value:"QueryCell",id:"querycell",level:2},{value:"QueryEntity",id:"queryentity",level:2},{value:"QueryProperties",id:"queryproperties",level:2},{value:"QueryStatement",id:"querystatement",level:2},{value:"QuerySubject",id:"querysubject",level:2},{value:"QuickFilter",id:"quickfilter",level:2},{value:"RawAspect",id:"rawaspect",level:2},{value:"RecommendationContent",id:"recommendationcontent",level:2},{value:"RecommendationModule",id:"recommendationmodule",level:2},{value:"RecommendationParams",id:"recommendationparams",level:2},{value:"ResetToken",id:"resettoken",level:2},{value:"ResourceFilter",id:"resourcefilter",level:2},{value:"ResourcePrivileges",id:"resourceprivileges",level:2},{value:"Role",id:"role",level:2},{value:"RoleAssociation",id:"roleassociation",level:2},{value:"RoleProperties",id:"roleproperties",level:
2},{value:"RoleUser",id:"roleuser",level:2},{value:"Row",id:"row",level:2},{value:"Schema",id:"schema",level:2},{value:"SchemaField",id:"schemafield",level:2},{value:"SchemaFieldBlame",id:"schemafieldblame",level:2},{value:"SchemaFieldChange",id:"schemafieldchange",level:2},{value:"SchemaFieldEntity",id:"schemafieldentity",level:2},{value:"SchemaFieldRef",id:"schemafieldref",level:2},{value:"SchemaMetadata",id:"schemametadata",level:2},{value:"ScrollAcrossLineageResults",id:"scrollacrosslineageresults",level:2},{value:"ScrollResults",id:"scrollresults",level:2},{value:"SearchAcrossLineageResult",id:"searchacrosslineageresult",level:2},{value:"SearchAcrossLineageResults",id:"searchacrosslineageresults",level:2},{value:"SearchInsight",id:"searchinsight",level:2},{value:"SearchParams",id:"searchparams",level:2},{value:"SearchResult",id:"searchresult",level:2},{value:"SearchResults",id:"searchresults",level:2},{value:"SearchResultsVisualConfig",id:"searchresultsvisualconfig",level:2},{value:"Secret",id:"secret",level:2},{value:"SecretValue",id:"secretvalue",level:2},{value:"SemanticVersionStruct",id:"semanticversionstruct",level:2},{value:"SiblingProperties",id:"siblingproperties",level:2},{value:"SourceCode",id:"sourcecode",level:2},{value:"SourceCodeUrl",id:"sourcecodeurl",level:2},{value:"Status",id:"status",level:2},{value:"StepStateResult",id:"stepstateresult",level:2},{value:"StringBox",id:"stringbox",level:2},{value:"StringMapEntry",id:"stringmapentry",level:2},{value:"StructuredReport",id:"structuredreport",level:2},{value:"SubTypes",id:"subtypes",level:2},{value:"SystemFreshness",id:"systemfreshness",level:2},{value:"TableChart",id:"tablechart",level:2},{value:"TableSchema",id:"tableschema",level:2},{value:"Tag",id:"tag",level:2},{value:"TagAssociation",id:"tagassociation",level:2},{value:"TagProperties",id:"tagproperties",level:2},{value:"TelemetryConfig",id:"telemetryconfig",level:2},{value:"Test",id:"test",level:2},{value:"TestDefinition",id:"testdefinition"
,level:2},{value:"TestResult",id:"testresult",level:2},{value:"TestResults",id:"testresults",level:2},{value:"TestsConfig",id:"testsconfig",level:2},{value:"TextCell",id:"textcell",level:2},{value:"TimeSeriesChart",id:"timeserieschart",level:2},{value:"TimeWindow",id:"timewindow",level:2},{value:"UpdateStepStateResult",id:"updatestepstateresult",level:2},{value:"UpstreamEntityRelationships",id:"upstreamentityrelationships",level:2},{value:"UsageAggregation",id:"usageaggregation",level:2},{value:"UsageAggregationMetrics",id:"usageaggregationmetrics",level:2},{value:"UsageQueryResult",id:"usagequeryresult",level:2},{value:"UsageQueryResultAggregations",id:"usagequeryresultaggregations",level:2},{value:"UserUsageCounts",id:"userusagecounts",level:2},{value:"VersionedDataset",id:"versioneddataset",level:2},{value:"VersionTag",id:"versiontag",level:2},{value:"ViewProperties",id:"viewproperties",level:2},{value:"ViewsConfig",id:"viewsconfig",level:2},{value:"VisualConfig",id:"visualconfig",level:2}],i={toc:o};function 
d(t){let{components:l,...e}=t;return(0,r.kt)("wrapper",(0,n.Z)({},i,e,{components:l,mdxType:"MDXLayout"}),(0,r.kt)("h2",{id:"access"},"Access"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"roles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#roleassociation"},(0,r.kt)("code",null,"[RoleAssociation!]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"accesstoken"},"AccessToken"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"accessToken",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The access token itself"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#accesstokenmetadata"},(0,r.kt)("code",null,"AccessTokenMetadata"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Metadata about the generated 
token"))))),(0,r.kt)("h2",{id:"accesstokenmetadata"},"AccessTokenMetadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the access token"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique identifier of the token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the token, if it exists."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the token if defined."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actorUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actor associated with the Access 
Token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actor who created the Access Token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time when token was generated at."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"expiresAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Time when token will be expired."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"actor"},"Actor"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#roleuser"},(0,r.kt)("code",null,"[RoleUser!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of users for which the role is 
provisioned"))))),(0,r.kt)("h2",{id:"actorfilter"},"ActorFilter"),(0,r.kt)("p",null,"The actors that a DataHub Access Policy applies to"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A disjunctive set of users to apply the policy to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A disjunctive set of groups to apply the policy to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"roles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A disjunctive set of roles to apply the policy to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceOwners",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the filter should return TRUE for owners of a particular resource Only applies to policies of type METADATA, which have a resource associated with them"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceOwnersTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Set of OwnershipTypes to apply the policy to (if resourceOwners field is set to True)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resolvedOwnershipTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownershiptypeentity"},(0,r.kt)("code",null,"[OwnershipTypeEntity!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Set 
of OwnershipTypes to apply the policy to (if resourceOwners field is set to True), resolved."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"allUsers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the filter should apply to all users"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"allGroups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the filter should apply to all groups"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resolvedUsers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The list of users on the Policy, resolved."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resolvedGroups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpgroup"},(0,r.kt)("code",null,"[CorpGroup!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The list of groups on the Policy, resolved."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resolvedRoles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datahubrole"},(0,r.kt)("code",null,"[DataHubRole!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The list of roles on the Policy, resolved."))))),(0,r.kt)("h2",{id:"aggregateresults"},"AggregateResults"),(0,r.kt)("p",null,"Results returned from aggregateAcrossEntities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"facets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetmetadata"},(0,r.kt)("code",null,"[FacetMetadata!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Candidate facet aggregations used for search 
filtering"))))),(0,r.kt)("h2",{id:"aggregationmetadata"},"AggregationMetadata"),(0,r.kt)("p",null,"Information about the aggregation that can be used for filtering, included the field value and number of results"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A particular value of a facet field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of search results containing the value"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity corresponding to the facet field"))))),(0,r.kt)("h2",{id:"analyticschartgroup"},"AnalyticsChartGroup"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groupId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"charts",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#analyticschart"},(0,r.kt)("code",null,"[AnalyticsChart!]!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"analyticsconfig"},"AnalyticsConfig"),(0,r.kt)("p",null,"Configurations related to the Analytics Feature"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the Analytics feature is enabled and should be displayed"))))),(0,r.kt)("h2",{id:"appconfig"},"AppConfig"),(0,r.kt)("p",null,"Config loaded at application boot time\nThis configuration dictates the behavior of the UI, such as which features are enabled or 
disabled"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"appVersion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"App version"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"authConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#authconfig"},(0,r.kt)("code",null,"AuthConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Auth-related configurations"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"analyticsConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#analyticsconfig"},(0,r.kt)("code",null,"AnalyticsConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to the Analytics Feature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"policiesConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#policiesconfig"},(0,r.kt)("code",null,"PoliciesConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to the Policies Feature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"identityManagementConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#identitymanagementconfig"},(0,r.kt)("code",null,"IdentityManagementConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to the User & Group management"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"managedIngestionConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#managedingestionconfig"},(0,r.kt)("code",null,"ManagedIngestionConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to UI-based 
ingestion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineageConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#lineageconfig"},(0,r.kt)("code",null,"LineageConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to Lineage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"visualConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#visualconfig"},(0,r.kt)("code",null,"VisualConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to visual appearance, allows styling the UI without rebuilding the bundle"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"telemetryConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#telemetryconfig"},(0,r.kt)("code",null,"TelemetryConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to tracking users in the app"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"testsConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#testsconfig"},(0,r.kt)("code",null,"TestsConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to DataHub tests"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewsConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#viewsconfig"},(0,r.kt)("code",null,"ViewsConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to DataHub Views"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"featureFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#featureflagsconfig"},(0,r.kt)("code",null,"FeatureFlagsConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Feature flags telling the UI whether a feature is enabled or not"))))),(0,r.kt)("h2",{id:"aspectrenderspec"},"AspectRenderSpec"),(0,r.kt)("p",null,"Details for the frontend on how the raw aspect should be 
rendered"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Format the aspect should be displayed in for the UI. Powered by the renderSpec annotation on the aspect model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name to refer to the aspect type by for the UI. Powered by the renderSpec annotation on the aspect model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"key",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Field in the aspect payload to index into for rendering."))))),(0,r.kt)("h2",{id:"assertion"},"Assertion"),(0,r.kt)("p",null,"An assertion represents a programmatic validation, check, or test performed periodically against another 
Entity."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Assertion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the assertion is evaluated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertioninfo"},(0,r.kt)("code",null,"AssertionInfo"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Details about assertion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runEvents",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionruneventsresult"},(0,r.kt)("code",null,"AssertionRunEventsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Lifecycle events detailing individual runs of this assertion. If startTimeMillis & endTimeMillis are not provided, the most recent events will be returned."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertionrunstatus"},(0,r.kt)("code",null,"AssertionRunStatus"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#filterinput"},(0,r.kt)("code",null,"FilterInput"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"assertioninfo"},"AssertionInfo"),(0,r.kt)("p",null,"Type of assertion. 
Assertion types can evolve to span Datasets, Flows (Pipelines), Models, Features etc."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertiontype"},(0,r.kt)("code",null,"AssertionType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Top-level type of the assertion."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetAssertion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datasetassertioninfo"},(0,r.kt)("code",null,"DatasetAssertionInfo"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Dataset-specific assertion information"))))),(0,r.kt)("h2",{id:"assertionresult"},"AssertionResult"),(0,r.kt)("p",null,"The result of evaluating an assertion."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertionresulttype"},(0,r.kt)("code",null,"AssertionResultType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The final result, e.g. 
either SUCCESS or FAILURE."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"rowCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of rows for evaluated batch"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"missingCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of rows with missing value for evaluated batch"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"unexpectedCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of rows with unexpected value for evaluated batch"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actualAggValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#float"},(0,r.kt)("code",null,"Float"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Observed aggregate value for evaluated batch"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"URL where full results are available"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nativeResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native results / properties of evaluation"))))),(0,r.kt)("h2",{id:"assertionrunevent"},"AssertionRunEvent"),(0,r.kt)("p",null,"An event representing an event in the assertion evaluation 
lifecycle."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#timeseriesaspect"},"TimeSeriesAspect"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the assertion was evaluated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"assertionUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of assertion which is evaluated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"asserteeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of entity on which the assertion is applicable"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native (platform-specific) identifier for this run"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertionrunstatus"},(0,r.kt)("code",null,"AssertionRunStatus!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The status of the assertion run as per this timeseries event."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"batchSpec",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#batchspec"},(0,r.kt)("code",null,"BatchSpec"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Specification of the batch which this run is 
evaluating"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"partitionSpec",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#partitionspec"},(0,r.kt)("code",null,"PartitionSpec"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Information about the partition that was evaluated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runtimeContext",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Runtime parameters of evaluation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"result",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionresult"},(0,r.kt)("code",null,"AssertionResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Results of assertion, present if the status is COMPLETE"))))),(0,r.kt)("h2",{id:"assertionruneventsresult"},"AssertionRunEventsResult"),(0,r.kt)("p",null,"Result returned when fetching run events for an assertion."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of run events returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"failed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of failed run events"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"succeeded",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of succeeded run 
events"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runEvents",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionrunevent"},(0,r.kt)("code",null,"[AssertionRunEvent!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The run events themselves"))))),(0,r.kt)("h2",{id:"assertionstdparameter"},"AssertionStdParameter"),(0,r.kt)("p",null,"Parameter for AssertionStdOperator."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The parameter value"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertionstdparametertype"},(0,r.kt)("code",null,"AssertionStdParameterType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the parameter"))))),(0,r.kt)("h2",{id:"assertionstdparameters"},"AssertionStdParameters"),(0,r.kt)("p",null,"Parameters for AssertionStdOperators"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionstdparameter"},(0,r.kt)("code",null,"AssertionStdParameter"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The value parameter of an assertion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"maxValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionstdparameter"},(0,r.kt)("code",null,"AssertionStdParameter"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maxValue parameter of an 
assertion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"minValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionstdparameter"},(0,r.kt)("code",null,"AssertionStdParameter"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The minValue parameter of an assertion"))))),(0,r.kt)("h2",{id:"auditstamp"},"AuditStamp"),(0,r.kt)("p",null,"A time stamp along with an optional actor"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"time",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"When the audited action took place"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Who performed the audited action"))))),(0,r.kt)("h2",{id:"authconfig"},"AuthConfig"),(0,r.kt)("p",null,"Configurations related to auth"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tokenAuthEnabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether token-based auth is enabled."))))),(0,r.kt)("h2",{id:"authenticateduser"},"AuthenticatedUser"),(0,r.kt)("p",null,"Information about the currently authenticated 
user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"corpUser",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The user information associated with the authenticated user, including properties used in rendering the profile"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformPrivileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#platformprivileges"},(0,r.kt)("code",null,"PlatformPrivileges!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The privileges assigned to the currently authenticated user, which dictates which parts of the UI they should be able to use"))))),(0,r.kt)("h2",{id:"autocompletemultipleresults"},"AutoCompleteMultipleResults"),(0,r.kt)("p",null,"The results returned on a multi entity autocomplete query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The raw query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"suggestions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#autocompleteresultforentity"},(0,r.kt)("code",null,"[AutoCompleteResultForEntity!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The autocompletion suggestions"))))),(0,r.kt)("h2",{id:"autocompleteresultforentity"},"AutoCompleteResultForEntity"),(0,r.kt)("p",null,"An individual auto complete result specific to an individual Metadata Entity 
Type"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"suggestions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The autocompletion results for specified entity type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"[Entity!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of entities to render in autocomplete"))))),(0,r.kt)("h2",{id:"autocompleteresults"},"AutoCompleteResults"),(0,r.kt)("p",null,"The results returned on a single entity autocomplete query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"suggestions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The autocompletion results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"[Entity!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of entities to render in 
autocomplete"))))),(0,r.kt)("h2",{id:"barchart"},"BarChart"),(0,r.kt)("p",null,"For consumption by UI only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"bars",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#namedbar"},(0,r.kt)("code",null,"[NamedBar!]!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"barsegment"},"BarSegment"),(0,r.kt)("p",null,"For consumption by UI only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"label",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"basedata"},"BaseData"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataset",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Dataset used for the Training or Evaluation of the 
MLModel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"motivation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Motivation to pick these datasets"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"preProcessing",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Details of Data Proprocessing"))))),(0,r.kt)("h2",{id:"batchgetstepstatesresult"},"BatchGetStepStatesResult"),(0,r.kt)("p",null,"Result returned when fetching step state"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"results",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stepstateresult"},(0,r.kt)("code",null,"[StepStateResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The step states"))))),(0,r.kt)("h2",{id:"batchspec"},"BatchSpec"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"nativeBatchId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The native identifier as specified by the system operating on the batch."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A query that identifies a batch of 
data"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Any limit to the number of rows in the batch, if applied"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the Batch"))))),(0,r.kt)("h2",{id:"batchupdatestepstatesresult"},"BatchUpdateStepStatesResult"),(0,r.kt)("p",null,"Result returned when fetching step state"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"results",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#updatestepstateresult"},(0,r.kt)("code",null,"[UpdateStepStateResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Results for each step"))))),(0,r.kt)("h2",{id:"booleanbox"},"BooleanBox"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"booleanValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"browsepath"},"BrowsePath"),(0,r.kt)("p",null,"A hierarchical entity 
path"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The components of the browse path"))))),(0,r.kt)("h2",{id:"browsepathentry"},"BrowsePathEntry"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The path name of a group of browse results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional entity associated with this browse entry. This will usually be a container entity. 
If this entity is not populated, the name must be used."))))),(0,r.kt)("h2",{id:"browsepathv2"},"BrowsePathV2"),(0,r.kt)("p",null,"A hierarchical entity path V2"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathentry"},(0,r.kt)("code",null,"[BrowsePathEntry!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The components of the browse path"))))),(0,r.kt)("h2",{id:"browseresultgroup"},"BrowseResultGroup"),(0,r.kt)("p",null,"A group of Entities under a given browse path"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The path name of a group of browse results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities within the group"))))),(0,r.kt)("h2",{id:"browseresultgroupv2"},"BrowseResultGroupV2"),(0,r.kt)("p",null,"A group of Entities under a given browse 
path"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The path name of a group of browse results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional entity associated with this browse group. This will usually be a container entity. If this entity is not populated, the name must be used."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities within the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"hasSubGroups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not there are any more groups underneath this group"))))),(0,r.kt)("h2",{id:"browseresultmetadata"},"BrowseResultMetadata"),(0,r.kt)("p",null,"Metadata about the Browse Paths response"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The provided 
path"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"totalNumEntities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of entities under the provided browse path"))))),(0,r.kt)("h2",{id:"browseresults"},"BrowseResults"),(0,r.kt)("p",null,"The results of a browse path traversal query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"[Entity!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browseresultgroup"},(0,r.kt)("code",null,"[BrowseResultGroup!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The groups present at the provided browse path"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of browse results under the path with filters 
applied"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browseresultmetadata"},(0,r.kt)("code",null,"BrowseResultMetadata!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Metadata containing resulting browse groups"))))),(0,r.kt)("h2",{id:"browseresultsv2"},"BrowseResultsV2"),(0,r.kt)("p",null,"The results of a browse path V2 traversal query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browseresultgroupv2"},(0,r.kt)("code",null,"[BrowseResultGroupV2!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The groups present at the provided browse path V2"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of groups included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of browse groups under the path with filters applied"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browseresultmetadata"},(0,r.kt)("code",null,"BrowseResultMetadata!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Metadata containing resulting browse 
groups"))))),(0,r.kt)("h2",{id:"caveatdetails"},"CaveatDetails"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"needsFurtherTesting",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Did the results suggest any further testing"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"caveatDescription",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Caveat Description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groupsNotRepresented",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Relevant groups that were not represented in the evaluation dataset"))))),(0,r.kt)("h2",{id:"caveatsandrecommendations"},"CaveatsAndRecommendations"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"caveats",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#caveatdetails"},(0,r.kt)("code",null,"CaveatDetails"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Caveats on using this MLModel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"recommendations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recommendations on where this MLModel should be 
used"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"idealDatasetCharacteristics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ideal characteristics of an evaluation dataset for this MLModel"))))),(0,r.kt)("h2",{id:"cell"},"Cell"),(0,r.kt)("p",null,"For consumption by UI only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"linkParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#linkparams"},(0,r.kt)("code",null,"LinkParams"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"changeauditstamps"},"ChangeAuditStamps"),(0,r.kt)("p",null,"Captures information about who created/last modified/deleted the entity and when"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the creation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding 
to the modification"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deleted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional AuditStamp corresponding to the deletion"))))),(0,r.kt)("h2",{id:"chart"},"Chart"),(0,r.kt)("p",null,"A Chart Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"container",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#container"},(0,r.kt)("code",null,"Container"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The parent container in 
which the entity resides"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentContainers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentcontainersresult"},(0,r.kt)("code",null,"ParentContainersResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of containers for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tool",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The chart tool name Note that this field will soon be deprecated in favor a unified notion of Data Platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"chartId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An id unique within the charting tool"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#chartproperties"},(0,r.kt)("code",null,"ChartProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties about the Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#charteditableproperties"},(0,r.kt)("code",null,"ChartEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read write properties about the Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#chartquery"},(0,r.kt)("code",null,"ChartQuery"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Info about the query which is used to render the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the 
chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"embed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#embed"},(0,r.kt)("code",null,"Embed"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Embed information about the Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tags associated with the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific 
instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"statsSummary",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#chartstatssummary"},(0,r.kt)("code",null,"ChartStatsSummary"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Not yet implemented."),(0,r.kt)("p",null,"Experimental - Summary operational & usage statistics about a Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage 
graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the chart. If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. 
If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#chartinfo"},(0,r.kt)("code",null,"ChartInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional read only information about the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#charteditableproperties"},(0,r.kt)("code",null,"ChartEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use editableProperties field instead Additional read write information about the Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags instead The structured tags associated with the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the chart is defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"inputFields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#inputfields"},(0,r.kt)("code",null,"InputFields"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Input fields to power the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprivileges"},(0,r.kt)("code",null,"EntityPrivileges"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Privileges given to a user relevant to this 
entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"chartcell"},"ChartCell"),(0,r.kt)("p",null,"A Notebook cell which contains chart as content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"cellTitle",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title of the cell"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cellId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique id for the cell."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"changeAuditStamps",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#changeauditstamps"},(0,r.kt)("code",null,"ChangeAuditStamps"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Captures information about who created/last modified/deleted this TextCell and when"))))),(0,r.kt)("h2",{id:"charteditableproperties"},"ChartEditableProperties"),(0,r.kt)("p",null,"Chart properties that are editable via the UI This represents logical metadata,\nas opposed to technical metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the 
Chart"))))),(0,r.kt)("h2",{id:"chartinfo"},"ChartInfo"),(0,r.kt)("p",null,"Deprecated, use ChartProperties instead\nAdditional read only information about the chart"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"inputs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"[Dataset!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use relationship Consumes instead Data sources for the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native platform URL of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#charttype"},(0,r.kt)("code",null,"ChartType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Access level for the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"access",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesslevel"},(0,r.kt)("code",null,"AccessLevel"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Access level for the 
chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastRefreshed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time when this chart last refreshed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the creation of this chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the modification of this chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deleted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional AuditStamp corresponding to the deletion of this chart"))))),(0,r.kt)("h2",{id:"chartproperties"},"ChartProperties"),(0,r.kt)("p",null,"Additional read only properties about the chart"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the 
chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native platform URL of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#charttype"},(0,r.kt)("code",null,"ChartType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Access level for the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"access",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesslevel"},(0,r.kt)("code",null,"AccessLevel"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Access level for the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastRefreshed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time when this chart last refreshed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the creation of this chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the modification of this 
chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deleted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional AuditStamp corresponding to the deletion of this chart"))))),(0,r.kt)("h2",{id:"chartquery"},"ChartQuery"),(0,r.kt)("p",null,"The query that was used to populate a Chart"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"rawQuery",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Raw query to build a chart from input datasets"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#chartquerytype"},(0,r.kt)("code",null,"ChartQueryType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the chart query"))))),(0,r.kt)("h2",{id:"chartstatssummary"},"ChartStatsSummary"),(0,r.kt)("p",null,"Experimental - subject to change. 
A summary of usage metrics about a Chart."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total view count for the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewCountLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The view count in the last 30 days"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCountLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique user count in the past 30 days"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"topUsersLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The top users in the past 30 days"))))),(0,r.kt)("h2",{id:"container"},"Container"),(0,r.kt)("p",null,"A container of other Metadata Entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the 
container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"container",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#container"},(0,r.kt)("code",null,"Container"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Fetch an Entity Container by primary key (urn)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentContainers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentcontainersresult"},(0,r.kt)("code",null,"ParentContainersResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of containers for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#containerproperties"},(0,r.kt)("code",null,"ContainerProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Read-only properties that originate in the source data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#containereditableproperties"},(0,r.kt)("code",null,"ContainerEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Read-write properties that originate in 
DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags used for searching dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#subtypes"},(0,r.kt)("code",null,"SubTypes"))),(0,r.kt)("td",null,(0,r.kt)("p",null,'Sub types of the container, e.g. 
"Database" etc'))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchresults"},(0,r.kt)("code",null,"SearchResults"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Children entities inside of the Container"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#containerentitiesinput"},(0,r.kt)("code",null,"ContainerEntitiesInput"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"containereditableproperties"},"ContainerEditableProperties"),(0,r.kt)("p",null,"Read-write properties that originate in DataHub"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"DataHub description of the Container"))))),(0,r.kt)("h2",{id:"containerproperties"},"ContainerProperties"),(0,r.kt)("p",null,"Read-only properties that originate in the source data 
platform"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the Container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"System description of the Container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the Container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native platform URL of the Container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"qualifiedName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Fully-qualified name of the Container"))))),(0,r.kt)("h2",{id:"contentparams"},"ContentParams"),(0,r.kt)("p",null,"Params about the recommended content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of entities corresponding to the 
recommended content"))))),(0,r.kt)("h2",{id:"corpgroup"},"CorpGroup"),(0,r.kt)("p",null,"A DataHub Group entity, which represents a Person on the Metadata Entity Graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Group name eg wherehows dev, ask_metadata"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the Corp Group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpgroupproperties"},(0,r.kt)("code",null,"CorpGroupProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties about the 
group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpgroupeditableproperties"},(0,r.kt)("code",null,"CorpGroupEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read write properties about the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"origin",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#origin"},(0,r.kt)("code",null,"Origin"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Origin info about this group."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpgroupinfo"},(0,r.kt)("code",null,"CorpGroupInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional read only info about the group"))))),(0,r.kt)("h2",{id:"corpgroupeditableproperties"},"CorpGroupEditableProperties"),(0,r.kt)("p",null,"Additional read write properties about a 
group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"DataHub description of the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"slack",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Slack handle for the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Email address for the group"))))),(0,r.kt)("h2",{id:"corpgroupinfo"},"CorpGroupInfo"),(0,r.kt)("p",null,"Deprecated, use CorpUserProperties instead\nAdditional read only info about a group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name to display when rendering the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description provided for the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"email of this 
group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"admins",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, do not use owners of this group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"members",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use relationship IsMemberOfGroup instead List of ldap urn in this group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, do not use List of groups urns in this group"))))),(0,r.kt)("h2",{id:"corpgroupproperties"},"CorpGroupProperties"),(0,r.kt)("p",null,"Additional read only properties about a group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"display name of this group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description provided for the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"email of 
this group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"slack",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Slack handle for the group"))))),(0,r.kt)("h2",{id:"corpuser"},"CorpUser"),(0,r.kt)("p",null,"A DataHub User entity, which represents a Person on the Metadata Entity Graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"username",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A username associated with the user This uniquely identifies the user within DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuserproperties"},(0,r.kt)("code",null,"CorpUserProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties about the corp 
user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpusereditableproperties"},(0,r.kt)("code",null,"CorpUserEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Read write properties about the corp user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#corpuserstatus"},(0,r.kt)("code",null,"CorpUserStatus"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The status of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tags associated with the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"isNativeUser",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this user is a native DataHub user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuserinfo"},(0,r.kt)("code",null,"CorpUserInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer 
supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional read only info about the corp user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpusereditableinfo"},(0,r.kt)("code",null,"CorpUserEditableInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use editableProperties field instead Read write info about the corp user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use the tags field instead The structured tags associated with the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"settings",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpusersettings"},(0,r.kt)("code",null,"CorpUserSettings"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Settings that a user can customize through the datahub ui"))))),(0,r.kt)("h2",{id:"corpuserappearancesettings"},"CorpUserAppearanceSettings"),(0,r.kt)("p",null,"Settings that control look and feel of the DataHub UI for the user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"showSimplifiedHomepage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flag whether the user should see a homepage with only datasets, charts & dashboards. 
Intended for users who have less operational use cases for the datahub tool."))))),(0,r.kt)("h2",{id:"corpusereditableinfo"},"CorpUserEditableInfo"),(0,r.kt)("p",null,"Deprecated, use CorpUserEditableProperties instead\nAdditional read write info about a user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name to show on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title to show on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aboutMe",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"About me section of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"teams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Teams that the user belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"skills",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Skills that the user possesses"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"pictureLink",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A URL which points to a picture which user wants to set as a profile photo"))))),(0,r.kt)("h2",{id:"corpusereditableproperties"},"CorpUserEditableProperties"),(0,r.kt)("p",null,"Additional read 
write properties about a user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name to show on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title to show on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aboutMe",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"About me section of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"teams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Teams that the user belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"skills",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Skills that the user possesses"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"pictureLink",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A URL which points to a picture which user wants to set as a profile photo"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"slack",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The slack handle of the 
user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"phone",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Phone number for the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Email address for the user"))))),(0,r.kt)("h2",{id:"corpuserinfo"},"CorpUserInfo"),(0,r.kt)("p",null,"Deprecated, use CorpUserProperties instead\nAdditional read only info about a user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"active",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user is active"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Email address of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manager",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Direct manager of the 
user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"departmentId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"department id the user belong to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"departmentName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"department name this user belong to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"firstName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"first name of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"last name of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fullName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Common name of this user, format is firstName plus lastName"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"countryCode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"two uppercase letters country code"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the ldap"))))),(0,r.kt)("h2",{id:"corpuserproperties"},"CorpUserProperties"),(0,r.kt)("p",null,"Additional read only properties about a 
user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"active",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user is active"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Email address of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manager",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Direct manager of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"departmentId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"department id the user belong to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"departmentName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"department name this user belong to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"firstName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"first name 
of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"last name of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fullName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Common name of this user, format is firstName plus lastName"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"countryCode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"two uppercase letters country code"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the ldap"))))),(0,r.kt)("h2",{id:"corpusersettings"},"CorpUserSettings"),(0,r.kt)("p",null,"Settings that a user can customize through the datahub ui"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"appearance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuserappearancesettings"},(0,r.kt)("code",null,"CorpUserAppearanceSettings"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Settings that control look and feel of the DataHub UI for the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"views",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuserviewssettings"},(0,r.kt)("code",null,"CorpUserViewsSettings"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Settings related to the DataHub Views 
feature"))))),(0,r.kt)("h2",{id:"corpuserviewssettings"},"CorpUserViewsSettings"),(0,r.kt)("p",null,"Settings related to the Views feature of DataHub."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"defaultView",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datahubview"},(0,r.kt)("code",null,"DataHubView"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The default view for the User."))))),(0,r.kt)("h2",{id:"cost"},"Cost"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"costType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#costtype"},(0,r.kt)("code",null,"CostType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Type of Cost Code"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"costValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#costvalue"},(0,r.kt)("code",null,"CostValue!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Code to which the Cost of this entity should be attributed to ie organizational cost ID"))))),(0,r.kt)("h2",{id:"costvalue"},"CostValue"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"costId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#float"},(0,r.kt)("code",null,"Float"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Organizational Cost 
ID"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"costCode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Organizational Cost Code"))))),(0,r.kt)("h2",{id:"custompropertiesentry"},"CustomPropertiesEntry"),(0,r.kt)("p",null,"An entry in a custom properties map represented as a tuple"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"key",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The key of the map entry"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The value fo the map entry"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the entity this property came from for tracking purposes e.g. 
when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"dashboard"},"Dashboard"),(0,r.kt)("p",null,"A Dashboard Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"container",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#container"},(0,r.kt)("code",null,"Container"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The parent container in which the entity 
resides"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentContainers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentcontainersresult"},(0,r.kt)("code",null,"ParentContainersResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of containers for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tool",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The dashboard tool name Note that this will soon be deprecated in favor of a standardized notion of Data Platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dashboardId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An id unique within the dashboard tool"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardproperties"},(0,r.kt)("code",null,"DashboardProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties about the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardeditableproperties"},(0,r.kt)("code",null,"DashboardEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read write properties about the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the 
dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"embed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#embed"},(0,r.kt)("code",null,"Embed"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Embed information about the Dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tags associated with the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the dashboard. 
If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"usageStats",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardusagequeryresult"},(0,r.kt)("code",null,"DashboardUsageQueryResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Experimental (Subject to breaking change) -- Statistics about how this Dashboard is used"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"statsSummary",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardstatssummary"},(0,r.kt)("code",null,"DashboardStatsSummary"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Experimental - Summary operational & usage statistics about a Dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardinfo"},(0,r.kt)("code",null,"DashboardInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No 
longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional read only information about the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardeditableproperties"},(0,r.kt)("code",null,"DashboardEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use editableProperties instead Additional read write properties about the Dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags field instead The structured tags associated with the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the dashboard is defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"inputFields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#inputfields"},(0,r.kt)("code",null,"InputFields"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Input fields that power all the charts in the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#subtypes"},(0,r.kt)("code",null,"SubTypes"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Sub Types of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprivileges"},(0,r.kt)("code",null,"EntityPrivileges"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Privileges given to a user relevant to this 
entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"dashboardeditableproperties"},"DashboardEditableProperties"),(0,r.kt)("p",null,"Dashboard properties that are editable via the UI This represents logical metadata,\nas opposed to technical metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Dashboard"))))),(0,r.kt)("h2",{id:"dashboardinfo"},"DashboardInfo"),(0,r.kt)("p",null,"Deprecated, use DashboardProperties instead\nAdditional read only info about a Dashboard"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"charts",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#chart"},(0,r.kt)("code",null,"[Chart!]!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer 
supported"),(0,r.kt)("p",null,"Deprecated, use relationship Contains instead Charts that comprise the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native platform URL of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"access",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesslevel"},(0,r.kt)("code",null,"AccessLevel"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Access level for the dashboard Note that this will soon be deprecated for low usage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastRefreshed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time when this dashboard last refreshed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the creation of this dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the modification of this dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deleted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional AuditStamp corresponding to the deletion of this 
dashboard"))))),(0,r.kt)("h2",{id:"dashboardproperties"},"DashboardProperties"),(0,r.kt)("p",null,"Additional read only properties about a Dashboard"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native platform URL of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"access",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesslevel"},(0,r.kt)("code",null,"AccessLevel"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Access level for the dashboard Note that this will soon be deprecated for low usage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastRefreshed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time when this dashboard last 
refreshed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the creation of this dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the modification of this dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deleted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional AuditStamp corresponding to the deletion of this dashboard"))))),(0,r.kt)("h2",{id:"dashboardstatssummary"},"DashboardStatsSummary"),(0,r.kt)("p",null,"Experimental - subject to change. A summary of usage metrics about a Dashboard."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total view count for the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewCountLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The view count in the last 30 days"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCountLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique user count in the past 30 
days"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"topUsersLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The top users in the past 30 days"))))),(0,r.kt)("h2",{id:"dashboardusageaggregation"},"DashboardUsageAggregation"),(0,r.kt)("p",null,"An aggregation of Dashboard usage statistics"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"bucket",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time window start time"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"duration",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#windowduration"},(0,r.kt)("code",null,"WindowDuration"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time window span"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The resource urn associated with the usage information, eg a Dashboard urn"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metrics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardusageaggregationmetrics"},(0,r.kt)("code",null,"DashboardUsageAggregationMetrics"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The rolled up usage metrics"))))),(0,r.kt)("h2",{id:"dashboardusageaggregationmetrics"},"DashboardUsageAggregationMetrics"),(0,r.kt)("p",null,"Rolled up metrics about Dashboard usage over 
time"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique number of dashboard users within the time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of dashboard views within the time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of dashboard executions within the time range"))))),(0,r.kt)("h2",{id:"dashboardusagemetrics"},"DashboardUsageMetrics"),(0,r.kt)("p",null,"A set of absolute dashboard usage metrics"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#timeseriesaspect"},"TimeSeriesAspect"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the metrics were 
reported"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"favoritesCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of times dashboard has been favorited FIXME: Qualifies as Popularity Metric rather than Usage Metric?"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of dashboard views"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of dashboard execution"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastViewed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time when this dashboard was last viewed"))))),(0,r.kt)("h2",{id:"dashboardusagequeryresult"},"DashboardUsageQueryResult"),(0,r.kt)("p",null,"The result of a dashboard usage query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"buckets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardusageaggregation"},(0,r.kt)("code",null,"[DashboardUsageAggregation]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of relevant time windows for use in displaying usage statistics"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aggregations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardusagequeryresultaggregations"},(0,r.kt)("code",null,"DashboardUsageQueryResultAggregations"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of rolled up aggregations about the 
dashboard usage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metrics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardusagemetrics"},(0,r.kt)("code",null,"[DashboardUsageMetrics!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of absolute dashboard usage metrics"))))),(0,r.kt)("h2",{id:"dashboardusagequeryresultaggregations"},"DashboardUsageQueryResultAggregations"),(0,r.kt)("p",null,"A set of rolled up aggregations about the Dashboard usage"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The count of unique Dashboard users within the queried time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboarduserusagecounts"},(0,r.kt)("code",null,"[DashboardUserUsageCounts]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific per user usage counts within the queried time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of dashboard views within the queried time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of dashboard executions within the queried time range"))))),(0,r.kt)("h2",{id:"dashboarduserusagecounts"},"DashboardUserUsageCounts"),(0,r.kt)("p",null,"Information about individual user usage of a 
Dashboard"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"user",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The user of the Dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"number of times dashboard has been viewed by the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"number of dashboard executions by the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"usageCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Normalized numeric metric representing user's dashboard usage Higher value represents more usage"))))),(0,r.kt)("h2",{id:"dataflow"},"DataFlow"),(0,r.kt)("p",null,"A Data Flow Metadata Entity, representing an set of pipelined Data Job or Tasks required\nto produce an output Dataset Also known as a Data 
Pipeline"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of a Data Flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orchestrator",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Workflow orchestrator ei Azkaban, Airflow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"flowId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Id of the 
flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cluster",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Cluster of the flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataflowproperties"},(0,r.kt)("code",null,"DataFlowProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties about a Data flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datafloweditableproperties"},(0,r.kt)("code",null,"DataFlowEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read write properties about a Data Flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tags associated with the dataflow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the dataflow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the Data Flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources 
related to the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the DataFlow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage 
graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the data flow. If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. 
If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataflowinfo"},(0,r.kt)("code",null,"DataFlowInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional read only information about a Data flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags field instead The structured tags associated with the dataflow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataJobs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataflowdatajobsrelationships"},(0,r.kt)("code",null,"DataFlowDataJobsRelationships"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use relationship IsPartOf instead Data Jobs"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the datflow is defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"dataflowdatajobsrelationships"},"DataFlowDataJobsRelationships"),(0,r.kt)("p",null,"Deprecated, use relationships query 
instead"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshiplegacy"},(0,r.kt)("code",null,"[EntityRelationshipLegacy]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"datafloweditableproperties"},"DataFlowEditableProperties"),(0,r.kt)("p",null,"Data Flow properties that are editable via the UI This represents logical metadata,\nas opposed to technical metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Data Flow"))))),(0,r.kt)("h2",{id:"dataflowinfo"},"DataFlowInfo"),(0,r.kt)("p",null,"Deprecated, use DataFlowProperties instead\nAdditional read only properties about a Data Flow aka Pipeline"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the 
flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"project",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional project or namespace associated with the flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL associated with the DataFlow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))))),(0,r.kt)("h2",{id:"dataflowproperties"},"DataFlowProperties"),(0,r.kt)("p",null,"Additional read only properties about a Data Flow aka Pipeline"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"project",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional project or namespace associated with the 
flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL associated with the DataFlow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))))),(0,r.kt)("h2",{id:"datahubpolicy"},"DataHubPolicy"),(0,r.kt)("p",null,"An DataHub Platform Access Policy - Policies determine who can perform what actions against which resources on the platform"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from the 
Role"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"policyType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policytype"},(0,r.kt)("code",null,"PolicyType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"state",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policystate"},(0,r.kt)("code",null,"PolicyState!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The present state of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#resourcefilter"},(0,r.kt)("code",null,"ResourceFilter"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The resources that the Policy privileges apply to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The privileges that the Policy 
grants"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#actorfilter"},(0,r.kt)("code",null,"ActorFilter!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actors that the Policy grants privileges to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editable",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the Policy is editable, ie system policies, or not"))))),(0,r.kt)("h2",{id:"datahubrole"},"DataHubRole"),(0,r.kt)("p",null,"A DataHub Role is a high-level abstraction on top of Policies that dictates what actions users can take."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the role"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from the 
Role"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Role."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the Role"))))),(0,r.kt)("h2",{id:"datahubview"},"DataHubView"),(0,r.kt)("p",null,"An DataHub View - Filters that are applied across the application automatically."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity 
Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#datahubviewtype"},(0,r.kt)("code",null,"DataHubViewType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datahubviewdefinition"},(0,r.kt)("code",null,"DataHubViewDefinition!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The definition of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from the View"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"datahubviewdefinition"},"DataHubViewDefinition"),(0,r.kt)("p",null,"An DataHub View 
Definition"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of filters to apply. If left empty, then ALL entity types are in scope."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datahubviewfilter"},(0,r.kt)("code",null,"DataHubViewFilter!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of filters to apply. If left empty, then no filters will be applied."))))),(0,r.kt)("h2",{id:"datahubviewfilter"},"DataHubViewFilter"),(0,r.kt)("p",null,"A DataHub View Filter. Note that"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"operator",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#logicaloperator"},(0,r.kt)("code",null,"LogicalOperator!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The operator used to combine the filters."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetfilter"},(0,r.kt)("code",null,"[FacetFilter!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of filters combined using the operator. 
If left empty, then no filters will be applied."))))),(0,r.kt)("h2",{id:"datajob"},"DataJob"),(0,r.kt)("p",null,"A Data Job Metadata Entity, representing an individual unit of computation or Task\nto produce an output Dataset Always part of a parent Data Flow aka Pipeline"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Data Job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataFlow",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataflow"},(0,r.kt)("code",null,"DataFlow"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use relationship IsPartOf instead The associated data 
flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"jobId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Id of the job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datajobproperties"},(0,r.kt)("code",null,"DataJobProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties associated with the Data Job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datajobeditableproperties"},(0,r.kt)("code",null,"DataJobEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read write properties associated with the Data Job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tags associated with the DataJob"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the 
DataJob"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the Data Flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Data Job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the data job. If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. 
If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datajobinfo"},(0,r.kt)("code",null,"DataJobInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional read only information about a Data processing job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"inputOutput",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datajobinputoutput"},(0,r.kt)("code",null,"DataJobInputOutput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Information about the inputs and outputs of a Data processing job including column-level lineage."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use the tags field instead The structured tags associated with the DataJob"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataprocessinstanceresult"},(0,r.kt)("code",null,"DataProcessInstanceResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"History of runs of this 
task"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprivileges"},(0,r.kt)("code",null,"EntityPrivileges"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Privileges given to a user relevant to this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"datajobeditableproperties"},"DataJobEditableProperties"),(0,r.kt)("p",null,"Data Job properties that are editable via the UI This represents logical metadata,\nas opposed to technical metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Data Job"))))),(0,r.kt)("h2",{id:"datajobinfo"},"DataJobInfo"),(0,r.kt)("p",null,"Deprecated, use DataJobProperties instead\nAdditional read only information about a Data Job aka 
Task"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Job display name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Job description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL associated with the DataJob"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))))),(0,r.kt)("h2",{id:"datajobinputoutput"},"DataJobInputOutput"),(0,r.kt)("p",null,"The lineage information for a DataJob\nTODO Rename this to align with other Lineage models"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"inputDatasets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"[Dataset!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use relationship Consumes instead Input datasets produced by the data job during 
processing"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"outputDatasets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"[Dataset!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use relationship Produces instead Output datasets produced by the data job during processing"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"inputDatajobs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datajob"},(0,r.kt)("code",null,"[DataJob!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use relationship DownstreamOf instead Input datajobs that this data job depends on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fineGrainedLineages",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#finegrainedlineage"},(0,r.kt)("code",null,"[FineGrainedLineage!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Lineage information for the column-level. Includes a list of objects detailing which columns are upstream and which are downstream of each other. 
The upstream and downstream columns are from datasets."))))),(0,r.kt)("h2",{id:"datajobproperties"},"DataJobProperties"),(0,r.kt)("p",null,"Additional read only properties about a Data Job aka Task"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Job display name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Job description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL associated with the DataJob"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))))),(0,r.kt)("h2",{id:"dataplatform"},"DataPlatform"),(0,r.kt)("p",null,"A Data Platform represents a specific third party Data System or Tool Examples include\nwarehouses like Snowflake, orchestrators like Airflow, and dashboarding tools like 
Looker"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of the data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatformproperties"},(0,r.kt)("code",null,"DataPlatformProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties associated with a data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties displayName instead Display name of the data 
platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminfo"},(0,r.kt)("code",null,"DataPlatformInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional properties associated with a data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"dataplatforminfo"},"DataPlatformInfo"),(0,r.kt)("p",null,"Deprecated, use DataPlatformProperties instead\nAdditional read only information about a Data Platform"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#platformtype"},(0,r.kt)("code",null,"PlatformType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The platform category"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name associated with the 
platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetNameDelimiter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The delimiter in the dataset names on the data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"logoUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A logo URL associated with the platform"))))),(0,r.kt)("h2",{id:"dataplatforminstance"},"DataPlatformInstance"),(0,r.kt)("p",null,"A Data Platform instance represents an instance of a 3rd party platform like Looker, Snowflake, etc."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of the data 
platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"instanceId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The platform instance id"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstanceproperties"},(0,r.kt)("code",null,"DataPlatformInstanceProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties associated with a data platform instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the data platform instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the data platform 
instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags used for searching the data platform instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the data platform instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the container"))))),(0,r.kt)("h2",{id:"dataplatforminstanceproperties"},"DataPlatformInstanceProperties"),(0,r.kt)("p",null,"Additional read only properties about a DataPlatformInstance"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the data platform instance used in display"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Read only technical description for the data platform instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the data platform 
instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL associated with the data platform instance"))))),(0,r.kt)("h2",{id:"dataplatformproperties"},"DataPlatformProperties"),(0,r.kt)("p",null,"Additional read only properties about a Data Platform"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#platformtype"},(0,r.kt)("code",null,"PlatformType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The platform category"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name associated with the platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetNameDelimiter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The delimiter in the dataset names on the data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"logoUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A logo URL associated with the platform"))))),(0,r.kt)("h2",{id:"dataprocessinstance"},"DataProcessInstance"),(0,r.kt)("p",null,"A DataProcessInstance Metadata Entity, representing an individual run of\na task or 
datajob."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the DataProcessInstance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"state",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataprocessrunevent"},(0,r.kt)("code",null,"[DataProcessRunEvent]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The history of state changes for the 
run"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"When the run was kicked off"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the data process"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity. 
In the UI, used for inputs, outputs and parentTemplate"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The link to view the task run in the source system"))))),(0,r.kt)("h2",{id:"dataprocessinstanceresult"},"DataProcessInstanceResult"),(0,r.kt)("p",null,"Data Process instances that match the provided 
query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities to include in result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of run events returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataprocessinstance"},(0,r.kt)("code",null,"[DataProcessInstance]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The data process instances that produced or consumed the entity"))))),(0,r.kt)("h2",{id:"dataprocessinstancerunresult"},"DataProcessInstanceRunResult"),(0,r.kt)("p",null,"the result of a run, part of the run state"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"resultType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#dataprocessinstancerunresulttype"},(0,r.kt)("code",null,"DataProcessInstanceRunResultType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The outcome of the 
run"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nativeResultType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The outcome of the run in the data platforms native language"))))),(0,r.kt)("h2",{id:"dataprocessrunevent"},"DataProcessRunEvent"),(0,r.kt)("p",null,"A state change event in the data process instance lifecycle"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#timeseriesaspect"},"TimeSeriesAspect"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#dataprocessrunstatus"},(0,r.kt)("code",null,"DataProcessRunStatus"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The status of the data process instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"attempt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The try number that this instance run is in"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"result",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataprocessinstancerunresult"},(0,r.kt)("code",null,"DataProcessInstanceRunResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The result of a run"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp associated with the run event in milliseconds"))))),(0,r.kt)("h2",{id:"dataproduct"},"DataProduct"),(0,r.kt)("p",null,"A Data Product, or a logical grouping of 
Metadata Entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataproductproperties"},(0,r.kt)("code",null,"DataProductProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties about a Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchresults"},(0,r.kt)("code",null,"SearchResults"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Children entities inside of the DataProduct"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchacrossentitiesinput"},(0,r.kt)("code",null,"SearchAcrossEntitiesInput"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags used for searching Data 
Product"))))),(0,r.kt)("h2",{id:"dataproductproperties"},"DataProductProperties"),(0,r.kt)("p",null,"Properties about a domain"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL for the DataProduct (most likely GitHub repo where Data Products are managed as code)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"numAssets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of children entities inside of the Data Product. 
This number includes soft deleted entities."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the Data Product"))))),(0,r.kt)("h2",{id:"dataset"},"Dataset"),(0,r.kt)("p",null,"A Dataset entity, which encompasses Relational Tables, Document store collections, streaming topics, and other sets of data having an independent lifecycle"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was 
ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the dataset is defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"container",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#container"},(0,r.kt)("code",null,"Container"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The parent container in which the entity resides"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentContainers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentcontainersresult"},(0,r.kt)("code",null,"ParentContainersResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of containers for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique guid for dataset No longer to be used as the Dataset display name. 
Use properties.name instead"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datasetproperties"},(0,r.kt)("code",null,"DatasetProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of read only properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataseteditableproperties"},(0,r.kt)("code",null,"DatasetEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schemaMetadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemametadata"},(0,r.kt)("code",null,"SchemaMetadata"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema metadata of the dataset, available by version 
number"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableSchemaMetadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#editableschemametadata"},(0,r.kt)("code",null,"EditableSchemaMetadata"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Editable schema metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"embed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#embed"},(0,r.kt)("code",null,"Embed"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Embed information about the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags used for searching dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"access",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#access"},(0,r.kt)("code",null,"Access"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Roles and the properties to access the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"usageStats",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#usagequeryresult"},(0,r.kt)("code",null,"UsageQueryResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Statistics about how this Dataset is used The first parameter, ",(0,r.kt)("code",null,"resource"),", is deprecated and no longer needs to be provided"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"resource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"range",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#timerange"},(0,r.kt)("code",null,"TimeRange"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"statsSummary",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datasetstatssummary"},(0,r.kt)("code",null,"DatasetStatsSummary"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Experimental - Summary operational & usage statistics about a Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetProfiles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datasetprofile"},(0,r.kt)("code",null,"[DatasetProfile!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Profile Stats resource that 
retrieves the events in a previous unit of time in descending order If no start or end time are provided, the most recent events will be returned"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#filterinput"},(0,r.kt)("code",null,"FilterInput"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"operations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#operation"},(0,r.kt)("code",null,"[Operation!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Operational events for an 
entity."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#filterinput"},(0,r.kt)("code",null,"FilterInput"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"assertions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityassertionsresult"},(0,r.kt)("code",null,"EntityAssertionsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Assertions associated with the 
Dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage 
graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the dataset. If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"health",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#health"},(0,r.kt)("code",null,"[Health!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Experimental! 
The resolved health statuses of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schema"},(0,r.kt)("code",null,"Schema"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: Use `schemaMetadata`"),(0,r.kt)("p",null,"Schema metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead External URL associated with the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"origin",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#fabrictype"},(0,r.kt)("code",null,"FabricType!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, see the properties field instead Environment in which the dataset belongs to or where it was generated Note that this field will soon be deprecated in favor of a more standardized concept of Environment"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use the properties field instead Read only technical description for dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformNativeType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#platformnativetype"},(0,r.kt)("code",null,"PlatformNativeType"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, do not use this field The logical type of the dataset ie table, stream, 
etc"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"uri",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties instead Native Dataset Uri Uri should not include any environment specific properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags field instead The structured tags associated with the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#subtypes"},(0,r.kt)("code",null,"SubTypes"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Sub Types that this entity implements"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#viewproperties"},(0,r.kt)("code",null,"ViewProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"View related properties. Only relevant if subtypes field contains view."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aspects",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#rawaspect"},(0,r.kt)("code",null,"[RawAspect!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Experimental API. 
For fetching extra entities that do not have custom UI code yet"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#aspectparams"},(0,r.kt)("code",null,"AspectParams"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataprocessinstanceresult"},(0,r.kt)("code",null,"DataProcessInstanceResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"History of datajob runs that either produced or consumed this dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"direction",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#relationshipdirection"},(0,r.kt)("code",null,"RelationshipDirection!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"siblings",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#siblingproperties"},(0,r.kt)("code",null,"SiblingProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Metadata about the datasets 
siblings"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fineGrainedLineages",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#finegrainedlineage"},(0,r.kt)("code",null,"[FineGrainedLineage!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Lineage information for the column-level. Includes a list of objects detailing which columns are upstream and which are downstream of each other. The upstream and downstream columns are from datasets."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprivileges"},(0,r.kt)("code",null,"EntityPrivileges"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Privileges given to a user relevant to this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"testResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#testresults"},(0,r.kt)("code",null,"TestResults"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The results of evaluating tests"))))),(0,r.kt)("h2",{id:"datasetassertioninfo"},"DatasetAssertionInfo"),(0,r.kt)("p",null,"Detailed information about a Dataset Assertion"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the dataset that the assertion is related 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"scope",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#datasetassertionscope"},(0,r.kt)("code",null,"DatasetAssertionScope!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The scope of the Dataset assertion."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldref"},(0,r.kt)("code",null,"[SchemaFieldRef!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The fields serving as input to the assertion. Empty if there are none."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aggregation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertionstdaggregation"},(0,r.kt)("code",null,"AssertionStdAggregation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized assertion operator"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"operator",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertionstdoperator"},(0,r.kt)("code",null,"AssertionStdOperator!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized assertion operator"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parameters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionstdparameters"},(0,r.kt)("code",null,"AssertionStdParameters"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standard parameters required for the assertion. e.g. min_value, max_value, value, columns"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nativeType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The native operator for the assertion. 
For Great Expectations, this will contain the original expectation name."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nativeParameters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native parameters required for the assertion."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"logic",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Logic comprising a raw, unstructured assertion."))))),(0,r.kt)("h2",{id:"datasetdeprecation"},"DatasetDeprecation"),(0,r.kt)("p",null,"Deprecated, use Deprecation instead\nInformation about Dataset deprecation status\nNote that this model will soon be migrated to a more general purpose Entity status"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the dataset has been deprecated by owner"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"decommissionTime",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time user plan to decommission this dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"note",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional information about the dataset deprecation plan"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The 
user who will be credited for modifying this deprecation content"))))),(0,r.kt)("h2",{id:"dataseteditableproperties"},"DatasetEditableProperties"),(0,r.kt)("p",null,"Dataset properties that are editable via the UI This represents logical metadata,\nas opposed to technical metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Dataset"))))),(0,r.kt)("h2",{id:"datasetfieldprofile"},"DatasetFieldProfile"),(0,r.kt)("p",null,"An individual Dataset Field Profile"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standardized path of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique value count for the field across the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueProportion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#float"},(0,r.kt)("code",null,"Float"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The proportion of rows with unique values across the 
Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nullCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of NULL row values across the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nullProportion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#float"},(0,r.kt)("code",null,"Float"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The proportion of rows with NULL values across the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"min",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The min value for the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"max",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The max value for the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"mean",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The mean value for the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"median",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The median value for the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"stdev",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard deviation for the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sampleValues",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of sample values for the field"))))),(0,r.kt)("h2",{id:"datasetprofile"},"DatasetProfile"),(0,r.kt)("p",null,"A Dataset Profile associated with a Dataset, containing profiling 
statistics about the Dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#timeseriesaspect"},"TimeSeriesAspect"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the profile was reported"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"rowCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional row count of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"columnCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional column count of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sizeInBytes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The storage size in bytes"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldProfiles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datasetfieldprofile"},(0,r.kt)("code",null,"[DatasetFieldProfile!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional set of per field statistics obtained in the profile"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"partitionSpec",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#partitionspec"},(0,r.kt)("code",null,"PartitionSpec"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Information about the partition that was 
profiled"))))),(0,r.kt)("h2",{id:"datasetproperties"},"DatasetProperties"),(0,r.kt)("p",null,"Additional read only properties about a Dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the dataset used in display"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"qualifiedName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Fully-qualified name of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"origin",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#fabrictype"},(0,r.kt)("code",null,"FabricType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Environment in which the dataset belongs to or where it was generated Note that this field will soon be deprecated in favor of a more standardized concept of Environment"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Read only technical description for dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL associated with the 
Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Created timestamp millis associated with the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdActor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Actor associated with the Dataset's created timestamp"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Last Modified timestamp millis associated with the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModifiedActor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Actor associated with the Dataset's lastModified timestamp"))))),(0,r.kt)("h2",{id:"datasetstatssummary"},"DatasetStatsSummary"),(0,r.kt)("p",null,"Experimental - subject to change. 
A summary of usage metrics about a Dataset."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"queryCountLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query count in the past 30 days"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCountLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique user count in the past 30 days"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"topUsersLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The top users in the past 30 days"))))),(0,r.kt)("h2",{id:"daterange"},"DateRange"),(0,r.kt)("p",null,"For consumption by UI only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"end",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"deprecation"},"Deprecation"),(0,r.kt)("p",null,"Information about Metadata Entity deprecation 
status"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the entity has been deprecated by owner"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"decommissionTime",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time user plan to decommission this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"note",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional information about the entity deprecation plan"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The user who will be credited for modifying this deprecation content"))))),(0,r.kt)("h2",{id:"domain"},"Domain"),(0,r.kt)("p",null,"A domain, or a logical grouping of Metadata Entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of 
the domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Id of the domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainproperties"},(0,r.kt)("code",null,"DomainProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties about a domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchresults"},(0,r.kt)("code",null,"SearchResults"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Children entities inside of the 
Domain"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#domainentitiesinput"},(0,r.kt)("code",null,"DomainEntitiesInput"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"domainassociation"},"DomainAssociation"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domain"},(0,r.kt)("code",null,"Domain!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The domain related to the assocaited urn"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Reference back to the tagged urn for tracking purposes e.g. 
when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"domainproperties"},"DomainProperties"),(0,r.kt)("p",null,"Properties about a domain"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Domain"))))),(0,r.kt)("h2",{id:"downstreamentityrelationships"},"DownstreamEntityRelationships"),(0,r.kt)("p",null,"Deprecated, use relationships query instead"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshiplegacy"},(0,r.kt)("code",null,"[EntityRelationshipLegacy]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"editableschemafieldinfo"},"EditableSchemaFieldInfo"),(0,r.kt)("p",null,"Editable schema field metadata ie descriptions, tags, etc"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flattened name of a 
field identifying the field the editable info is applied to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edited description of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags field instead Tags associated with the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags associated with the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Glossary terms associated with the field"))))),(0,r.kt)("h2",{id:"editableschemametadata"},"EditableSchemaMetadata"),(0,r.kt)("p",null,"Information about schema metadata that is editable via the UI"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableSchemaFieldInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#editableschemafieldinfo"},(0,r.kt)("code",null,"[EditableSchemaFieldInfo!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Editable schema field metadata"))))),(0,r.kt)("h2",{id:"editabletagproperties"},"EditableTagProperties"),(0,r.kt)("p",null,"Additional read write Tag properties\nDeprecated! 
Replaced by TagProperties."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A display name for the Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A description of the Tag"))))),(0,r.kt)("h2",{id:"embed"},"Embed"),(0,r.kt)("p",null,"Information required to render an embedded version of an asset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"renderUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A URL which can be rendered inside of an iframe."))))),(0,r.kt)("h2",{id:"entityassertionsresult"},"EntityAssertionsResult"),(0,r.kt)("p",null,"A list of Assertions Associated with an Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set 
returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of assertions in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of assertions in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"assertions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertion"},(0,r.kt)("code",null,"[Assertion!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The assertions themselves"))))),(0,r.kt)("h2",{id:"entitycountresult"},"EntityCountResult"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"entitycountresults"},"EntityCountResults"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"counts",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitycountresult"},(0,r.kt)("code",null,"[EntityCountResult!]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"entitylineageresult"},"EntityLineageResult"),(0,r.kt)("p",null,"A list of lineage information associated with a source 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Start offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of results in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filtered",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results that were filtered out of the page (soft-deleted or non-existent)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#lineagerelationship"},(0,r.kt)("code",null,"[LineageRelationship!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Relationships in the result set"))))),(0,r.kt)("h2",{id:"entitypath"},"EntityPath"),(0,r.kt)("p",null,"An overview of the field that was matched in the entity search 
document"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"[Entity]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Path of entities between source and destination nodes"))))),(0,r.kt)("h2",{id:"entityprivileges"},"EntityPrivileges"),(0,r.kt)("p",null,"Shared privileges object across entities. Not all privileges apply to every entity."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"canManageChildren",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not a user can create child entities under a parent entity. 
For example, can one create Terms/Node sunder a Glossary Node."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"canManageEntity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not a user can delete or move this entity."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"canEditLineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not a user can create or delete lineage edges for an entity."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"canEditEmbed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not a user update the embed information"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"canEditQueries",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not a user can update the Queries for the entity (e.g. dataset)"))))),(0,r.kt)("h2",{id:"entityprofileconfig"},"EntityProfileConfig"),(0,r.kt)("p",null,"Configuration for an entity profile"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"defaultTab",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The enum value from EntityProfileTab for which tab should be showed by default on entity profile pages. 
If null, rely on default sorting from React code."))))),(0,r.kt)("h2",{id:"entityprofileparams"},"EntityProfileParams"),(0,r.kt)("p",null,"Context to define the entity profile page"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the entity being shown"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Type of the enity being displayed"))))),(0,r.kt)("h2",{id:"entityprofilesconfig"},"EntityProfilesConfig"),(0,r.kt)("p",null,"Configuration for different entity profiles"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprofileconfig"},(0,r.kt)("code",null,"EntityProfileConfig"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The configurations for a Domain entity profile"))))),(0,r.kt)("h2",{id:"entityrelationship"},"EntityRelationship"),(0,r.kt)("p",null,"A relationship between two entities TODO Migrate all entity relationships to this more generic 
model"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the relationship"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"direction",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#relationshipdirection"},(0,r.kt)("code",null,"RelationshipDirection!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The direction of the relationship relative to the source entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity that is related via lineage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the last modification of this relationship"))))),(0,r.kt)("h2",{id:"entityrelationshiplegacy"},"EntityRelationshipLegacy"),(0,r.kt)("p",null,"Deprecated, use relationships query instead"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entitywithrelationships"},(0,r.kt)("code",null,"EntityWithRelationships"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity that is related via 
lineage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the last modification of this relationship"))))),(0,r.kt)("h2",{id:"entityrelationshipsresult"},"EntityRelationshipsResult"),(0,r.kt)("p",null,"A list of relationship information associated with a source Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Start offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of results in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationship"},(0,r.kt)("code",null,"[EntityRelationship!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Relationships in the result 
set"))))),(0,r.kt)("h2",{id:"ethicalconsiderations"},"EthicalConsiderations"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"data",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Does the model use any sensitive data eg, protected classes"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"humanLife",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Is the model intended to inform decisions about matters central to human life or flourishing eg, health or safety"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"mitigations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"What risk mitigation strategies were used during model development"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"risksAndHarms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"What risks may be present in model usage Try to identify the potential recipients, likelihood, and magnitude of harms If these cannot be determined, note that they were considered but remain unknown"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"useCases",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Are there any known model use cases that are especially fraught This may connect directly to the intended use section"))))),(0,r.kt)("h2",{id:"executionrequest"},"ExecutionRequest"),(0,r.kt)("p",null,"Retrieve an ingestion execution 
request"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the execution request"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique id for the execution request"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#executionrequestinput"},(0,r.kt)("code",null,"ExecutionRequestInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Input provided when creating the Execution Request"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"result",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#executionrequestresult"},(0,r.kt)("code",null,"ExecutionRequestResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Result of the execution request"))))),(0,r.kt)("h2",{id:"executionrequestinput"},"ExecutionRequestInput"),(0,r.kt)("p",null,"Input provided when creating an Execution Request"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"task",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the task to 
executed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"source",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#executionrequestsource"},(0,r.kt)("code",null,"ExecutionRequestSource!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The source of the execution request"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"arguments",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Arguments provided when creating the execution request"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"requestedAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the request was created"))))),(0,r.kt)("h2",{id:"executionrequestresult"},"ExecutionRequestResult"),(0,r.kt)("p",null,"The result of an ExecutionRequest"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The result of the request, e.g. 
either SUCCEEDED or FAILED"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Time at which the task began"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"durationMs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Duration of the task"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"report",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A report about the ingestion run"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"structuredReport",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#structuredreport"},(0,r.kt)("code",null,"StructuredReport"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A structured report for this Execution Request"))))),(0,r.kt)("h2",{id:"executionrequestsource"},"ExecutionRequestSource"),(0,r.kt)("p",null,"Information about the source of an execution request"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the source, e.g. 
SCHEDULED_INGESTION_SOURCE"))))),(0,r.kt)("h2",{id:"facetfilter"},"FacetFilter"),(0,r.kt)("p",null,"A single filter value"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of field to filter by"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"condition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#filteroperator"},(0,r.kt)("code",null,"FilterOperator"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Condition for the values."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"values",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Values, one of which the intended field should match."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"negated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"If the filter should or should not be matched"))))),(0,r.kt)("h2",{id:"facetmetadata"},"FacetMetadata"),(0,r.kt)("p",null,"Contains valid fields to filter search results further on"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of a field present in the search 
entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aggregations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#aggregationmetadata"},(0,r.kt)("code",null,"[AggregationMetadata!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Aggregated search result counts by value of the field"))))),(0,r.kt)("h2",{id:"featureflagsconfig"},"FeatureFlagsConfig"),(0,r.kt)("p",null,"Configurations related to DataHub Views feature"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"readOnlyModeEnabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether read only mode is enabled on an instance. 
Right now this only affects ability to edit user profile image URL but can be extended."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"showSearchFiltersV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether search filters V2 should be shown or the default filter side-panel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"showBrowseV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether browse V2 sidebar should be shown"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"showAcrylInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether we should show CTAs in the UI related to moving to Managed DataHub by Acryl."))))),(0,r.kt)("h2",{id:"fieldusagecounts"},"FieldUsageCounts"),(0,r.kt)("p",null,"The usage for a particular Dataset field"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The path of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The count of 
usages"))))),(0,r.kt)("h2",{id:"finegrainedlineage"},"FineGrainedLineage"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"upstreams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldref"},(0,r.kt)("code",null,"[SchemaFieldRef!]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"downstreams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldref"},(0,r.kt)("code",null,"[SchemaFieldRef!]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"floatbox"},"FloatBox"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"floatValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#float"},(0,r.kt)("code",null,"Float!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"foreignkeyconstraint"},"ForeignKeyConstraint"),(0,r.kt)("p",null,"Metadata around a foreign key constraint between two datasets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The human-readable name of the constraint"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"foreignFields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldentity"},(0,r.kt)("code",null,"[SchemaFieldEntity]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of fields in the foreign 
dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceFields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldentity"},(0,r.kt)("code",null,"[SchemaFieldEntity]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of fields in this dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"foreignDataset",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"Dataset"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The foreign dataset for easy reference"))))),(0,r.kt)("h2",{id:"freshnessstats"},"FreshnessStats"),(0,r.kt)("p",null,"Freshness stats for a query result.\nCaptures whether the query was served out of a cache, what the staleness was, etc."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"cached",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether a cache was used to respond to this query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"systemFreshness",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#systemfreshness"},(0,r.kt)("code",null,"[SystemFreshness]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The latest timestamp in millis of the system that was used to respond to this query In case a cache was consulted, this reflects the freshness of the cache In case an index was consulted, this reflects the freshness of the index"))))),(0,r.kt)("h2",{id:"getquickfiltersresult"},"GetQuickFiltersResult"),(0,r.kt)("p",null,"The result object when fetching quick 
filters"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"quickFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#quickfilter"},(0,r.kt)("code",null,"[QuickFilter]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The list of quick filters to render in the UI"))))),(0,r.kt)("h2",{id:"getrootglossarynodesresult"},"GetRootGlossaryNodesResult"),(0,r.kt)("p",null,"The result when getting Glossary entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"nodes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossarynode"},(0,r.kt)("code",null,"[GlossaryNode!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of Glossary Nodes without a parent node"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of nodes in the returned result"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of nodes in the result set"))))),(0,r.kt)("h2",{id:"getrootglossarytermsresult"},"GetRootGlossaryTermsResult"),(0,r.kt)("p",null,"The result when getting root 
GlossaryTerms"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"terms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterm"},(0,r.kt)("code",null,"[GlossaryTerm!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of Glossary Terms without a parent node"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of terms in the returned result"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of terms in the result set"))))),(0,r.kt)("h2",{id:"getschemablameresult"},"GetSchemaBlameResult"),(0,r.kt)("p",null,"Schema changes computed at a specific version."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#semanticversionstruct"},(0,r.kt)("code",null,"SemanticVersionStruct"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Selected semantic 
version"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schemaFieldBlameList",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldblame"},(0,r.kt)("code",null,"[SchemaFieldBlame!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of schema blame. Absent when there are no fields to return history for."))))),(0,r.kt)("h2",{id:"getschemaversionlistresult"},"GetSchemaVersionListResult"),(0,r.kt)("p",null,"Schema changes computed at a specific version."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"latestVersion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#semanticversionstruct"},(0,r.kt)("code",null,"SemanticVersionStruct"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Latest and current semantic version"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#semanticversionstruct"},(0,r.kt)("code",null,"SemanticVersionStruct"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Selected semantic version"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"semanticVersionList",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#semanticversionstruct"},(0,r.kt)("code",null,"[SemanticVersionStruct!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"All semantic versions. 
Absent when there are no versions."))))),(0,r.kt)("h2",{id:"globaltags"},"GlobalTags"),(0,r.kt)("p",null,"Tags attached to a particular Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#tagassociation"},(0,r.kt)("code",null,"[TagAssociation!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The set of tags attached to the Metadata Entity"))))),(0,r.kt)("h2",{id:"globalviewssettings"},"GlobalViewsSettings"),(0,r.kt)("p",null,"Global (platform-level) settings related to the Views feature"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"defaultView",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The global default View. 
If a user does not have a personal default, then this will be the default view."))))),(0,r.kt)("h2",{id:"glossarynode"},"GlossaryNode"),(0,r.kt)("p",null,"A Glossary Node, or a directory in a Business Glossary represents a container of\nGlossary Terms or other Glossary Nodes"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossarynodeproperties"},(0,r.kt)("code",null,"GlossaryNodeProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional properties associated with the Glossary Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentNodes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentnodesresult"},(0,r.kt)("code",null,"ParentNodesResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of glossary nodes for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprivileges"},(0,r.kt)("code",null,"EntityPrivileges"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Privileges given to a user relevant to this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"glossarynodeproperties"},"GlossaryNodeProperties"),(0,r.kt)("p",null,"Additional read only properties about a Glossary Node"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Glossary 
Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the glossary term"))))),(0,r.kt)("h2",{id:"glossaryterm"},"GlossaryTerm"),(0,r.kt)("p",null,"A Glossary Term, or a node in a Business Glossary representing a standardized domain\ndata type"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the Glossary 
Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"A unique identifier for the Glossary Term. Deprecated - Use properties.name field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"hierarchicalName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"hierarchicalName of glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossarytermproperties"},(0,r.kt)("code",null,"GlossaryTermProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional properties associated with the Glossary Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTermInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterminfo"},(0,r.kt)("code",null,"GlossaryTermInfo"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use properties field instead Details of the Glossary Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the Glossary Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schemaMetadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemametadata"},(0,r.kt)("code",null,"SchemaMetadata"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema metadata of the dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentNodes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentnodesresult"},(0,r.kt)("code",null,"ParentNodesResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of glossary nodes for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprivileges"},(0,r.kt)("code",null,"EntityPrivileges"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Privileges given to a user relevant to this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"glossarytermassociation"},"GlossaryTermAssociation"),(0,r.kt)("p",null,"An edge between a 
Metadata Entity and a Glossary Term Modeled as a struct to permit\nadditional attributes\nTODO Consider whether this query should be serviced by the relationships field"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"term",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterm"},(0,r.kt)("code",null,"GlossaryTerm!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The glossary term itself"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Reference back to the associated urn for tracking purposes e.g. when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"glossaryterminfo"},"GlossaryTermInfo"),(0,r.kt)("p",null,"Deprecated, use GlossaryTermProperties instead\nInformation about a glossary term"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Glossary Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer 
supported"),(0,r.kt)("p",null,"Definition of the glossary term. Deprecated - Use 'description' instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"termSource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Term Source of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceRef",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Ref of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Url of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"rawSchema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema definition of glossary term"))))),(0,r.kt)("h2",{id:"glossarytermproperties"},"GlossaryTermProperties"),(0,r.kt)("p",null,"Additional read only properties about a Glossary Term"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Glossary 
Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Definition of the glossary term. Deprecated - Use 'description' instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"termSource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Term Source of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceRef",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Ref of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Url of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"rawSchema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema definition of glossary term"))))),(0,r.kt)("h2",{id:"glossaryterms"},"GlossaryTerms"),(0,r.kt)("p",null,"Glossary Terms attached to a particular Metadata 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"terms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossarytermassociation"},(0,r.kt)("code",null,"[GlossaryTermAssociation!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The set of glossary terms attached to the Metadata Entity"))))),(0,r.kt)("h2",{id:"health"},"Health"),(0,r.kt)("p",null,"The resolved Health of an Asset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#healthstatustype"},(0,r.kt)("code",null,"HealthStatusType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An enum representing the type of health indicator"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#healthstatus"},(0,r.kt)("code",null,"HealthStatus!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An enum representing the resolved Health status of an Asset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"message",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional message describing the resolved health status"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"causes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The causes responsible for the health status"))))),(0,r.kt)("h2",{id:"highlight"},"Highlight"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"body",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"hyperparametermap"},"HyperParameterMap"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"key",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#hyperparametervaluetype"},(0,r.kt)("code",null,"HyperParameterValueType!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"identitymanagementconfig"},"IdentityManagementConfig"),(0,r.kt)("p",null,"Configurations related to Identity Management"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether identity management 
screen is able to be shown in the UI"))))),(0,r.kt)("h2",{id:"ingestionconfig"},"IngestionConfig"),(0,r.kt)("p",null,"A set of configurations for an Ingestion Source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"recipe",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The JSON-encoded recipe to use for ingestion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executorId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Advanced: The specific executor that should handle the execution request. Defaults to 'default'."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Advanced: The version of the ingestion framework to use"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"debugMode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Advanced: Whether or not to run ingestion in debug mode"))))),(0,r.kt)("h2",{id:"ingestionrun"},"IngestionRun"),(0,r.kt)("p",null,"The runs associated with an Ingestion Source managed by 
DataHub"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionRequestUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the execution request associated with the user"))))),(0,r.kt)("h2",{id:"ingestionschedule"},"IngestionSchedule"),(0,r.kt)("p",null,"A schedule associated with an Ingestion Source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"timezone",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Time Zone abbreviation (e.g. GMT, EDT). 
Defaults to UTC."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"interval",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The cron-formatted interval to execute the ingestion source on"))))),(0,r.kt)("h2",{id:"ingestionsource"},"IngestionSource"),(0,r.kt)("p",null,"An Ingestion Source Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Ingestion Source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the source itself, e.g. mysql, bigquery, bigquery-usage. 
Should match the recipe."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name of the Ingestion Source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schedule",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ingestionschedule"},(0,r.kt)("code",null,"IngestionSchedule"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional schedule associated with the Ingestion Source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The data platform associated with this ingestion source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"config",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ingestionconfig"},(0,r.kt)("code",null,"IngestionConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An type-specific set of configurations for the ingestion source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ingestionsourceexecutionrequests"},(0,r.kt)("code",null,"IngestionSourceExecutionRequests"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Previous requests to execute the ingestion 
source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"ingestionsourceexecutionrequests"},"IngestionSourceExecutionRequests"),(0,r.kt)("p",null,"Requests for execution associated with an ingestion source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionRequests",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#executionrequest"},(0,r.kt)("code",null,"[ExecutionRequest!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The execution request objects comprising the result 
set"))))),(0,r.kt)("h2",{id:"inputfield"},"InputField"),(0,r.kt)("p",null,"Input field of the chart"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"schemaFieldUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schemaField",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafield"},(0,r.kt)("code",null,"SchemaField"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"inputfields"},"InputFields"),(0,r.kt)("p",null,"Input fields of the chart"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#inputfield"},(0,r.kt)("code",null,"[InputField]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"institutionalmemory"},"InstitutionalMemory"),(0,r.kt)("p",null,"Institutional memory metadata, meaning internal links and pointers related to an Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"elements",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemorymetadata"},(0,r.kt)("code",null,"[InstitutionalMemoryMetadata!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of records that represent the institutional memory or internal documentation of an 
entity"))))),(0,r.kt)("h2",{id:"institutionalmemorymetadata"},"InstitutionalMemoryMetadata"),(0,r.kt)("p",null,"An institutional memory resource about a particular Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"url",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Link to a document or wiki page or another internal resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"label",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Label associated with the URL"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"author",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The author of this metadata"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the creation of this resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use label instead Description of the resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Reference back to the owned urn for tracking purposes e.g. 
when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"intbox"},"IntBox"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"intValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"intendeduse"},"IntendedUse"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"primaryUses",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Primary Use cases for the model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"primaryUsers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#intendedusertype"},(0,r.kt)("code",null,"[IntendedUserType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Primary Intended Users"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"outOfScopeUses",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Out of scope uses of the MLModel"))))),(0,r.kt)("h2",{id:"invitetoken"},"InviteToken"),(0,r.kt)("p",null,"Token that allows users to sign up as a native 
user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"inviteToken",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The invite token"))))),(0,r.kt)("h2",{id:"keyvalueschema"},"KeyValueSchema"),(0,r.kt)("p",null,"Information about a raw Key Value Schema"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"keySchema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Raw key schema"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"valueSchema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Raw value schema"))))),(0,r.kt)("h2",{id:"lineageconfig"},"LineageConfig"),(0,r.kt)("p",null,"Configurations related to Lineage"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"supportsImpactAnalysis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the backend support impact analysis feature"))))),(0,r.kt)("h2",{id:"lineagerelationship"},"LineageRelationship"),(0,r.kt)("p",null,"Metadata about a lineage relationship between two 
entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the relationship"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity that is related via lineage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"degree",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Degree of relationship (number of hops to get to entity)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdOn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Timestamp for when this lineage relationship was created. Could be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdActor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actor who created this lineage relationship. Could be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"updatedOn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Timestamp for when this lineage relationship was last updated. Could be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"updatedActor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actor who last updated this lineage relationship. 
Could be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"isManual",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether this edge is a manual edge. Could be null."))))),(0,r.kt)("h2",{id:"linkparams"},"LinkParams"),(0,r.kt)("p",null,"Parameters required to specify the page to land once clicked"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchparams"},(0,r.kt)("code",null,"SearchParams"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Context to define the search page"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityProfileParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprofileparams"},(0,r.kt)("code",null,"EntityProfileParams"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Context to define the entity profile page"))))),(0,r.kt)("h2",{id:"listaccesstokenresult"},"ListAccessTokenResult"),(0,r.kt)("p",null,"Results returned when listing access tokens"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be 
returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tokens",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#accesstokenmetadata"},(0,r.kt)("code",null,"[AccessTokenMetadata!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The token metadata themselves"))))),(0,r.kt)("h2",{id:"listdomainsresult"},"ListDomainsResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub Domains"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Domains in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Domains in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domains",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domain"},(0,r.kt)("code",null,"[Domain!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domains themselves"))))),(0,r.kt)("h2",{id:"listgroupsresult"},"ListGroupsResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub 
Groups"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Policies in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Policies in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpgroup"},(0,r.kt)("code",null,"[CorpGroup!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The groups themselves"))))),(0,r.kt)("h2",{id:"listingestionsourcesresult"},"ListIngestionSourcesResult"),(0,r.kt)("p",null,"Results returned when listing ingestion sources"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be 
returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ingestionSources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ingestionsource"},(0,r.kt)("code",null,"[IngestionSource!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Ingestion Sources themselves"))))),(0,r.kt)("h2",{id:"listownershiptypesresult"},"ListOwnershipTypesResult"),(0,r.kt)("p",null,"Results when listing custom ownership types."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownershiptypeentity"},(0,r.kt)("code",null,"[OwnershipTypeEntity!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Custom Ownership Types themselves"))))),(0,r.kt)("h2",{id:"listpoliciesresult"},"ListPoliciesResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub Access 
Policies"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Policies in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Policies in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"policies",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#policy"},(0,r.kt)("code",null,"[Policy!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Policies themselves"))))),(0,r.kt)("h2",{id:"listpostsresult"},"ListPostsResult"),(0,r.kt)("p",null,"The result obtained when listing Posts"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Roles in the returned result 
set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Roles in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"posts",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#post"},(0,r.kt)("code",null,"[Post!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Posts themselves"))))),(0,r.kt)("h2",{id:"listqueriesresult"},"ListQueriesResult"),(0,r.kt)("p",null,"Results when listing entity queries"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"queries",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#queryentity"},(0,r.kt)("code",null,"[QueryEntity!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Queries themselves"))))),(0,r.kt)("h2",{id:"listrecommendationsresult"},"ListRecommendationsResult"),(0,r.kt)("p",null,"Results returned by the ListRecommendations 
query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"modules",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#recommendationmodule"},(0,r.kt)("code",null,"[RecommendationModule!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of modules to show"))))),(0,r.kt)("h2",{id:"listrolesresult"},"ListRolesResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub Roles"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Roles in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Roles in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"roles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datahubrole"},(0,r.kt)("code",null,"[DataHubRole!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Roles themselves"))))),(0,r.kt)("h2",{id:"listsecretsresult"},"ListSecretsResult"),(0,r.kt)("p",null,"Input for listing DataHub 
Secrets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"secrets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#secret"},(0,r.kt)("code",null,"[Secret!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The secrets themselves"))))),(0,r.kt)("h2",{id:"listtestsresult"},"ListTestsResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub Tests"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Tests in the returned result 
set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Tests in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tests",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#test"},(0,r.kt)("code",null,"[Test!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Tests themselves"))))),(0,r.kt)("h2",{id:"listusersresult"},"ListUsersResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub Users"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Policies in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Policies in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The users themselves"))))),(0,r.kt)("h2",{id:"listviewsresult"},"ListViewsResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub 
Views"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Views in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Views in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"views",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datahubview"},(0,r.kt)("code",null,"[DataHubView!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Views themselves"))))),(0,r.kt)("h2",{id:"managedingestionconfig"},"ManagedIngestionConfig"),(0,r.kt)("p",null,"Configurations related to managed, UI based ingestion"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether ingestion screen is enabled in the UI"))))),(0,r.kt)("h2",{id:"matchedfield"},"MatchedField"),(0,r.kt)("p",null,"An overview of the field that was matched in the entity search 
document"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of the field that matched"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Value of the field that matched"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity if the value is an urn"))))),(0,r.kt)("h2",{id:"media"},"Media"),(0,r.kt)("p",null,"Media content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#mediatype"},(0,r.kt)("code",null,"MediaType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of media"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"location",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The location of the media (a 
URL)"))))),(0,r.kt)("h2",{id:"metrics"},"Metrics"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"performanceMeasures",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Measures of ML Model performance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"decisionThreshold",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Decision Thresholds used if any"))))),(0,r.kt)("h2",{id:"mlfeature"},"MLFeature"),(0,r.kt)("p",null,"An ML Feature Metadata Entity Note that this entity is incubating"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the ML Feature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity 
Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name for the ML Feature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"featureNamespace",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"MLFeature featureNamespace"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description about the ML Feature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#mlfeaturedatatype"},(0,r.kt)("code",null,"MLFeatureDataType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"MLFeature data type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"featureProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeatureproperties"},(0,r.kt)("code",null,"MLFeatureProperties"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"ModelProperties metadata of the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeatureproperties"},(0,r.kt)("code",null,"MLFeatureProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"ModelProperties 
metadata of the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. 
If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage 
graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags applied to entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeatureeditableproperties"},(0,r.kt)("code",null,"MLFeatureEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on 
DataHub"))))),(0,r.kt)("h2",{id:"mlfeatureeditableproperties"},"MLFeatureEditableProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The edited description"))))),(0,r.kt)("h2",{id:"mlfeatureproperties"},"MLFeatureProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#mlfeaturedatatype"},(0,r.kt)("code",null,"MLFeatureDataType"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#versiontag"},(0,r.kt)("code",null,"VersionTag"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"[Dataset]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"mlfeaturetable"},"MLFeatureTable"),(0,r.kt)("p",null,"An ML Feature Table Entity Note that this entity is 
incubating"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the ML Feature Table"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the MLFeatureTable is 
defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"MLFeatureTable description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the MLFeatureTable"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeaturetableproperties"},(0,r.kt)("code",null,"MLFeatureTableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties associated the the ML Feature Table"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"featureTableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeaturetableproperties"},(0,r.kt)("code",null,"MLFeatureTableProperties"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead ModelProperties metadata of the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the 
MLFeatureTable"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage 
graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the ML Feature Table. If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. 
If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags applied to entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeaturetableeditableproperties"},(0,r.kt)("code",null,"MLFeatureTableEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"mlfeaturetableeditableproperties"},"MLFeatureTableEditableProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The edited 
description"))))),(0,r.kt)("h2",{id:"mlfeaturetableproperties"},"MLFeatureTableProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"mlFeatures",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeature"},(0,r.kt)("code",null,"[MLFeature]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"mlPrimaryKeys",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlprimarykey"},(0,r.kt)("code",null,"[MLPrimaryKey]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"mlhyperparam"},"MLHyperParam"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"
},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"mlmetric"},"MLMetric"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"mlmodel"},"MLModel"),(0,r.kt)("p",null,"An ML Model Metadata Entity Note that this entity is 
incubating"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the ML model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"ML model display name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the MLModel is 
defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"origin",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#fabrictype"},(0,r.kt)("code",null,"FabricType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Fabric type where mlmodel belongs to or where it was generated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Human readable description for mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags field instead The standard tags for the ML Model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard tags for the ML Model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelproperties"},(0,r.kt)("code",null,"MLModelProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only information about the ML Model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"intendedUse",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#intendeduse"},(0,r.kt)("code",null,"IntendedUse"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Intended use of the 
mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"factorPrompts",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelfactorprompts"},(0,r.kt)("code",null,"MLModelFactorPrompts"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Factors metadata of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metrics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#metrics"},(0,r.kt)("code",null,"Metrics"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Metrics metadata of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"evaluationData",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#basedata"},(0,r.kt)("code",null,"[BaseData!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Evaluation Data of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"trainingData",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#basedata"},(0,r.kt)("code",null,"[BaseData!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Training Data of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"quantitativeAnalyses",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#quantitativeanalyses"},(0,r.kt)("code",null,"QuantitativeAnalyses"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Quantitative Analyses of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ethicalConsiderations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ethicalconsiderations"},(0,r.kt)("code",null,"EthicalConsiderations"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ethical Considerations of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"caveatsAndRecommendations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#caveatsandrecommendations"},(0,r.kt)("code",null,"CaveatsAndRecommendations"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Caveats and Recommendations of the 
mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceCode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#sourcecode"},(0,r.kt)("code",null,"SourceCode"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Code"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cost",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#cost"},(0,r.kt)("code",null,"Cost"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Cost Aspect of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the ML Model. If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. 
If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodeleditableproperties"},(0,r.kt)("code",null,"MLModelEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"mlmodeleditableproperties"},"MLModelEditableProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The edited 
description"))))),(0,r.kt)("h2",{id:"mlmodelfactorprompts"},"MLModelFactorPrompts"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"relevantFactors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelfactors"},(0,r.kt)("code",null,"[MLModelFactors!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"What are foreseeable salient factors for which MLModel performance may vary, and how were these determined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"evaluationFactors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelfactors"},(0,r.kt)("code",null,"[MLModelFactors!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Which factors are being reported, and why were these chosen"))))),(0,r.kt)("h2",{id:"mlmodelfactors"},"MLModelFactors"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Distinct categories with similar characteristics that are present in the evaluation data instances"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"instrumentation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Instrumentation used for MLModel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"environment",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Environment in which the MLModel is 
deployed"))))),(0,r.kt)("h2",{id:"mlmodelgroup"},"MLModelGroup"),(0,r.kt)("p",null,"An ML Model Group Metadata Entity\nNote that this entity is incubating"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the ML Model Group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name for the 
Entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the MLModelGroup is defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"origin",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#fabrictype"},(0,r.kt)("code",null,"FabricType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Fabric type where MLModelGroup belongs to or where it was generated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Human readable description for MLModelGroup"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelgroupproperties"},(0,r.kt)("code",null,"MLModelGroupProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties about the ML Model Group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the MLModelGroup"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the MLModelGroup"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The 
specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the ML Model Group. 
If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags applied to entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelgroupeditableproperties"},(0,r.kt)("code",null,"MLModelGroupEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on 
DataHub"))))),(0,r.kt)("h2",{id:"mlmodelgroupeditableproperties"},"MLModelGroupEditableProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The edited description"))))),(0,r.kt)("h2",{id:"mlmodelgroupproperties"},"MLModelGroupProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#versiontag"},(0,r.kt)("code",null,"VersionTag"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"mlmodelproperties"},"MLModelProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"date",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",nu
ll)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"hyperParameters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#hyperparametermap"},(0,r.kt)("code",null,"HyperParameterMap"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"hyperParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlhyperparam"},(0,r.kt)("code",null,"[MLHyperParam]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"trainingMetrics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmetric"},(0,r.kt)("code",null,"[MLMetric]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"mlFeatures",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelgroup"},(0,r.kt)("code",null,"[MLModelGroup]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"mlprimarykey"},"MLPrimaryKey"),(0,r.kt)("p",null,"An ML Primary Key Entity Note that this entity is 
incubating"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the ML Primary Key"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"featureNamespace",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"MLPrimaryKey featureNamespace"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"MLPrimaryKey 
description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#mlfeaturedatatype"},(0,r.kt)("code",null,"MLFeatureDataType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"MLPrimaryKey data type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlprimarykeyproperties"},(0,r.kt)("code",null,"MLPrimaryKeyProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties of the ML Primary Key"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"primaryKeyProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlprimarykeyproperties"},(0,r.kt)("code",null,"MLPrimaryKeyProperties"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead MLPrimaryKeyProperties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the MLPrimaryKey"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the MLPrimaryKey"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the 
MLPrimaryKey"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage 
graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags applied to entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlprimarykeyeditableproperties"},(0,r.kt)("code",null,"MLPrimaryKeyEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on 
DataHub"))))),(0,r.kt)("h2",{id:"mlprimarykeyeditableproperties"},"MLPrimaryKeyEditableProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The edited description"))))),(0,r.kt)("h2",{id:"mlprimarykeyproperties"},"MLPrimaryKeyProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#mlfeaturedatatype"},(0,r.kt)("code",null,"MLFeatureDataType"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#versiontag"},(0,r.kt)("code",null,"VersionTag"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"[Dataset]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"namedbar"},"NamedBar"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"segments",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#barsegment"},(0,r.kt)("code",null,"[BarSegment!]!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"namedline"},"NamedLine"),(0,r.kt)("p",null,"For consumption by UI only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"data",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#numericdatapoint"},(0,r.kt)("code",null,"[NumericDataPoint!]!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"notebook"},"Notebook"),(0,r.kt)("p",null,"A Notebook Metadata 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tool",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Notebook tool name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"notebookId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An id unique within the Notebook tool"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#notebookinfo"},(0,r.kt)("code",null,"NotebookInfo"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only information about the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#notebookeditableproperties"},(0,r.kt)("code",null,"NotebookEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read write 
properties about the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"content",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#notebookcontent"},(0,r.kt)("code",null,"NotebookContent!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The content of this Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tags associated with the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#subtypes"},(0,r.kt)("code",null,"SubTypes"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Sub Types that this entity implements"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the Notebook. 
If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"notebookcell"},"NotebookCell"),(0,r.kt)("p",null,"The Union of every NotebookCell"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"chartCell",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#chartcell"},(0,r.kt)("code",null,"ChartCell"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The chart cell content. The will be non-null only when all other cell field is null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"textCell",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#textcell"},(0,r.kt)("code",null,"TextCell"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The text cell content. The will be non-null only when all other cell field is null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"queryChell",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#querycell"},(0,r.kt)("code",null,"QueryCell"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query cell content. 
The will be non-null only when all other cell field is null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#notebookcelltype"},(0,r.kt)("code",null,"NotebookCellType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of this Notebook cell"))))),(0,r.kt)("h2",{id:"notebookcontent"},"NotebookContent"),(0,r.kt)("p",null,"The actual content in a Notebook"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"cells",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#notebookcell"},(0,r.kt)("code",null,"[NotebookCell!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The content of a Notebook which is composed by a list of NotebookCell"))))),(0,r.kt)("h2",{id:"notebookeditableproperties"},"NotebookEditableProperties"),(0,r.kt)("p",null,"Notebook properties that are editable via the UI This represents logical metadata,\nas opposed to technical metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Notebook"))))),(0,r.kt)("h2",{id:"notebookinfo"},"NotebookInfo"),(0,r.kt)("p",null,"Additional read only information about a 
Notebook"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display of the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native platform URL of the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"changeAuditStamps",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#changeauditstamps"},(0,r.kt)("code",null,"ChangeAuditStamps"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Captures information about who created/last modified/deleted this Notebook and when"))))),(0,r.kt)("h2",{id:"numericdatapoint"},"NumericDataPoint"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"x",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"y",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"operation"},"Operation"),(0,r.kt)("p",null,"Operational info for an entity."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#timeseriesaspect"},"TimeSeriesAspect"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the operation was reported"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Actor who issued this operation."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"operationType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#operationtype"},(0,r.kt)("code",null,"OperationType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Operation type of 
change."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customOperationType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A custom operation type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#operationsourcetype"},(0,r.kt)("code",null,"OperationSourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source of the operation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"numAffectedRows",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"How many rows were affected by this operation."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"affectedDatasets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Which other datasets were affected by this operation."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastUpdatedTimestamp",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"When time at which the asset was actually updated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"partition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional partition identifier"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom operation properties"))))),(0,r.kt)("h2",{id:"origin"},"Origin"),(0,r.kt)("p",null,"Carries information about where an entity originated 
from."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#origintype"},(0,r.kt)("code",null,"OriginType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Where an entity originated from. Either NATIVE or EXTERNAL"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Only populated if type is EXTERNAL. The externalType of the entity, such as the name of the identity provider."))))),(0,r.kt)("h2",{id:"owner"},"Owner"),(0,r.kt)("p",null,"An owner of a Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"owner",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#ownertype"},(0,r.kt)("code",null,"OwnerType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Owner object"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownershiptype"},(0,r.kt)("code",null,"OwnershipType"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"The type of the ownership. 
Deprecated - Use ownershipType field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownershiptypeentity"},(0,r.kt)("code",null,"OwnershipTypeEntity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership type information"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"source",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownershipsource"},(0,r.kt)("code",null,"OwnershipSource"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source information for the ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Reference back to the owned urn for tracking purposes e.g. when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"ownership"},"Ownership"),(0,r.kt)("p",null,"Ownership information about a Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"owners",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#owner"},(0,r.kt)("code",null,"[Owner!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of owners of the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Audit stamp containing who last modified the record and when"))))),(0,r.kt)("h2",{id:"ownershipsource"},"OwnershipSource"),(0,r.kt)("p",null,"Information about the source of Ownership metadata about a Metadata 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownershipsourcetype"},(0,r.kt)("code",null,"OwnershipSourceType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"url",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional reference URL for the source"))))),(0,r.kt)("h2",{id:"ownershiptypeentity"},"OwnershipTypeEntity"),(0,r.kt)("p",null,"A single Custom Ownership Type"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A primary key associated with the custom ownership type."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownershiptypeinfo"},(0,r.kt)("code",null,"OwnershipTypeInfo"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Information about the Custom Ownership 
Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status of the Custom Ownership Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from the Custom Ownership Type"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"ownershiptypeinfo"},"OwnershipTypeInfo"),(0,r.kt)("p",null,"Properties about an individual Custom Ownership Type."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Custom Ownership Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the Custom Ownership 
Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An Audit Stamp corresponding to the creation of this resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An Audit Stamp corresponding to the update of this resource"))))),(0,r.kt)("h2",{id:"parentcontainersresult"},"ParentContainersResult"),(0,r.kt)("p",null,"All of the parent containers for a given entity. Returns parents with direct parent first followed by the parent's parent etc."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of containers bubbling up for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"containers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#container"},(0,r.kt)("code",null,"[Container!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of parent containers in order from direct parent, to parent's parent etc. 
If there are no containers, return an emty list"))))),(0,r.kt)("h2",{id:"parentnodesresult"},"ParentNodesResult"),(0,r.kt)("p",null,"All of the parent nodes for GlossaryTerms and GlossaryNodes"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of parent nodes bubbling up for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nodes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossarynode"},(0,r.kt)("code",null,"[GlossaryNode!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of parent nodes in order from direct parent, to parent's parent etc. If there are no nodes, return an empty list"))))),(0,r.kt)("h2",{id:"partitionspec"},"PartitionSpec"),(0,r.kt)("p",null,"Information about the partition being profiled"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#partitiontype"},(0,r.kt)("code",null,"PartitionType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The partition type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"partition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The partition identifier"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"timePartition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#timewindow"},(0,r.kt)("code",null,"TimeWindow"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The 
optional time window partition information"))))),(0,r.kt)("h2",{id:"platformprivileges"},"PlatformPrivileges"),(0,r.kt)("p",null,"The platform privileges that the currently authenticated user has"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewAnalytics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to view analytics"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"managePolicies",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to manage policies"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageIdentities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to manage users & groups"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"generatePersonalAccessTokens",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to generate personal access tokens"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createDomains",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to create new Domains"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageDomains",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to manage 
Domains"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageIngestion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user is able to manage UI-based ingestion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageSecrets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user is able to manage UI-based secrets"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageTokens",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to manage tokens on behalf of other users."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageTests",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user is able to manage Tests"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageGlossaries",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to manage Glossaries"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageUserCredentials",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user is able to manage user credentials"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to create new 
Tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to create and delete all Tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageGlobalViews",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to create, update, and delete global views."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageOwnershipTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to create, update, and delete ownership types."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageGlobalAnnouncements",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user can create and delete posts pinned to the home page."))))),(0,r.kt)("h2",{id:"policiesconfig"},"PoliciesConfig"),(0,r.kt)("p",null,"Configurations related to the Policies Feature"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the policies feature is enabled and should be displayed in the UI"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformPrivileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#privilege"},(0,r.kt)("code",null,"[Privilege!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform privileges to 
display in the Policy Builder experience"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourcePrivileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#resourceprivileges"},(0,r.kt)("code",null,"[ResourcePrivileges!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of resource privileges to display in the Policy Builder experience"))))),(0,r.kt)("h2",{id:"policy"},"Policy"),(0,r.kt)("p",null,"DEPRECATED\nTODO: Eventually get rid of this in favor of DataHub Policy\nAn DataHub Platform Access Policy Access Policies determine who can perform what actions against which resources on the platform"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policytype"},(0,r.kt)("code",null,"PolicyType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"state",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policystate"},(0,r.kt)("code",null,"PolicyState!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The present state of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the 
Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#resourcefilter"},(0,r.kt)("code",null,"ResourceFilter"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The resources that the Policy privileges apply to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The privileges that the Policy grants"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#actorfilter"},(0,r.kt)("code",null,"ActorFilter!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actors that the Policy grants privileges to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editable",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the Policy is editable, ie system policies, or not"))))),(0,r.kt)("h2",{id:"policymatchcriterion"},"PolicyMatchCriterion"),(0,r.kt)("p",null,"Criterion to define relationship between field and values"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the field that the criterion refers to e.g. entity_type, entity_urn, domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"values",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#policymatchcriterionvalue"},(0,r.kt)("code",null,"[PolicyMatchCriterionValue!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Values. 
Matches criterion if any one of the values matches condition (OR-relationship)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"condition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policymatchcondition"},(0,r.kt)("code",null,"PolicyMatchCondition!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the field that the criterion refers to"))))),(0,r.kt)("h2",{id:"policymatchcriterionvalue"},"PolicyMatchCriterionValue"),(0,r.kt)("p",null,"Value in PolicyMatchCriterion with hydrated entity if value is urn"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The value of the field to match"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Hydrated entities of the above values. 
Only set if the value is an urn"))))),(0,r.kt)("h2",{id:"policymatchfilter"},"PolicyMatchFilter"),(0,r.kt)("p",null,"Filter object that encodes a complex filter logic with OR + AND"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"criteria",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#policymatchcriterion"},(0,r.kt)("code",null,"[PolicyMatchCriterion!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of criteria to apply"))))),(0,r.kt)("h2",{id:"post"},"Post"),(0,r.kt)("p",null,"Input provided when creating a Post"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from the 
Post"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"postType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#posttype"},(0,r.kt)("code",null,"PostType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"content",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#postcontent"},(0,r.kt)("code",null,"PostContent!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The content of the post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"When the post was last modified"))))),(0,r.kt)("h2",{id:"postcontent"},"PostContent"),(0,r.kt)("p",null,"Post content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"contentType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#postcontenttype"},(0,r.kt)("code",null,"PostContentType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of post content"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The title of the 
post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional content of the post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"link",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional link that the post is associated with"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"media",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#media"},(0,r.kt)("code",null,"Media"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional media contained in the post"))))),(0,r.kt)("h2",{id:"privilege"},"Privilege"),(0,r.kt)("p",null,"An individual DataHub Access Privilege"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized privilege type, serving as a unique identifier for a privilege eg EDIT_ENTITY"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name to appear when displaying the privilege, eg Edit Entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A description of the privilege to display"))))),(0,r.kt)("h2",{id:"privileges"},"Privileges"),(0,r.kt)("p",null,"Object that encodes the privileges the actor has for a given 
resource"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granted Privileges"))))),(0,r.kt)("h2",{id:"quantitativeanalyses"},"QuantitativeAnalyses"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"unitaryResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#resultstype"},(0,r.kt)("code",null,"ResultsType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Link to a dashboard with results showing how the model performed with respect to each factor"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"intersectionalResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#resultstype"},(0,r.kt)("code",null,"ResultsType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Link to a dashboard with results showing how the model performed with respect to the intersection of evaluated factors"))))),(0,r.kt)("h2",{id:"queriestabconfig"},"QueriesTabConfig"),(0,r.kt)("p",null,"Configuration for the queries tab"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"queriesTabResultSize",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of queries to show in the queries 
tab"))))),(0,r.kt)("h2",{id:"querycell"},"QueryCell"),(0,r.kt)("p",null,"A Notebook cell which contains Query as content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"cellTitle",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title of the cell"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cellId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique id for the cell."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"changeAuditStamps",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#changeauditstamps"},(0,r.kt)("code",null,"ChangeAuditStamps"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Captures information about who created/last modified/deleted this TextCell and when"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"rawQuery",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Raw query to explain some specific logic in a Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastExecuted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Captures information about who last executed this query cell and when"))))),(0,r.kt)("h2",{id:"queryentity"},"QueryEntity"),(0,r.kt)("p",null,"An individual 
Query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A primary key associated with the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#queryproperties"},(0,r.kt)("code",null,"QueryProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties about the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subjects",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#querysubject"},(0,r.kt)("code",null,"[QuerySubject!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Subjects for the query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"queryproperties"},"QueryProperties"),(0,r.kt)("p",null,"Properties about an individual Query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"statement",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#querystatement"},(0,r.kt)("code",null,"QueryStatement!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Query statement itself"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"source",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#querysource"},(0,r.kt)("code",null,"QuerySource!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The source of the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An Audit Stamp corresponding to the creation of this 
resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An Audit Stamp corresponding to the update of this resource"))))),(0,r.kt)("h2",{id:"querystatement"},"QueryStatement"),(0,r.kt)("p",null,"An individual Query Statement"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query statement value"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"language",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#querylanguage"},(0,r.kt)("code",null,"QueryLanguage!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The language for the Query Statement"))))),(0,r.kt)("h2",{id:"querysubject"},"QuerySubject"),(0,r.kt)("p",null,"The subject for a Query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataset",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"Dataset!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The dataset which is the subject of the Query"))))),(0,r.kt)("h2",{id:"quickfilter"},"QuickFilter"),(0,r.kt)("p",null,"A quick filter in search and 
auto-complete"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of field to filter by"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Value to filter on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity that the value maps to if any"))))),(0,r.kt)("h2",{id:"rawaspect"},"RawAspect"),(0,r.kt)("p",null,"Payload representing data about a single aspect"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"aspectName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the aspect"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"payload",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"JSON string containing the aspect's payload"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"renderSpec",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#aspectrenderspec"},(0,r.kt)("code",null,"AspectRenderSpec"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Details for the frontend on how the raw aspect should be 
rendered"))))),(0,r.kt)("h2",{id:"recommendationcontent"},"RecommendationContent"),(0,r.kt)("p",null,"Content to display within each recommendation module"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"String representation of content"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity being recommended. Empty if the content being recommended is not an entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"params",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#recommendationparams"},(0,r.kt)("code",null,"RecommendationParams"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional context required to generate the the recommendation"))))),(0,r.kt)("h2",{id:"recommendationmodule"},"RecommendationModule"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title of the module to display"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"moduleId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique id of the module being 
recommended"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"renderType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#recommendationrendertype"},(0,r.kt)("code",null,"RecommendationRenderType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Type of rendering that defines how the module should be rendered"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"content",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#recommendationcontent"},(0,r.kt)("code",null,"[RecommendationContent!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of content to display inside the module"))))),(0,r.kt)("h2",{id:"recommendationparams"},"RecommendationParams"),(0,r.kt)("p",null,"Parameters required to render a recommendation of a given type"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchparams"},(0,r.kt)("code",null,"SearchParams"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Context to define the search recommendations"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityProfileParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprofileparams"},(0,r.kt)("code",null,"EntityProfileParams"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Context to define the entity profile page"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"contentParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#contentparams"},(0,r.kt)("code",null,"ContentParams"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Context about the recommendation"))))),(0,r.kt)("h2",{id:"resettoken"},"ResetToken"),(0,r.kt)("p",null,"Token that allows native users to reset their 
credentials"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"resetToken",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The reset token"))))),(0,r.kt)("h2",{id:"resourcefilter"},"ResourceFilter"),(0,r.kt)("p",null,"The resources that a DataHub Access Policy applies to"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the resource the policy should apply to Not required because in the future we want to support filtering by type OR by domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of specific resource urns to apply the filter to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"allResources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether of not to apply the filter to all resources of the type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#policymatchfilter"},(0,r.kt)("code",null,"PolicyMatchFilter"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether of not to apply the filter to all resources of the 
type"))))),(0,r.kt)("h2",{id:"resourceprivileges"},"ResourcePrivileges"),(0,r.kt)("p",null,"A privilege associated with a particular resource type\nA resource is most commonly a DataHub Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Resource type associated with the Access Privilege, eg dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceTypeDisplayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name to used for displaying the resourceType"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional entity type to use when performing search and navigation to the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#privilege"},(0,r.kt)("code",null,"[Privilege!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of privileges that are supported against this 
resource"))))),(0,r.kt)("h2",{id:"role"},"Role"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A primary key of the Metadata Entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of relationships between the source Entity and some destination entities with a given types"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Id of the 
Role"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#roleproperties"},(0,r.kt)("code",null,"RoleProperties!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Role properties to include Request Access Url"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#actor"},(0,r.kt)("code",null,"Actor!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))))),(0,r.kt)("h2",{id:"roleassociation"},"RoleAssociation"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"role",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#role"},(0,r.kt)("code",null,"Role!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Role entity itself"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Reference back to the tagged urn for tracking purposes e.g. 
when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"roleproperties"},"RoleProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of the Role in an organisation "))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description about the role"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Role type can be READ, WRITE or ADMIN"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"requestUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Url to request a role for a user in an organisation"))))),(0,r.kt)("h2",{id:"roleuser"},"RoleUser"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"user",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Linked corp user of a role"))))),(0,r.kt)("h2",{id:"row"},"Row"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"values",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cells",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#cell"},(0,r.kt)("code",null,"[Cell!]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"schema"},"Schema"),(0,r.kt)("p",null,"Deprecated, use SchemaMetadata instead\nMetadata about a Dataset schema"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Dataset this schema metadata is associated with"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Platform this schema metadata is associated with"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The version of the GMS Schema 
metadata"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cluster",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The cluster this schema metadata is derived from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"hash",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The SHA1 hash of the schema content"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformSchema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#platformschema"},(0,r.kt)("code",null,"PlatformSchema"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The native schema in the datasets platform, schemaless if it was not provided"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafield"},(0,r.kt)("code",null,"[SchemaField!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Client provided a list of fields from value schema"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"primaryKeys",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Client provided list of fields that define primary keys to access record"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"foreignKeys",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#foreignkeyconstraint"},(0,r.kt)("code",null,"[ForeignKeyConstraint]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Client provided list of foreign key constraints"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the schema metadata information was 
created"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastObserved",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the schema metadata information was last ingested"))))),(0,r.kt)("h2",{id:"schemafield"},"SchemaField"),(0,r.kt)("p",null,"Information about an individual field in a Dataset schema"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flattened name of the field computed from jsonPath field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"jsonPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flattened name of a field in JSON Path notation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"label",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Human readable label for the field. 
Not supplied by all data sources"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nullable",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Indicates if this field is optional or nullable"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#schemafielddatatype"},(0,r.kt)("code",null,"SchemaFieldDataType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Platform independent field type of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nativeDataType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The native type of the field in the datasets platform as declared by platform schema"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"recursive",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the field references its own type recursively"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags field instead Tags associated with the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags associated with the 
field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Glossary terms associated with the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"isPartOfKey",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the field is part of a key schema"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"isPartitioningKey",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the field is part of a partitioning key schema"))))),(0,r.kt)("h2",{id:"schemafieldblame"},"SchemaFieldBlame"),(0,r.kt)("p",null,"Blame for a single field"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flattened name of a schema field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schemaFieldChange",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldchange"},(0,r.kt)("code",null,"SchemaFieldChange!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Attributes identifying a field change"))))),(0,r.kt)("h2",{id:"schemafieldchange"},"SchemaFieldChange"),(0,r.kt)("p",null,"Attributes identifying a field 
change"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the schema was updated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastSemanticVersion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The last semantic version that this schema was changed in"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"versionStamp",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Version stamp of the change"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"changeType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#changeoperationtype"},(0,r.kt)("code",null,"ChangeOperationType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the change"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastSchemaFieldChange",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Last column update, such as Added/Modified/Removed in v1.2.3."))))),(0,r.kt)("h2",{id:"schemafieldentity"},"SchemaFieldEntity"),(0,r.kt)("p",null,"Standalone schema field entity. 
Differs from the SchemaField struct because it is not directly nested inside a\nschema field"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Primary key of the schema field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Field path identifying the field in its dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parent",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The field's parent."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"schemafieldref"},"SchemaFieldRef"),(0,r.kt)("p",null,"A Dataset schema field (i.e. column)"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A schema field urn"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A schema field path"))))),(0,r.kt)("h2",{id:"schemametadata"},"SchemaMetadata"),(0,r.kt)("p",null,"Metadata about a Dataset schema"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#aspect"},"Aspect"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"aspectVersion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The logical version of the schema metadata, where 
zero represents the latest version with otherwise monotonic ordering starting at one"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Dataset this schema metadata is associated with"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Platform this schema metadata is associated with"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The version of the GMS Schema metadata"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cluster",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The cluster this schema metadata is derived from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"hash",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The SHA1 hash of the schema content"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformSchema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#platformschema"},(0,r.kt)("code",null,"PlatformSchema"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The native schema in the datasets platform, schemaless if it was not provided"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafield"},(0,r.kt)("code",null,"[SchemaField!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Client provided 
a list of fields from value schema"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"primaryKeys",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Client provided list of fields that define primary keys to access record"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"foreignKeys",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#foreignkeyconstraint"},(0,r.kt)("code",null,"[ForeignKeyConstraint]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Client provided list of foreign key constraints"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the schema metadata information was created"))))),(0,r.kt)("h2",{id:"scrollacrosslineageresults"},"ScrollAcrossLineageResults"),(0,r.kt)("p",null,"Results returned by issueing a search across relationships query using scroll API"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"nextScrollId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Opaque ID to pass to the next request to the server"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities included in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of search results matching the query and 
filters"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchacrosslineageresult"},(0,r.kt)("code",null,"[SearchAcrossLineageResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The search result entities"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"facets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetmetadata"},(0,r.kt)("code",null,"[FacetMetadata!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Candidate facet aggregations used for search filtering"))))),(0,r.kt)("h2",{id:"scrollresults"},"ScrollResults"),(0,r.kt)("p",null,"Results returned by issuing a search query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"nextScrollId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Opaque ID to pass to the next request to the server"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities included in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of search results matching the query and filters"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchresult"},(0,r.kt)("code",null,"[SearchResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The search result entities for a scroll 
request"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"facets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetmetadata"},(0,r.kt)("code",null,"[FacetMetadata!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Candidate facet aggregations used for search filtering"))))),(0,r.kt)("h2",{id:"searchacrosslineageresult"},"SearchAcrossLineageResult"),(0,r.kt)("p",null,"Individual search result from a search across relationships query (has added metadata about the path)"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The resolved DataHub Metadata Entity matching the search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"insights",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchinsight"},(0,r.kt)("code",null,"[SearchInsight!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Insights about why the search result was matched"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"matchedFields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#matchedfield"},(0,r.kt)("code",null,"[MatchedField!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Matched field hint"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"paths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitypath"},(0,r.kt)("code",null,"[EntityPath]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional list of entities between the source and destination node"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"degree",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Degree of relationship (number of hops to get to 
entity)"))))),(0,r.kt)("h2",{id:"searchacrosslineageresults"},"SearchAcrossLineageResults"),(0,r.kt)("p",null,"Results returned by issueing a search across relationships query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities included in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of search results matching the query and filters"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchacrosslineageresult"},(0,r.kt)("code",null,"[SearchAcrossLineageResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The search result entities"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"facets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetmetadata"},(0,r.kt)("code",null,"[FacetMetadata!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Candidate facet aggregations used for search filtering"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"freshness",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#freshnessstats"},(0,r.kt)("code",null,"FreshnessStats"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional freshness characteristics of this query (cached, staleness 
etc.)"))))),(0,r.kt)("h2",{id:"searchinsight"},"SearchInsight"),(0,r.kt)("p",null,"Insights about why a search result was returned or ranked in the way that it was"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"text",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The insight to display"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"icon",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional emoji to display in front of the text"))))),(0,r.kt)("h2",{id:"searchparams"},"SearchParams"),(0,r.kt)("p",null,"Context to define the search recommendations"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be searched. 
If this is not provided, all entities will be searched."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetfilter"},(0,r.kt)("code",null,"[FacetFilter!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Filters"))))),(0,r.kt)("h2",{id:"searchresult"},"SearchResult"),(0,r.kt)("p",null,"An individual search result hit"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The resolved DataHub Metadata Entity matching the search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"insights",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchinsight"},(0,r.kt)("code",null,"[SearchInsight!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Insights about why the search result was matched"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"matchedFields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#matchedfield"},(0,r.kt)("code",null,"[MatchedField!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Matched field hint"))))),(0,r.kt)("h2",{id:"searchresults"},"SearchResults"),(0,r.kt)("p",null,"Results returned by issuing a search 
query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities included in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of search results matching the query and filters"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchresult"},(0,r.kt)("code",null,"[SearchResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The search result entities"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"facets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetmetadata"},(0,r.kt)("code",null,"[FacetMetadata!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Candidate facet aggregations used for search filtering"))))),(0,r.kt)("h2",{id:"searchresultsvisualconfig"},"SearchResultsVisualConfig"),(0,r.kt)("p",null,"Configuration for a search 
result"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enableNameHighlight",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether a search result should highlight the name/description if it was matched on those fields."))))),(0,r.kt)("h2",{id:"secret"},"Secret"),(0,r.kt)("p",null,"A referencible secret stored in DataHub's system. Notice that we do not return the actual secret value."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the secret"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the secret"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description for the secret"))))),(0,r.kt)("h2",{id:"secretvalue"},"SecretValue"),(0,r.kt)("p",null,"A plaintext secret 
value"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the secret"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The plaintext value of the secret."))))),(0,r.kt)("h2",{id:"semanticversionstruct"},"SemanticVersionStruct"),(0,r.kt)("p",null,"Properties identify a semantic version"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"semanticVersion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Semantic version of the change"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"semanticVersionTimestamp",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Semantic version timestamp"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"versionStamp",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Version stamp of the change"))))),(0,r.kt)("h2",{id:"siblingproperties"},"SiblingProperties"),(0,r.kt)("p",null,"Metadata about the entity's 
siblings"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"isPrimary",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"If this entity is the primary sibling among the sibling set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"siblings",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"[Entity]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The sibling entities"))))),(0,r.kt)("h2",{id:"sourcecode"},"SourceCode"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceCode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#sourcecodeurl"},(0,r.kt)("code",null,"[SourceCodeUrl!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Code along with types"))))),(0,r.kt)("h2",{id:"sourcecodeurl"},"SourceCodeUrl"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#sourcecodeurltype"},(0,r.kt)("code",null,"SourceCodeUrlType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Code Url Types"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceCodeUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Code 
Url"))))),(0,r.kt)("h2",{id:"status"},"Status"),(0,r.kt)("p",null,"The status of a particular Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"removed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the entity is removed or not"))))),(0,r.kt)("h2",{id:"stepstateresult"},"StepStateResult"),(0,r.kt)("p",null,"A single step state"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique id of the step"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The properties for the step state"))))),(0,r.kt)("h2",{id:"stringbox"},"StringBox"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"stringValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"stringmapentry"},"StringMapEntry"),(0,r.kt)("p",null,"An entry in a string string map represented as a 
tuple"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"key",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The key of the map entry"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The value fo the map entry"))))),(0,r.kt)("h2",{id:"structuredreport"},"StructuredReport"),(0,r.kt)("p",null,"A flexible carrier for structured results of an execution request."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the structured report. (e.g. INGESTION_REPORT, TEST_CONNECTION_REPORT, etc.)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"serializedValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The serialized value of the structured report"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"contentType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The content-type of the serialized value (e.g. 
application/json, application/json;gzip etc.)"))))),(0,r.kt)("h2",{id:"subtypes"},"SubTypes"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"typeNames",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,'The sub-types that this entity implements. e.g. Datasets that are views will implement the "view" subtype'))))),(0,r.kt)("h2",{id:"systemfreshness"},"SystemFreshness"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"systemName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of the system"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"freshnessMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The latest timestamp in millis of the system that was used to respond to this query In case a cache was consulted, this reflects the freshness of the cache In case an index was consulted, this reflects the freshness of the index"))))),(0,r.kt)("h2",{id:"tablechart"},"TableChart"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"columns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"rows",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#row"},(0,r.kt)("code",null,"[Row!]!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"tableschema"},"TableSchema"),(0,r.kt)("p",null,"Information about a raw Table Schema"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"schema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Raw table schema"))))),(0,r.kt)("h2",{id:"tag"},"Tag"),(0,r.kt)("p",null,"A Tag Entity, which can be associated with other Metadata Entities and 
subresources"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the TAG"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"A unique identifier for the Tag. Deprecated - Use properties.name field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#tagproperties"},(0,r.kt)("code",null,"TagProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional properties about the Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#editabletagproperties"},(0,r.kt)("code",null,"EditableTagProperties"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Additional read write properties about the Tag Deprecated! 
Use 'properties' field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties.description field instead"))))),(0,r.kt)("h2",{id:"tagassociation"},"TagAssociation"),(0,r.kt)("p",null,"An edge between a Metadata Entity and a Tag Modeled as a struct to permit\nadditional attributes\nTODO Consider whether this query should be serviced by the relationships field"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tag",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#tag"},(0,r.kt)("code",null,"Tag!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tag 
itself"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Reference back to the tagged urn for tracking purposes e.g. when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"tagproperties"},"TagProperties"),(0,r.kt)("p",null,"Properties for a DataHub Tag"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A display name for the Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A description of the Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"colorHex",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional RGB hex code for a Tag color, e.g. 
#FFFFFF"))))),(0,r.kt)("h2",{id:"telemetryconfig"},"TelemetryConfig"),(0,r.kt)("p",null,"Configurations related to tracking users in the app"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enableThirdPartyLogging",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Env variable for whether or not third party logging should be enabled for this instance"))))),(0,r.kt)("h2",{id:"test"},"Test"),(0,r.kt)("p",null,"A metadata entity representing a DataHub Test"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Test itself"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the 
Test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"category",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The category of the Test (user defined)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#testdefinition"},(0,r.kt)("code",null,"TestDefinition!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Definition for the test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unused for tests"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"testdefinition"},"TestDefinition"),(0,r.kt)("p",null,"Definition of the test"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"json",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"JSON-based def for the 
test"))))),(0,r.kt)("h2",{id:"testresult"},"TestResult"),(0,r.kt)("p",null,"The result of running a test"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"test",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#test"},(0,r.kt)("code",null,"Test"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The test itself, or null if the test has been deleted"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#testresulttype"},(0,r.kt)("code",null,"TestResultType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The final result, e.g. either SUCCESS or FAILURE."))))),(0,r.kt)("h2",{id:"testresults"},"TestResults"),(0,r.kt)("p",null,"A set of test results"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"passing",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#testresult"},(0,r.kt)("code",null,"[TestResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tests passing"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"failing",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#testresult"},(0,r.kt)("code",null,"[TestResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tests failing"))))),(0,r.kt)("h2",{id:"testsconfig"},"TestsConfig"),(0,r.kt)("p",null,"Configurations related to DataHub Tests 
feature"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether Tests feature is enabled"))))),(0,r.kt)("h2",{id:"textcell"},"TextCell"),(0,r.kt)("p",null,"A Notebook cell which contains text as content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"cellTitle",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title of the cell"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cellId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique id for the cell."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"changeAuditStamps",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#changeauditstamps"},(0,r.kt)("code",null,"ChangeAuditStamps"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Captures information about who created/last modified/deleted this TextCell and when"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"text",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actual text in a TextCell in a Notebook"))))),(0,r.kt)("h2",{id:"timeserieschart"},"TimeSeriesChart"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lines",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#namedline"},(0,r.kt)("code",null,"[NamedLine!]!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dateRange",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#daterange"},(0,r.kt)("code",null,"DateRange!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"interval",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#dateinterval"},(0,r.kt)("code",null,"DateInterval!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"timewindow"},"TimeWindow"),(0,r.kt)("p",null,"A time window with a finite start and end time"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The start time of the time window"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"durationMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The end time of the time window"))))),(0,r.kt)("h2",{id:"updatestepstateresult"},"UpdateStepStateResult"),(0,r.kt)("p",null,"Result returned when fetching step 
state"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Id of the step"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"succeeded",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the update succeeded."))))),(0,r.kt)("h2",{id:"upstreamentityrelationships"},"UpstreamEntityRelationships"),(0,r.kt)("p",null,"Deprecated, use relationships query instead"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshiplegacy"},(0,r.kt)("code",null,"[EntityRelationshipLegacy]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"usageaggregation"},"UsageAggregation"),(0,r.kt)("p",null,"An aggregation of Dataset usage statistics"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"bucket",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time window start 
time"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"duration",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#windowduration"},(0,r.kt)("code",null,"WindowDuration"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time window span"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The resource urn associated with the usage information, eg a Dataset urn"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metrics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#usageaggregationmetrics"},(0,r.kt)("code",null,"UsageAggregationMetrics"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The rolled up usage metrics"))))),(0,r.kt)("h2",{id:"usageaggregationmetrics"},"UsageAggregationMetrics"),(0,r.kt)("p",null,"Rolled up metrics about Dataset usage over time"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique number of users who have queried the dataset within the time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#userusagecounts"},(0,r.kt)("code",null,"[UserUsageCounts]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Usage statistics within the time range by user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"totalSqlQueries",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of queries issued against the dataset within the time 
range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"topSqlQueries",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of common queries issued against the dataset within the time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#fieldusagecounts"},(0,r.kt)("code",null,"[FieldUsageCounts]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Per field usage statistics within the time range"))))),(0,r.kt)("h2",{id:"usagequeryresult"},"UsageQueryResult"),(0,r.kt)("p",null,"The result of a Dataset usage query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"buckets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#usageaggregation"},(0,r.kt)("code",null,"[UsageAggregation]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of relevant time windows for use in displaying usage statistics"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aggregations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#usagequeryresultaggregations"},(0,r.kt)("code",null,"UsageQueryResultAggregations"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of rolled up aggregations about the Dataset usage"))))),(0,r.kt)("h2",{id:"usagequeryresultaggregations"},"UsageQueryResultAggregations"),(0,r.kt)("p",null,"A set of rolled up aggregations about the Dataset 
usage"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The count of unique Dataset users within the queried time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#userusagecounts"},(0,r.kt)("code",null,"[UserUsageCounts]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific per user usage counts within the queried time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#fieldusagecounts"},(0,r.kt)("code",null,"[FieldUsageCounts]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific per field usage counts within the queried time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"totalSqlQueries",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of queries executed within the queried time range Note that this field will likely be deprecated in favor of a totalQueries field"))))),(0,r.kt)("h2",{id:"userusagecounts"},"UserUsageCounts"),(0,r.kt)("p",null,"Information about individual user usage of a Dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"user",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The user of the 
Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of queries issued by the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"userEmail",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The extracted user email Note that this field will soon be deprecated and merged with user"))))),(0,r.kt)("h2",{id:"versioneddataset"},"VersionedDataset"),(0,r.kt)("p",null,"A Dataset entity, which encompasses Relational Tables, Document store collections, streaming topics, and other sets of data having an independent lifecycle"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the dataset is 
defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"container",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#container"},(0,r.kt)("code",null,"Container"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The parent container in which the entity resides"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentContainers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentcontainersresult"},(0,r.kt)("code",null,"ParentContainersResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of containers for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique guid for dataset No longer to be used as the Dataset display name. Use properties.name instead"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datasetproperties"},(0,r.kt)("code",null,"DatasetProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of read only properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataseteditableproperties"},(0,r.kt)("code",null,"DatasetEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the 
dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableSchemaMetadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#editableschemametadata"},(0,r.kt)("code",null,"EditableSchemaMetadata"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Editable schema metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags used for searching dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"health",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#health"},(0,r.kt)("code",null,"[Health!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Experimental! 
The resolved health status of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schema"},(0,r.kt)("code",null,"Schema"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#subtypes"},(0,r.kt)("code",null,"SubTypes"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Sub Types that this entity implements"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#viewproperties"},(0,r.kt)("code",null,"ViewProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"View related properties. Only relevant if subtypes field contains view."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"origin",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#fabrictype"},(0,r.kt)("code",null,"FabricType!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, see the properties field instead Environment in which the dataset belongs to or where it was generated Note that this field will soon be deprecated in favor of a more standardized concept of Environment"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"No-op, has to be included due to 
model"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"versiontag"},"VersionTag"),(0,r.kt)("p",null,"The technical version associated with a given Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"versionTag",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"viewproperties"},"ViewProperties"),(0,r.kt)("p",null,"Properties about a Dataset of type view"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"materialized",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the view is materialized or not"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"logic",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The logic associated with the view, most commonly a SQL 
statement"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"language",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The language in which the view logic is written, for example SQL"))))),(0,r.kt)("h2",{id:"viewsconfig"},"ViewsConfig"),(0,r.kt)("p",null,"Configurations related to DataHub Views feature"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether Views feature is enabled"))))),(0,r.kt)("h2",{id:"visualconfig"},"VisualConfig"),(0,r.kt)("p",null,"Configurations related to visual appearance of the app"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"logoUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom logo url for the homepage & top banner"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"faviconUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom favicon url for the homepage & top banner"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"queriesTab",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#queriestabconfig"},(0,r.kt)("code",null,"QueriesTabConfig"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configuration for the queries 
tab"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityProfiles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprofilesconfig"},(0,r.kt)("code",null,"EntityProfilesConfig"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configuration for the queries tab"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchResult",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchresultsvisualconfig"},(0,r.kt)("code",null,"SearchResultsVisualConfig"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configuration for search results"))))))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/2ae2f03c.9d9e083e.js b/assets/js/2ae2f03c.9d9e083e.js new file mode 100644 index 0000000000000..9b3b8f049ffa6 --- /dev/null +++ b/assets/js/2ae2f03c.9d9e083e.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkdocs_website=self.webpackChunkdocs_website||[]).push([[135],{3905:(t,l,e)=>{e.d(l,{Zo:()=>i,kt:()=>c});var n=e(67294);function r(t,l,e){return l in t?Object.defineProperty(t,l,{value:e,enumerable:!0,configurable:!0,writable:!0}):t[l]=e,t}function a(t,l){var e=Object.keys(t);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(t);l&&(n=n.filter((function(l){return Object.getOwnPropertyDescriptor(t,l).enumerable}))),e.push.apply(e,n)}return e}function u(t){for(var l=1;l=0||(r[e]=t[e]);return r}(t,l);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(t);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(t,e)&&(r[e]=t[e])}return r}var k=n.createContext({}),o=function(t){var l=n.useContext(k),e=l;return t&&(e="function"==typeof t?t(l):u(u({},l),t)),e},i=function(t){var l=o(t.components);return n.createElement(k.Provider,{value:l},t.children)},d={inlineCode:"code",wrapper:function(t){var l=t.children;return n.createElement(n.Fragment,{},l)}},h=n.forwardRef((function(t,l){var 
e=t.components,r=t.mdxType,a=t.originalType,k=t.parentName,i=s(t,["components","mdxType","originalType","parentName"]),h=o(e),c=r,p=h["".concat(k,".").concat(c)]||h[c]||d[c]||a;return e?n.createElement(p,u(u({ref:l},i),{},{components:e})):n.createElement(p,u({ref:l},i))}));function c(t,l){var e=arguments,r=l&&l.mdxType;if("string"==typeof t||r){var a=e.length,u=new Array(a);u[0]=h;var s={};for(var k in l)hasOwnProperty.call(l,k)&&(s[k]=l[k]);s.originalType=t,s.mdxType="string"==typeof t?t:r,u[1]=s;for(var o=2;o{e.r(l),e.d(l,{assets:()=>k,contentTitle:()=>u,default:()=>d,frontMatter:()=>a,metadata:()=>s,toc:()=>o});var n=e(83117),r=(e(67294),e(3905));const a={id:"objects",title:"Objects",slug:"objects",sidebar_position:3},u=void 0,s={unversionedId:"graphql/objects",id:"graphql/objects",title:"Objects",description:"Access",source:"@site/genDocs/graphql/objects.md",sourceDirName:"graphql",slug:"/graphql/objects",permalink:"/docs/graphql/objects",draft:!1,editUrl:"https://github.com/datahub-project/datahub/blob/master/genDocs/graphql/objects.md",tags:[],version:"current",sidebarPosition:3,frontMatter:{id:"objects",title:"Objects",slug:"objects",sidebar_position:3},sidebar:"overviewSidebar",previous:{title:"Mutations",permalink:"/docs/graphql/mutations"},next:{title:"Input 
objects",permalink:"/docs/graphql/inputObjects"}},k={},o=[{value:"Access",id:"access",level:2},{value:"AccessToken",id:"accesstoken",level:2},{value:"AccessTokenMetadata",id:"accesstokenmetadata",level:2},{value:"Actor",id:"actor",level:2},{value:"ActorFilter",id:"actorfilter",level:2},{value:"AggregateResults",id:"aggregateresults",level:2},{value:"AggregationMetadata",id:"aggregationmetadata",level:2},{value:"AnalyticsChartGroup",id:"analyticschartgroup",level:2},{value:"AnalyticsConfig",id:"analyticsconfig",level:2},{value:"AppConfig",id:"appconfig",level:2},{value:"AspectRenderSpec",id:"aspectrenderspec",level:2},{value:"Assertion",id:"assertion",level:2},{value:"AssertionInfo",id:"assertioninfo",level:2},{value:"AssertionResult",id:"assertionresult",level:2},{value:"AssertionRunEvent",id:"assertionrunevent",level:2},{value:"AssertionRunEventsResult",id:"assertionruneventsresult",level:2},{value:"AssertionStdParameter",id:"assertionstdparameter",level:2},{value:"AssertionStdParameters",id:"assertionstdparameters",level:2},{value:"AuditStamp",id:"auditstamp",level:2},{value:"AuthConfig",id:"authconfig",level:2},{value:"AuthenticatedUser",id:"authenticateduser",level:2},{value:"AutoCompleteMultipleResults",id:"autocompletemultipleresults",level:2},{value:"AutoCompleteResultForEntity",id:"autocompleteresultforentity",level:2},{value:"AutoCompleteResults",id:"autocompleteresults",level:2},{value:"BarChart",id:"barchart",level:2},{value:"BarSegment",id:"barsegment",level:2},{value:"BaseData",id:"basedata",level:2},{value:"BatchGetStepStatesResult",id:"batchgetstepstatesresult",level:2},{value:"BatchSpec",id:"batchspec",level:2},{value:"BatchUpdateStepStatesResult",id:"batchupdatestepstatesresult",level:2},{value:"BooleanBox",id:"booleanbox",level:2},{value:"BrowsePath",id:"browsepath",level:2},{value:"BrowsePathEntry",id:"browsepathentry",level:2},{value:"BrowsePathV2",id:"browsepathv2",level:2},{value:"BrowseResultGroup",id:"browseresultgroup",level:2},{value:"Brows
eResultGroupV2",id:"browseresultgroupv2",level:2},{value:"BrowseResultMetadata",id:"browseresultmetadata",level:2},{value:"BrowseResults",id:"browseresults",level:2},{value:"BrowseResultsV2",id:"browseresultsv2",level:2},{value:"CaveatDetails",id:"caveatdetails",level:2},{value:"CaveatsAndRecommendations",id:"caveatsandrecommendations",level:2},{value:"Cell",id:"cell",level:2},{value:"ChangeAuditStamps",id:"changeauditstamps",level:2},{value:"Chart",id:"chart",level:2},{value:"ChartCell",id:"chartcell",level:2},{value:"ChartEditableProperties",id:"charteditableproperties",level:2},{value:"ChartInfo",id:"chartinfo",level:2},{value:"ChartProperties",id:"chartproperties",level:2},{value:"ChartQuery",id:"chartquery",level:2},{value:"ChartStatsSummary",id:"chartstatssummary",level:2},{value:"Container",id:"container",level:2},{value:"ContainerEditableProperties",id:"containereditableproperties",level:2},{value:"ContainerProperties",id:"containerproperties",level:2},{value:"ContentParams",id:"contentparams",level:2},{value:"CorpGroup",id:"corpgroup",level:2},{value:"CorpGroupEditableProperties",id:"corpgroupeditableproperties",level:2},{value:"CorpGroupInfo",id:"corpgroupinfo",level:2},{value:"CorpGroupProperties",id:"corpgroupproperties",level:2},{value:"CorpUser",id:"corpuser",level:2},{value:"CorpUserAppearanceSettings",id:"corpuserappearancesettings",level:2},{value:"CorpUserEditableInfo",id:"corpusereditableinfo",level:2},{value:"CorpUserEditableProperties",id:"corpusereditableproperties",level:2},{value:"CorpUserInfo",id:"corpuserinfo",level:2},{value:"CorpUserProperties",id:"corpuserproperties",level:2},{value:"CorpUserSettings",id:"corpusersettings",level:2},{value:"CorpUserViewsSettings",id:"corpuserviewssettings",level:2},{value:"Cost",id:"cost",level:2},{value:"CostValue",id:"costvalue",level:2},{value:"CustomPropertiesEntry",id:"custompropertiesentry",level:2},{value:"Dashboard",id:"dashboard",level:2},{value:"DashboardEditableProperties",id:"dashboardeditable
properties",level:2},{value:"DashboardInfo",id:"dashboardinfo",level:2},{value:"DashboardProperties",id:"dashboardproperties",level:2},{value:"DashboardStatsSummary",id:"dashboardstatssummary",level:2},{value:"DashboardUsageAggregation",id:"dashboardusageaggregation",level:2},{value:"DashboardUsageAggregationMetrics",id:"dashboardusageaggregationmetrics",level:2},{value:"DashboardUsageMetrics",id:"dashboardusagemetrics",level:2},{value:"DashboardUsageQueryResult",id:"dashboardusagequeryresult",level:2},{value:"DashboardUsageQueryResultAggregations",id:"dashboardusagequeryresultaggregations",level:2},{value:"DashboardUserUsageCounts",id:"dashboarduserusagecounts",level:2},{value:"DataFlow",id:"dataflow",level:2},{value:"DataFlowDataJobsRelationships",id:"dataflowdatajobsrelationships",level:2},{value:"DataFlowEditableProperties",id:"datafloweditableproperties",level:2},{value:"DataFlowInfo",id:"dataflowinfo",level:2},{value:"DataFlowProperties",id:"dataflowproperties",level:2},{value:"DataHubPolicy",id:"datahubpolicy",level:2},{value:"DataHubRole",id:"datahubrole",level:2},{value:"DataHubView",id:"datahubview",level:2},{value:"DataHubViewDefinition",id:"datahubviewdefinition",level:2},{value:"DataHubViewFilter",id:"datahubviewfilter",level:2},{value:"DataJob",id:"datajob",level:2},{value:"DataJobEditableProperties",id:"datajobeditableproperties",level:2},{value:"DataJobInfo",id:"datajobinfo",level:2},{value:"DataJobInputOutput",id:"datajobinputoutput",level:2},{value:"DataJobProperties",id:"datajobproperties",level:2},{value:"DataPlatform",id:"dataplatform",level:2},{value:"DataPlatformInfo",id:"dataplatforminfo",level:2},{value:"DataPlatformInstance",id:"dataplatforminstance",level:2},{value:"DataPlatformInstanceProperties",id:"dataplatforminstanceproperties",level:2},{value:"DataPlatformProperties",id:"dataplatformproperties",level:2},{value:"DataProcessInstance",id:"dataprocessinstance",level:2},{value:"DataProcessInstanceResult",id:"dataprocessinstanceresult",lev
el:2},{value:"DataProcessInstanceRunResult",id:"dataprocessinstancerunresult",level:2},{value:"DataProcessRunEvent",id:"dataprocessrunevent",level:2},{value:"DataProduct",id:"dataproduct",level:2},{value:"DataProductProperties",id:"dataproductproperties",level:2},{value:"Dataset",id:"dataset",level:2},{value:"DatasetAssertionInfo",id:"datasetassertioninfo",level:2},{value:"DatasetDeprecation",id:"datasetdeprecation",level:2},{value:"DatasetEditableProperties",id:"dataseteditableproperties",level:2},{value:"DatasetFieldProfile",id:"datasetfieldprofile",level:2},{value:"DatasetProfile",id:"datasetprofile",level:2},{value:"DatasetProperties",id:"datasetproperties",level:2},{value:"DatasetStatsSummary",id:"datasetstatssummary",level:2},{value:"DateRange",id:"daterange",level:2},{value:"Deprecation",id:"deprecation",level:2},{value:"Domain",id:"domain",level:2},{value:"DomainAssociation",id:"domainassociation",level:2},{value:"DomainProperties",id:"domainproperties",level:2},{value:"DownstreamEntityRelationships",id:"downstreamentityrelationships",level:2},{value:"EditableSchemaFieldInfo",id:"editableschemafieldinfo",level:2},{value:"EditableSchemaMetadata",id:"editableschemametadata",level:2},{value:"EditableTagProperties",id:"editabletagproperties",level:2},{value:"Embed",id:"embed",level:2},{value:"EntityAssertionsResult",id:"entityassertionsresult",level:2},{value:"EntityCountResult",id:"entitycountresult",level:2},{value:"EntityCountResults",id:"entitycountresults",level:2},{value:"EntityLineageResult",id:"entitylineageresult",level:2},{value:"EntityPath",id:"entitypath",level:2},{value:"EntityPrivileges",id:"entityprivileges",level:2},{value:"EntityProfileConfig",id:"entityprofileconfig",level:2},{value:"EntityProfileParams",id:"entityprofileparams",level:2},{value:"EntityProfilesConfig",id:"entityprofilesconfig",level:2},{value:"EntityRelationship",id:"entityrelationship",level:2},{value:"EntityRelationshipLegacy",id:"entityrelationshiplegacy",level:2},{value:"Ent
ityRelationshipsResult",id:"entityrelationshipsresult",level:2},{value:"EthicalConsiderations",id:"ethicalconsiderations",level:2},{value:"ExecutionRequest",id:"executionrequest",level:2},{value:"ExecutionRequestInput",id:"executionrequestinput",level:2},{value:"ExecutionRequestResult",id:"executionrequestresult",level:2},{value:"ExecutionRequestSource",id:"executionrequestsource",level:2},{value:"FacetFilter",id:"facetfilter",level:2},{value:"FacetMetadata",id:"facetmetadata",level:2},{value:"FeatureFlagsConfig",id:"featureflagsconfig",level:2},{value:"FieldUsageCounts",id:"fieldusagecounts",level:2},{value:"FineGrainedLineage",id:"finegrainedlineage",level:2},{value:"FloatBox",id:"floatbox",level:2},{value:"ForeignKeyConstraint",id:"foreignkeyconstraint",level:2},{value:"FreshnessStats",id:"freshnessstats",level:2},{value:"GetQuickFiltersResult",id:"getquickfiltersresult",level:2},{value:"GetRootGlossaryNodesResult",id:"getrootglossarynodesresult",level:2},{value:"GetRootGlossaryTermsResult",id:"getrootglossarytermsresult",level:2},{value:"GetSchemaBlameResult",id:"getschemablameresult",level:2},{value:"GetSchemaVersionListResult",id:"getschemaversionlistresult",level:2},{value:"GlobalTags",id:"globaltags",level:2},{value:"GlobalViewsSettings",id:"globalviewssettings",level:2},{value:"GlossaryNode",id:"glossarynode",level:2},{value:"GlossaryNodeProperties",id:"glossarynodeproperties",level:2},{value:"GlossaryTerm",id:"glossaryterm",level:2},{value:"GlossaryTermAssociation",id:"glossarytermassociation",level:2},{value:"GlossaryTermInfo",id:"glossaryterminfo",level:2},{value:"GlossaryTermProperties",id:"glossarytermproperties",level:2},{value:"GlossaryTerms",id:"glossaryterms",level:2},{value:"Health",id:"health",level:2},{value:"Highlight",id:"highlight",level:2},{value:"HyperParameterMap",id:"hyperparametermap",level:2},{value:"IdentityManagementConfig",id:"identitymanagementconfig",level:2},{value:"IngestionConfig",id:"ingestionconfig",level:2},{value:"IngestionR
un",id:"ingestionrun",level:2},{value:"IngestionSchedule",id:"ingestionschedule",level:2},{value:"IngestionSource",id:"ingestionsource",level:2},{value:"IngestionSourceExecutionRequests",id:"ingestionsourceexecutionrequests",level:2},{value:"InputField",id:"inputfield",level:2},{value:"InputFields",id:"inputfields",level:2},{value:"InstitutionalMemory",id:"institutionalmemory",level:2},{value:"InstitutionalMemoryMetadata",id:"institutionalmemorymetadata",level:2},{value:"IntBox",id:"intbox",level:2},{value:"IntendedUse",id:"intendeduse",level:2},{value:"InviteToken",id:"invitetoken",level:2},{value:"KeyValueSchema",id:"keyvalueschema",level:2},{value:"LineageConfig",id:"lineageconfig",level:2},{value:"LineageRelationship",id:"lineagerelationship",level:2},{value:"LinkParams",id:"linkparams",level:2},{value:"ListAccessTokenResult",id:"listaccesstokenresult",level:2},{value:"ListDomainsResult",id:"listdomainsresult",level:2},{value:"ListGroupsResult",id:"listgroupsresult",level:2},{value:"ListIngestionSourcesResult",id:"listingestionsourcesresult",level:2},{value:"ListOwnershipTypesResult",id:"listownershiptypesresult",level:2},{value:"ListPoliciesResult",id:"listpoliciesresult",level:2},{value:"ListPostsResult",id:"listpostsresult",level:2},{value:"ListQueriesResult",id:"listqueriesresult",level:2},{value:"ListRecommendationsResult",id:"listrecommendationsresult",level:2},{value:"ListRolesResult",id:"listrolesresult",level:2},{value:"ListSecretsResult",id:"listsecretsresult",level:2},{value:"ListTestsResult",id:"listtestsresult",level:2},{value:"ListUsersResult",id:"listusersresult",level:2},{value:"ListViewsResult",id:"listviewsresult",level:2},{value:"ManagedIngestionConfig",id:"managedingestionconfig",level:2},{value:"MatchedField",id:"matchedfield",level:2},{value:"Media",id:"media",level:2},{value:"Metrics",id:"metrics",level:2},{value:"MLFeature",id:"mlfeature",level:2},{value:"MLFeatureEditableProperties",id:"mlfeatureeditableproperties",level:2},{value:"MLFea
tureProperties",id:"mlfeatureproperties",level:2},{value:"MLFeatureTable",id:"mlfeaturetable",level:2},{value:"MLFeatureTableEditableProperties",id:"mlfeaturetableeditableproperties",level:2},{value:"MLFeatureTableProperties",id:"mlfeaturetableproperties",level:2},{value:"MLHyperParam",id:"mlhyperparam",level:2},{value:"MLMetric",id:"mlmetric",level:2},{value:"MLModel",id:"mlmodel",level:2},{value:"MLModelEditableProperties",id:"mlmodeleditableproperties",level:2},{value:"MLModelFactorPrompts",id:"mlmodelfactorprompts",level:2},{value:"MLModelFactors",id:"mlmodelfactors",level:2},{value:"MLModelGroup",id:"mlmodelgroup",level:2},{value:"MLModelGroupEditableProperties",id:"mlmodelgroupeditableproperties",level:2},{value:"MLModelGroupProperties",id:"mlmodelgroupproperties",level:2},{value:"MLModelProperties",id:"mlmodelproperties",level:2},{value:"MLPrimaryKey",id:"mlprimarykey",level:2},{value:"MLPrimaryKeyEditableProperties",id:"mlprimarykeyeditableproperties",level:2},{value:"MLPrimaryKeyProperties",id:"mlprimarykeyproperties",level:2},{value:"NamedBar",id:"namedbar",level:2},{value:"NamedLine",id:"namedline",level:2},{value:"Notebook",id:"notebook",level:2},{value:"NotebookCell",id:"notebookcell",level:2},{value:"NotebookContent",id:"notebookcontent",level:2},{value:"NotebookEditableProperties",id:"notebookeditableproperties",level:2},{value:"NotebookInfo",id:"notebookinfo",level:2},{value:"NumericDataPoint",id:"numericdatapoint",level:2},{value:"Operation",id:"operation",level:2},{value:"Origin",id:"origin",level:2},{value:"Owner",id:"owner",level:2},{value:"Ownership",id:"ownership",level:2},{value:"OwnershipSource",id:"ownershipsource",level:2},{value:"OwnershipTypeEntity",id:"ownershiptypeentity",level:2},{value:"OwnershipTypeInfo",id:"ownershiptypeinfo",level:2},{value:"ParentContainersResult",id:"parentcontainersresult",level:2},{value:"ParentNodesResult",id:"parentnodesresult",level:2},{value:"PartitionSpec",id:"partitionspec",level:2},{value:"PlatformPrivil
eges",id:"platformprivileges",level:2},{value:"PoliciesConfig",id:"policiesconfig",level:2},{value:"Policy",id:"policy",level:2},{value:"PolicyMatchCriterion",id:"policymatchcriterion",level:2},{value:"PolicyMatchCriterionValue",id:"policymatchcriterionvalue",level:2},{value:"PolicyMatchFilter",id:"policymatchfilter",level:2},{value:"Post",id:"post",level:2},{value:"PostContent",id:"postcontent",level:2},{value:"Privilege",id:"privilege",level:2},{value:"Privileges",id:"privileges",level:2},{value:"QuantitativeAnalyses",id:"quantitativeanalyses",level:2},{value:"QueriesTabConfig",id:"queriestabconfig",level:2},{value:"QueryCell",id:"querycell",level:2},{value:"QueryEntity",id:"queryentity",level:2},{value:"QueryProperties",id:"queryproperties",level:2},{value:"QueryStatement",id:"querystatement",level:2},{value:"QuerySubject",id:"querysubject",level:2},{value:"QuickFilter",id:"quickfilter",level:2},{value:"RawAspect",id:"rawaspect",level:2},{value:"RecommendationContent",id:"recommendationcontent",level:2},{value:"RecommendationModule",id:"recommendationmodule",level:2},{value:"RecommendationParams",id:"recommendationparams",level:2},{value:"ResetToken",id:"resettoken",level:2},{value:"ResourceFilter",id:"resourcefilter",level:2},{value:"ResourcePrivileges",id:"resourceprivileges",level:2},{value:"Role",id:"role",level:2},{value:"RoleAssociation",id:"roleassociation",level:2},{value:"RoleProperties",id:"roleproperties",level:2},{value:"RoleUser",id:"roleuser",level:2},{value:"Row",id:"row",level:2},{value:"Schema",id:"schema",level:2},{value:"SchemaField",id:"schemafield",level:2},{value:"SchemaFieldBlame",id:"schemafieldblame",level:2},{value:"SchemaFieldChange",id:"schemafieldchange",level:2},{value:"SchemaFieldEntity",id:"schemafieldentity",level:2},{value:"SchemaFieldRef",id:"schemafieldref",level:2},{value:"SchemaMetadata",id:"schemametadata",level:2},{value:"ScrollAcrossLineageResults",id:"scrollacrosslineageresults",level:2},{value:"ScrollResults",id:"scrollr
esults",level:2},{value:"SearchAcrossLineageResult",id:"searchacrosslineageresult",level:2},{value:"SearchAcrossLineageResults",id:"searchacrosslineageresults",level:2},{value:"SearchInsight",id:"searchinsight",level:2},{value:"SearchParams",id:"searchparams",level:2},{value:"SearchResult",id:"searchresult",level:2},{value:"SearchResults",id:"searchresults",level:2},{value:"SearchResultsVisualConfig",id:"searchresultsvisualconfig",level:2},{value:"SearchSuggestion",id:"searchsuggestion",level:2},{value:"Secret",id:"secret",level:2},{value:"SecretValue",id:"secretvalue",level:2},{value:"SemanticVersionStruct",id:"semanticversionstruct",level:2},{value:"SiblingProperties",id:"siblingproperties",level:2},{value:"SourceCode",id:"sourcecode",level:2},{value:"SourceCodeUrl",id:"sourcecodeurl",level:2},{value:"Status",id:"status",level:2},{value:"StepStateResult",id:"stepstateresult",level:2},{value:"StringBox",id:"stringbox",level:2},{value:"StringMapEntry",id:"stringmapentry",level:2},{value:"StructuredReport",id:"structuredreport",level:2},{value:"SubTypes",id:"subtypes",level:2},{value:"SystemFreshness",id:"systemfreshness",level:2},{value:"TableChart",id:"tablechart",level:2},{value:"TableSchema",id:"tableschema",level:2},{value:"Tag",id:"tag",level:2},{value:"TagAssociation",id:"tagassociation",level:2},{value:"TagProperties",id:"tagproperties",level:2},{value:"TelemetryConfig",id:"telemetryconfig",level:2},{value:"Test",id:"test",level:2},{value:"TestDefinition",id:"testdefinition",level:2},{value:"TestResult",id:"testresult",level:2},{value:"TestResults",id:"testresults",level:2},{value:"TestsConfig",id:"testsconfig",level:2},{value:"TextCell",id:"textcell",level:2},{value:"TimeSeriesChart",id:"timeserieschart",level:2},{value:"TimeWindow",id:"timewindow",level:2},{value:"UpdateStepStateResult",id:"updatestepstateresult",level:2},{value:"UpstreamEntityRelationships",id:"upstreamentityrelationships",level:2},{value:"UsageAggregation",id:"usageaggregation",level:2},{
value:"UsageAggregationMetrics",id:"usageaggregationmetrics",level:2},{value:"UsageQueryResult",id:"usagequeryresult",level:2},{value:"UsageQueryResultAggregations",id:"usagequeryresultaggregations",level:2},{value:"UserUsageCounts",id:"userusagecounts",level:2},{value:"VersionedDataset",id:"versioneddataset",level:2},{value:"VersionTag",id:"versiontag",level:2},{value:"ViewProperties",id:"viewproperties",level:2},{value:"ViewsConfig",id:"viewsconfig",level:2},{value:"VisualConfig",id:"visualconfig",level:2}],i={toc:o};function d(t){let{components:l,...e}=t;return(0,r.kt)("wrapper",(0,n.Z)({},i,e,{components:l,mdxType:"MDXLayout"}),(0,r.kt)("h2",{id:"access"},"Access"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"roles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#roleassociation"},(0,r.kt)("code",null,"[RoleAssociation!]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"accesstoken"},"AccessToken"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"accessToken",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The access token itself"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#accesstokenmetadata"},(0,r.kt)("code",null,"AccessTokenMetadata"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Metadata about the generated 
token"))))),(0,r.kt)("h2",{id:"accesstokenmetadata"},"AccessTokenMetadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the access token"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique identifier of the token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the token, if it exists."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the token if defined."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actorUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actor associated with the Access 
Token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownerUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actor who created the Access Token."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time when token was generated at."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"expiresAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Time when token will be expired."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"actor"},"Actor"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#roleuser"},(0,r.kt)("code",null,"[RoleUser!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of users for which the role is 
provisioned"))))),(0,r.kt)("h2",{id:"actorfilter"},"ActorFilter"),(0,r.kt)("p",null,"The actors that a DataHub Access Policy applies to"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A disjunctive set of users to apply the policy to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A disjunctive set of groups to apply the policy to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"roles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A disjunctive set of roles to apply the policy to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceOwners",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the filter should return TRUE for owners of a particular resource Only applies to policies of type METADATA, which have a resource associated with them"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceOwnersTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Set of OwnershipTypes to apply the policy to (if resourceOwners field is set to True)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resolvedOwnershipTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownershiptypeentity"},(0,r.kt)("code",null,"[OwnershipTypeEntity!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Set 
of OwnershipTypes to apply the policy to (if resourceOwners field is set to True), resolved."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"allUsers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the filter should apply to all users"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"allGroups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the filter should apply to all groups"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resolvedUsers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The list of users on the Policy, resolved."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resolvedGroups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpgroup"},(0,r.kt)("code",null,"[CorpGroup!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The list of groups on the Policy, resolved."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resolvedRoles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datahubrole"},(0,r.kt)("code",null,"[DataHubRole!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The list of roles on the Policy, resolved."))))),(0,r.kt)("h2",{id:"aggregateresults"},"AggregateResults"),(0,r.kt)("p",null,"Results returned from aggregateAcrossEntities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"facets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetmetadata"},(0,r.kt)("code",null,"[FacetMetadata!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Candidate facet aggregations used for search 
filtering"))))),(0,r.kt)("h2",{id:"aggregationmetadata"},"AggregationMetadata"),(0,r.kt)("p",null,"Information about the aggregation that can be used for filtering, included the field value and number of results"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A particular value of a facet field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of search results containing the value"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity corresponding to the facet field"))))),(0,r.kt)("h2",{id:"analyticschartgroup"},"AnalyticsChartGroup"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groupId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"charts",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#analyticschart"},(0,r.kt)("code",null,"[AnalyticsChart!]!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"analyticsconfig"},"AnalyticsConfig"),(0,r.kt)("p",null,"Configurations related to the Analytics Feature"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the Analytics feature is enabled and should be displayed"))))),(0,r.kt)("h2",{id:"appconfig"},"AppConfig"),(0,r.kt)("p",null,"Config loaded at application boot time\nThis configuration dictates the behavior of the UI, such as which features are enabled or 
disabled"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"appVersion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"App version"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"authConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#authconfig"},(0,r.kt)("code",null,"AuthConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Auth-related configurations"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"analyticsConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#analyticsconfig"},(0,r.kt)("code",null,"AnalyticsConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to the Analytics Feature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"policiesConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#policiesconfig"},(0,r.kt)("code",null,"PoliciesConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to the Policies Feature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"identityManagementConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#identitymanagementconfig"},(0,r.kt)("code",null,"IdentityManagementConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to the User & Group management"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"managedIngestionConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#managedingestionconfig"},(0,r.kt)("code",null,"ManagedIngestionConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to UI-based 
ingestion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineageConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#lineageconfig"},(0,r.kt)("code",null,"LineageConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to Lineage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"visualConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#visualconfig"},(0,r.kt)("code",null,"VisualConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to visual appearance, allows styling the UI without rebuilding the bundle"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"telemetryConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#telemetryconfig"},(0,r.kt)("code",null,"TelemetryConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to tracking users in the app"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"testsConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#testsconfig"},(0,r.kt)("code",null,"TestsConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to DataHub tests"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewsConfig",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#viewsconfig"},(0,r.kt)("code",null,"ViewsConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configurations related to DataHub Views"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"featureFlags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#featureflagsconfig"},(0,r.kt)("code",null,"FeatureFlagsConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Feature flags telling the UI whether a feature is enabled or not"))))),(0,r.kt)("h2",{id:"aspectrenderspec"},"AspectRenderSpec"),(0,r.kt)("p",null,"Details for the frontend on how the raw aspect should be 
rendered"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Format the aspect should be displayed in for the UI. Powered by the renderSpec annotation on the aspect model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name to refer to the aspect type by for the UI. Powered by the renderSpec annotation on the aspect model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"key",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Field in the aspect payload to index into for rendering."))))),(0,r.kt)("h2",{id:"assertion"},"Assertion"),(0,r.kt)("p",null,"An assertion represents a programmatic validation, check, or test performed periodically against another 
Entity."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Assertion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the assertion is evaluated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertioninfo"},(0,r.kt)("code",null,"AssertionInfo"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Details about assertion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runEvents",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionruneventsresult"},(0,r.kt)("code",null,"AssertionRunEventsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Lifecycle events detailing individual runs of this assertion. If startTimeMillis & endTimeMillis are not provided, the most recent events will be returned."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertionrunstatus"},(0,r.kt)("code",null,"AssertionRunStatus"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#filterinput"},(0,r.kt)("code",null,"FilterInput"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"assertioninfo"},"AssertionInfo"),(0,r.kt)("p",null,"Type of assertion. 
Assertion types can evolve to span Datasets, Flows (Pipelines), Models, Features etc."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertiontype"},(0,r.kt)("code",null,"AssertionType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Top-level type of the assertion."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetAssertion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datasetassertioninfo"},(0,r.kt)("code",null,"DatasetAssertionInfo"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Dataset-specific assertion information"))))),(0,r.kt)("h2",{id:"assertionresult"},"AssertionResult"),(0,r.kt)("p",null,"The result of evaluating an assertion."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertionresulttype"},(0,r.kt)("code",null,"AssertionResultType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The final result, e.g. 
either SUCCESS or FAILURE."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"rowCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of rows for evaluated batch"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"missingCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of rows with missing value for evaluated batch"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"unexpectedCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of rows with unexpected value for evaluated batch"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actualAggValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#float"},(0,r.kt)("code",null,"Float"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Observed aggregate value for evaluated batch"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"URL where full results are available"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nativeResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native results / properties of evaluation"))))),(0,r.kt)("h2",{id:"assertionrunevent"},"AssertionRunEvent"),(0,r.kt)("p",null,"An event representing an event in the assertion evaluation 
lifecycle."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#timeseriesaspect"},"TimeSeriesAspect"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the assertion was evaluated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"assertionUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of assertion which is evaluated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"asserteeUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of entity on which the assertion is applicable"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native (platform-specific) identifier for this run"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertionrunstatus"},(0,r.kt)("code",null,"AssertionRunStatus!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The status of the assertion run as per this timeseries event."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"batchSpec",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#batchspec"},(0,r.kt)("code",null,"BatchSpec"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Specification of the batch which this run is 
evaluating"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"partitionSpec",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#partitionspec"},(0,r.kt)("code",null,"PartitionSpec"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Information about the partition that was evaluated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runtimeContext",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Runtime parameters of evaluation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"result",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionresult"},(0,r.kt)("code",null,"AssertionResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Results of assertion, present if the status is COMPLETE"))))),(0,r.kt)("h2",{id:"assertionruneventsresult"},"AssertionRunEventsResult"),(0,r.kt)("p",null,"Result returned when fetching run events for an assertion."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of run events returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"failed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of failed run events"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"succeeded",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of succeeded run 
events"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runEvents",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionrunevent"},(0,r.kt)("code",null,"[AssertionRunEvent!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The run events themselves"))))),(0,r.kt)("h2",{id:"assertionstdparameter"},"AssertionStdParameter"),(0,r.kt)("p",null,"Parameter for AssertionStdOperator."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The parameter value"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertionstdparametertype"},(0,r.kt)("code",null,"AssertionStdParameterType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the parameter"))))),(0,r.kt)("h2",{id:"assertionstdparameters"},"AssertionStdParameters"),(0,r.kt)("p",null,"Parameters for AssertionStdOperators"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionstdparameter"},(0,r.kt)("code",null,"AssertionStdParameter"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The value parameter of an assertion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"maxValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionstdparameter"},(0,r.kt)("code",null,"AssertionStdParameter"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The maxValue parameter of an 
assertion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"minValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionstdparameter"},(0,r.kt)("code",null,"AssertionStdParameter"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The minValue parameter of an assertion"))))),(0,r.kt)("h2",{id:"auditstamp"},"AuditStamp"),(0,r.kt)("p",null,"A time stamp along with an optional actor"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"time",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"When the audited action took place"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Who performed the audited action"))))),(0,r.kt)("h2",{id:"authconfig"},"AuthConfig"),(0,r.kt)("p",null,"Configurations related to auth"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tokenAuthEnabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether token-based auth is enabled."))))),(0,r.kt)("h2",{id:"authenticateduser"},"AuthenticatedUser"),(0,r.kt)("p",null,"Information about the currently authenticated 
user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"corpUser",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The user information associated with the authenticated user, including properties used in rendering the profile"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformPrivileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#platformprivileges"},(0,r.kt)("code",null,"PlatformPrivileges!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The privileges assigned to the currently authenticated user, which dictates which parts of the UI they should be able to use"))))),(0,r.kt)("h2",{id:"autocompletemultipleresults"},"AutoCompleteMultipleResults"),(0,r.kt)("p",null,"The results returned on a multi entity autocomplete query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The raw query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"suggestions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#autocompleteresultforentity"},(0,r.kt)("code",null,"[AutoCompleteResultForEntity!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The autocompletion suggestions"))))),(0,r.kt)("h2",{id:"autocompleteresultforentity"},"AutoCompleteResultForEntity"),(0,r.kt)("p",null,"An individual auto complete result specific to an individual Metadata Entity 
Type"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"suggestions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The autocompletion results for specified entity type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"[Entity!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of entities to render in autocomplete"))))),(0,r.kt)("h2",{id:"autocompleteresults"},"AutoCompleteResults"),(0,r.kt)("p",null,"The results returned on a single entity autocomplete query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query string"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"suggestions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The autocompletion results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"[Entity!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of entities to render in 
autocomplete"))))),(0,r.kt)("h2",{id:"barchart"},"BarChart"),(0,r.kt)("p",null,"For consumption by UI only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"bars",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#namedbar"},(0,r.kt)("code",null,"[NamedBar!]!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"barsegment"},"BarSegment"),(0,r.kt)("p",null,"For consumption by UI only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"label",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"basedata"},"BaseData"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataset",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Dataset used for the Training or Evaluation of the 
MLModel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"motivation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Motivation to pick these datasets"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"preProcessing",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Details of Data Proprocessing"))))),(0,r.kt)("h2",{id:"batchgetstepstatesresult"},"BatchGetStepStatesResult"),(0,r.kt)("p",null,"Result returned when fetching step state"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"results",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stepstateresult"},(0,r.kt)("code",null,"[StepStateResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The step states"))))),(0,r.kt)("h2",{id:"batchspec"},"BatchSpec"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"nativeBatchId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The native identifier as specified by the system operating on the batch."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A query that identifies a batch of 
data"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Any limit to the number of rows in the batch, if applied"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the Batch"))))),(0,r.kt)("h2",{id:"batchupdatestepstatesresult"},"BatchUpdateStepStatesResult"),(0,r.kt)("p",null,"Result returned when fetching step state"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"results",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#updatestepstateresult"},(0,r.kt)("code",null,"[UpdateStepStateResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Results for each step"))))),(0,r.kt)("h2",{id:"booleanbox"},"BooleanBox"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"booleanValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"browsepath"},"BrowsePath"),(0,r.kt)("p",null,"A hierarchical entity 
path"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The components of the browse path"))))),(0,r.kt)("h2",{id:"browsepathentry"},"BrowsePathEntry"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The path name of a group of browse results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional entity associated with this browse entry. This will usually be a container entity. 
If this entity is not populated, the name must be used."))))),(0,r.kt)("h2",{id:"browsepathv2"},"BrowsePathV2"),(0,r.kt)("p",null,"A hierarchical entity path V2"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathentry"},(0,r.kt)("code",null,"[BrowsePathEntry!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The components of the browse path"))))),(0,r.kt)("h2",{id:"browseresultgroup"},"BrowseResultGroup"),(0,r.kt)("p",null,"A group of Entities under a given browse path"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The path name of a group of browse results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities within the group"))))),(0,r.kt)("h2",{id:"browseresultgroupv2"},"BrowseResultGroupV2"),(0,r.kt)("p",null,"A group of Entities under a given browse 
path"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The path name of a group of browse results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional entity associated with this browse group. This will usually be a container entity. If this entity is not populated, the name must be used."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities within the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"hasSubGroups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not there are any more groups underneath this group"))))),(0,r.kt)("h2",{id:"browseresultmetadata"},"BrowseResultMetadata"),(0,r.kt)("p",null,"Metadata about the Browse Paths response"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The provided 
path"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"totalNumEntities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of entities under the provided browse path"))))),(0,r.kt)("h2",{id:"browseresults"},"BrowseResults"),(0,r.kt)("p",null,"The results of a browse path traversal query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"[Entity!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browseresultgroup"},(0,r.kt)("code",null,"[BrowseResultGroup!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The groups present at the provided browse path"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of elements included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of browse results under the path with filters 
applied"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browseresultmetadata"},(0,r.kt)("code",null,"BrowseResultMetadata!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Metadata containing resulting browse groups"))))),(0,r.kt)("h2",{id:"browseresultsv2"},"BrowseResultsV2"),(0,r.kt)("p",null,"The results of a browse path V2 traversal query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browseresultgroupv2"},(0,r.kt)("code",null,"[BrowseResultGroupV2!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The groups present at the provided browse path V2"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting point of paginated results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of groups included in the results"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of browse groups under the path with filters applied"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browseresultmetadata"},(0,r.kt)("code",null,"BrowseResultMetadata!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Metadata containing resulting browse 
groups"))))),(0,r.kt)("h2",{id:"caveatdetails"},"CaveatDetails"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"needsFurtherTesting",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Did the results suggest any further testing"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"caveatDescription",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Caveat Description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groupsNotRepresented",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Relevant groups that were not represented in the evaluation dataset"))))),(0,r.kt)("h2",{id:"caveatsandrecommendations"},"CaveatsAndRecommendations"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"caveats",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#caveatdetails"},(0,r.kt)("code",null,"CaveatDetails"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Caveats on using this MLModel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"recommendations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recommendations on where this MLModel should be 
used"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"idealDatasetCharacteristics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ideal characteristics of an evaluation dataset for this MLModel"))))),(0,r.kt)("h2",{id:"cell"},"Cell"),(0,r.kt)("p",null,"For consumption by UI only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"linkParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#linkparams"},(0,r.kt)("code",null,"LinkParams"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"changeauditstamps"},"ChangeAuditStamps"),(0,r.kt)("p",null,"Captures information about who created/last modified/deleted the entity and when"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the creation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding 
to the modification"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deleted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional AuditStamp corresponding to the deletion"))))),(0,r.kt)("h2",{id:"chart"},"Chart"),(0,r.kt)("p",null,"A Chart Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"container",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#container"},(0,r.kt)("code",null,"Container"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The parent container in 
which the entity resides"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentContainers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentcontainersresult"},(0,r.kt)("code",null,"ParentContainersResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of containers for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tool",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The chart tool name Note that this field will soon be deprecated in favor a unified notion of Data Platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"chartId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An id unique within the charting tool"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#chartproperties"},(0,r.kt)("code",null,"ChartProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties about the Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#charteditableproperties"},(0,r.kt)("code",null,"ChartEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read write properties about the Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#chartquery"},(0,r.kt)("code",null,"ChartQuery"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Info about the query which is used to render the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the 
chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"embed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#embed"},(0,r.kt)("code",null,"Embed"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Embed information about the Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tags associated with the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific 
instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"statsSummary",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#chartstatssummary"},(0,r.kt)("code",null,"ChartStatsSummary"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Not yet implemented."),(0,r.kt)("p",null,"Experimental - Summary operational & usage statistics about a Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage 
graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the chart. If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. 
If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#chartinfo"},(0,r.kt)("code",null,"ChartInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional read only information about the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#charteditableproperties"},(0,r.kt)("code",null,"ChartEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use editableProperties field instead Additional read write information about the Chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags instead The structured tags associated with the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the chart is defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"inputFields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#inputfields"},(0,r.kt)("code",null,"InputFields"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Input fields to power the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprivileges"},(0,r.kt)("code",null,"EntityPrivileges"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Privileges given to a user relevant to this 
entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"chartcell"},"ChartCell"),(0,r.kt)("p",null,"A Notebook cell which contains chart as content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"cellTitle",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title of the cell"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cellId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique id for the cell."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"changeAuditStamps",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#changeauditstamps"},(0,r.kt)("code",null,"ChangeAuditStamps"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Captures information about who created/last modified/deleted this TextCell and when"))))),(0,r.kt)("h2",{id:"charteditableproperties"},"ChartEditableProperties"),(0,r.kt)("p",null,"Chart properties that are editable via the UI This represents logical metadata,\nas opposed to technical metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the 
Chart"))))),(0,r.kt)("h2",{id:"chartinfo"},"ChartInfo"),(0,r.kt)("p",null,"Deprecated, use ChartProperties instead\nAdditional read only information about the chart"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"inputs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"[Dataset!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use relationship Consumes instead Data sources for the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native platform URL of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#charttype"},(0,r.kt)("code",null,"ChartType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Access level for the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"access",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesslevel"},(0,r.kt)("code",null,"AccessLevel"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Access level for the 
chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastRefreshed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time when this chart last refreshed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the creation of this chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the modification of this chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deleted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional AuditStamp corresponding to the deletion of this chart"))))),(0,r.kt)("h2",{id:"chartproperties"},"ChartProperties"),(0,r.kt)("p",null,"Additional read only properties about the chart"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the 
chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native platform URL of the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#charttype"},(0,r.kt)("code",null,"ChartType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Access level for the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"access",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesslevel"},(0,r.kt)("code",null,"AccessLevel"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Access level for the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastRefreshed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time when this chart last refreshed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the creation of this chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the modification of this 
chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deleted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional AuditStamp corresponding to the deletion of this chart"))))),(0,r.kt)("h2",{id:"chartquery"},"ChartQuery"),(0,r.kt)("p",null,"The query that was used to populate a Chart"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"rawQuery",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Raw query to build a chart from input datasets"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#chartquerytype"},(0,r.kt)("code",null,"ChartQueryType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the chart query"))))),(0,r.kt)("h2",{id:"chartstatssummary"},"ChartStatsSummary"),(0,r.kt)("p",null,"Experimental - subject to change. 
A summary of usage metrics about a Chart."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total view count for the chart"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewCountLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The view count in the last 30 days"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCountLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique user count in the past 30 days"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"topUsersLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The top users in the past 30 days"))))),(0,r.kt)("h2",{id:"container"},"Container"),(0,r.kt)("p",null,"A container of other Metadata Entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the 
container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"container",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#container"},(0,r.kt)("code",null,"Container"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Fetch an Entity Container by primary key (urn)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentContainers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentcontainersresult"},(0,r.kt)("code",null,"ParentContainersResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of containers for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#containerproperties"},(0,r.kt)("code",null,"ContainerProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Read-only properties that originate in the source data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#containereditableproperties"},(0,r.kt)("code",null,"ContainerEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Read-write properties that originate in 
DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags used for searching dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#subtypes"},(0,r.kt)("code",null,"SubTypes"))),(0,r.kt)("td",null,(0,r.kt)("p",null,'Sub types of the container, e.g. 
"Database" etc'))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchresults"},(0,r.kt)("code",null,"SearchResults"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Children entities inside of the Container"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#containerentitiesinput"},(0,r.kt)("code",null,"ContainerEntitiesInput"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"containereditableproperties"},"ContainerEditableProperties"),(0,r.kt)("p",null,"Read-write properties that originate in DataHub"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"DataHub description of the Container"))))),(0,r.kt)("h2",{id:"containerproperties"},"ContainerProperties"),(0,r.kt)("p",null,"Read-only properties that originate in the source data 
platform"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the Container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"System description of the Container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the Container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native platform URL of the Container"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"qualifiedName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Fully-qualified name of the Container"))))),(0,r.kt)("h2",{id:"contentparams"},"ContentParams"),(0,r.kt)("p",null,"Params about the recommended content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of entities corresponding to the 
recommended content"))))),(0,r.kt)("h2",{id:"corpgroup"},"CorpGroup"),(0,r.kt)("p",null,"A DataHub Group entity, which represents a Person on the Metadata Entity Graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Group name eg wherehows dev, ask_metadata"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the Corp Group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpgroupproperties"},(0,r.kt)("code",null,"CorpGroupProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties about the 
group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpgroupeditableproperties"},(0,r.kt)("code",null,"CorpGroupEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read write properties about the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"origin",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#origin"},(0,r.kt)("code",null,"Origin"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Origin info about this group."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpgroupinfo"},(0,r.kt)("code",null,"CorpGroupInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional read only info about the group"))))),(0,r.kt)("h2",{id:"corpgroupeditableproperties"},"CorpGroupEditableProperties"),(0,r.kt)("p",null,"Additional read write properties about a 
group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"DataHub description of the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"slack",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Slack handle for the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Email address for the group"))))),(0,r.kt)("h2",{id:"corpgroupinfo"},"CorpGroupInfo"),(0,r.kt)("p",null,"Deprecated, use CorpUserProperties instead\nAdditional read only info about a group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name to display when rendering the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description provided for the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"email of this 
group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"admins",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, do not use owners of this group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"members",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use relationship IsMemberOfGroup instead List of ldap urn in this group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, do not use List of groups urns in this group"))))),(0,r.kt)("h2",{id:"corpgroupproperties"},"CorpGroupProperties"),(0,r.kt)("p",null,"Additional read only properties about a group"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"display name of this group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description provided for the group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"email of 
this group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"slack",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Slack handle for the group"))))),(0,r.kt)("h2",{id:"corpuser"},"CorpUser"),(0,r.kt)("p",null,"A DataHub User entity, which represents a Person on the Metadata Entity Graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"username",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A username associated with the user This uniquely identifies the user within DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuserproperties"},(0,r.kt)("code",null,"CorpUserProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties about the corp 
user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpusereditableproperties"},(0,r.kt)("code",null,"CorpUserEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Read write properties about the corp user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#corpuserstatus"},(0,r.kt)("code",null,"CorpUserStatus"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The status of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tags associated with the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"isNativeUser",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this user is a native DataHub user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuserinfo"},(0,r.kt)("code",null,"CorpUserInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer 
supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional read only info about the corp user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpusereditableinfo"},(0,r.kt)("code",null,"CorpUserEditableInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use editableProperties field instead Read write info about the corp user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use the tags field instead The structured tags associated with the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"settings",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpusersettings"},(0,r.kt)("code",null,"CorpUserSettings"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Settings that a user can customize through the datahub ui"))))),(0,r.kt)("h2",{id:"corpuserappearancesettings"},"CorpUserAppearanceSettings"),(0,r.kt)("p",null,"Settings that control look and feel of the DataHub UI for the user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"showSimplifiedHomepage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flag whether the user should see a homepage with only datasets, charts & dashboards. 
Intended for users who have less operational use cases for the datahub tool."))))),(0,r.kt)("h2",{id:"corpusereditableinfo"},"CorpUserEditableInfo"),(0,r.kt)("p",null,"Deprecated, use CorpUserEditableProperties instead\nAdditional read write info about a user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name to show on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title to show on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aboutMe",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"About me section of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"teams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Teams that the user belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"skills",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Skills that the user possesses"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"pictureLink",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A URL which points to a picture which user wants to set as a profile photo"))))),(0,r.kt)("h2",{id:"corpusereditableproperties"},"CorpUserEditableProperties"),(0,r.kt)("p",null,"Additional read 
write properties about a user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name to show on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title to show on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aboutMe",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"About me section of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"teams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Teams that the user belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"skills",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Skills that the user possesses"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"pictureLink",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A URL which points to a picture which user wants to set as a profile photo"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"slack",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The slack handle of the 
user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"phone",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Phone number for the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Email address for the user"))))),(0,r.kt)("h2",{id:"corpuserinfo"},"CorpUserInfo"),(0,r.kt)("p",null,"Deprecated, use CorpUserProperties instead\nAdditional read only info about a user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"active",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user is active"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Email address of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manager",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Direct manager of the 
user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"departmentId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"department id the user belong to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"departmentName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"department name this user belong to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"firstName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"first name of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"last name of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fullName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Common name of this user, format is firstName plus lastName"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"countryCode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"two uppercase letters country code"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the ldap"))))),(0,r.kt)("h2",{id:"corpuserproperties"},"CorpUserProperties"),(0,r.kt)("p",null,"Additional read only properties about a 
user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"active",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user is active"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"email",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Email address of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manager",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Direct manager of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"departmentId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"department id the user belong to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"departmentName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"department name this user belong to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"firstName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"first name 
of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"last name of the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fullName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Common name of this user, format is firstName plus lastName"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"countryCode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"two uppercase letters country code"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the ldap"))))),(0,r.kt)("h2",{id:"corpusersettings"},"CorpUserSettings"),(0,r.kt)("p",null,"Settings that a user can customize through the datahub ui"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"appearance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuserappearancesettings"},(0,r.kt)("code",null,"CorpUserAppearanceSettings"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Settings that control look and feel of the DataHub UI for the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"views",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuserviewssettings"},(0,r.kt)("code",null,"CorpUserViewsSettings"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Settings related to the DataHub Views 
feature"))))),(0,r.kt)("h2",{id:"corpuserviewssettings"},"CorpUserViewsSettings"),(0,r.kt)("p",null,"Settings related to the Views feature of DataHub."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"defaultView",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datahubview"},(0,r.kt)("code",null,"DataHubView"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The default view for the User."))))),(0,r.kt)("h2",{id:"cost"},"Cost"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"costType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#costtype"},(0,r.kt)("code",null,"CostType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Type of Cost Code"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"costValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#costvalue"},(0,r.kt)("code",null,"CostValue!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Code to which the Cost of this entity should be attributed to ie organizational cost ID"))))),(0,r.kt)("h2",{id:"costvalue"},"CostValue"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"costId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#float"},(0,r.kt)("code",null,"Float"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Organizational Cost 
ID"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"costCode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Organizational Cost Code"))))),(0,r.kt)("h2",{id:"custompropertiesentry"},"CustomPropertiesEntry"),(0,r.kt)("p",null,"An entry in a custom properties map represented as a tuple"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"key",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The key of the map entry"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The value fo the map entry"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the entity this property came from for tracking purposes e.g. 
when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"dashboard"},"Dashboard"),(0,r.kt)("p",null,"A Dashboard Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"container",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#container"},(0,r.kt)("code",null,"Container"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The parent container in which the entity 
resides"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentContainers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentcontainersresult"},(0,r.kt)("code",null,"ParentContainersResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of containers for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tool",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The dashboard tool name Note that this will soon be deprecated in favor of a standardized notion of Data Platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dashboardId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An id unique within the dashboard tool"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardproperties"},(0,r.kt)("code",null,"DashboardProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties about the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardeditableproperties"},(0,r.kt)("code",null,"DashboardEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read write properties about the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the 
dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"embed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#embed"},(0,r.kt)("code",null,"Embed"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Embed information about the Dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tags associated with the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the dashboard. 
If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"usageStats",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardusagequeryresult"},(0,r.kt)("code",null,"DashboardUsageQueryResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Experimental (Subject to breaking change) -- Statistics about how this Dashboard is used"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"statsSummary",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardstatssummary"},(0,r.kt)("code",null,"DashboardStatsSummary"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Experimental - Summary operational & usage statistics about a Dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardinfo"},(0,r.kt)("code",null,"DashboardInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No 
longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional read only information about the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardeditableproperties"},(0,r.kt)("code",null,"DashboardEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use editableProperties instead Additional read write properties about the Dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags field instead The structured tags associated with the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the dashboard is defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"inputFields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#inputfields"},(0,r.kt)("code",null,"InputFields"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Input fields that power all the charts in the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#subtypes"},(0,r.kt)("code",null,"SubTypes"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Sub Types of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprivileges"},(0,r.kt)("code",null,"EntityPrivileges"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Privileges given to a user relevant to this 
entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"dashboardeditableproperties"},"DashboardEditableProperties"),(0,r.kt)("p",null,"Dashboard properties that are editable via the UI This represents logical metadata,\nas opposed to technical metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Dashboard"))))),(0,r.kt)("h2",{id:"dashboardinfo"},"DashboardInfo"),(0,r.kt)("p",null,"Deprecated, use DashboardProperties instead\nAdditional read only info about a Dashboard"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"charts",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#chart"},(0,r.kt)("code",null,"[Chart!]!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer 
supported"),(0,r.kt)("p",null,"Deprecated, use relationship Contains instead Charts that comprise the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native platform URL of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"access",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesslevel"},(0,r.kt)("code",null,"AccessLevel"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Access level for the dashboard Note that this will soon be deprecated for low usage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastRefreshed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time when this dashboard last refreshed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the creation of this dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the modification of this dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deleted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional AuditStamp corresponding to the deletion of this 
dashboard"))))),(0,r.kt)("h2",{id:"dashboardproperties"},"DashboardProperties"),(0,r.kt)("p",null,"Additional read only properties about a Dashboard"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native platform URL of the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"access",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#accesslevel"},(0,r.kt)("code",null,"AccessLevel"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Access level for the dashboard Note that this will soon be deprecated for low usage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastRefreshed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time when this dashboard last 
refreshed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the creation of this dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the modification of this dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deleted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional AuditStamp corresponding to the deletion of this dashboard"))))),(0,r.kt)("h2",{id:"dashboardstatssummary"},"DashboardStatsSummary"),(0,r.kt)("p",null,"Experimental - subject to change. A summary of usage metrics about a Dashboard."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total view count for the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewCountLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The view count in the last 30 days"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCountLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique user count in the past 30 
days"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"topUsersLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The top users in the past 30 days"))))),(0,r.kt)("h2",{id:"dashboardusageaggregation"},"DashboardUsageAggregation"),(0,r.kt)("p",null,"An aggregation of Dashboard usage statistics"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"bucket",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time window start time"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"duration",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#windowduration"},(0,r.kt)("code",null,"WindowDuration"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time window span"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The resource urn associated with the usage information, eg a Dashboard urn"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metrics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardusageaggregationmetrics"},(0,r.kt)("code",null,"DashboardUsageAggregationMetrics"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The rolled up usage metrics"))))),(0,r.kt)("h2",{id:"dashboardusageaggregationmetrics"},"DashboardUsageAggregationMetrics"),(0,r.kt)("p",null,"Rolled up metrics about Dashboard usage over 
time"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique number of dashboard users within the time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of dashboard views within the time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of dashboard executions within the time range"))))),(0,r.kt)("h2",{id:"dashboardusagemetrics"},"DashboardUsageMetrics"),(0,r.kt)("p",null,"A set of absolute dashboard usage metrics"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#timeseriesaspect"},"TimeSeriesAspect"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the metrics were 
reported"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"favoritesCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of times dashboard has been favorited FIXME: Qualifies as Popularity Metric rather than Usage Metric?"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of dashboard views"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of dashboard execution"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastViewed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time when this dashboard was last viewed"))))),(0,r.kt)("h2",{id:"dashboardusagequeryresult"},"DashboardUsageQueryResult"),(0,r.kt)("p",null,"The result of a dashboard usage query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"buckets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardusageaggregation"},(0,r.kt)("code",null,"[DashboardUsageAggregation]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of relevant time windows for use in displaying usage statistics"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aggregations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardusagequeryresultaggregations"},(0,r.kt)("code",null,"DashboardUsageQueryResultAggregations"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of rolled up aggregations about the 
dashboard usage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metrics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboardusagemetrics"},(0,r.kt)("code",null,"[DashboardUsageMetrics!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of absolute dashboard usage metrics"))))),(0,r.kt)("h2",{id:"dashboardusagequeryresultaggregations"},"DashboardUsageQueryResultAggregations"),(0,r.kt)("p",null,"A set of rolled up aggregations about the Dashboard usage"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The count of unique Dashboard users within the queried time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dashboarduserusagecounts"},(0,r.kt)("code",null,"[DashboardUserUsageCounts]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific per user usage counts within the queried time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of dashboard views within the queried time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of dashboard executions within the queried time range"))))),(0,r.kt)("h2",{id:"dashboarduserusagecounts"},"DashboardUserUsageCounts"),(0,r.kt)("p",null,"Information about individual user usage of a 
Dashboard"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"user",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The user of the Dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"number of times dashboard has been viewed by the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionsCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"number of dashboard executions by the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"usageCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Normalized numeric metric representing user's dashboard usage Higher value represents more usage"))))),(0,r.kt)("h2",{id:"dataflow"},"DataFlow"),(0,r.kt)("p",null,"A Data Flow Metadata Entity, representing an set of pipelined Data Job or Tasks required\nto produce an output Dataset Also known as a Data 
Pipeline"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of a Data Flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"orchestrator",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Workflow orchestrator ei Azkaban, Airflow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"flowId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Id of the 
flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cluster",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Cluster of the flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataflowproperties"},(0,r.kt)("code",null,"DataFlowProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties about a Data flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datafloweditableproperties"},(0,r.kt)("code",null,"DataFlowEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read write properties about a Data Flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tags associated with the dataflow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the dataflow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the Data Flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources 
related to the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the DataFlow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage 
graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the data flow. If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. 
If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataflowinfo"},(0,r.kt)("code",null,"DataFlowInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional read only information about a Data flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags field instead The structured tags associated with the dataflow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataJobs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataflowdatajobsrelationships"},(0,r.kt)("code",null,"DataFlowDataJobsRelationships"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use relationship IsPartOf instead Data Jobs"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the datflow is defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"dataflowdatajobsrelationships"},"DataFlowDataJobsRelationships"),(0,r.kt)("p",null,"Deprecated, use relationships query 
instead"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshiplegacy"},(0,r.kt)("code",null,"[EntityRelationshipLegacy]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"datafloweditableproperties"},"DataFlowEditableProperties"),(0,r.kt)("p",null,"Data Flow properties that are editable via the UI This represents logical metadata,\nas opposed to technical metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Data Flow"))))),(0,r.kt)("h2",{id:"dataflowinfo"},"DataFlowInfo"),(0,r.kt)("p",null,"Deprecated, use DataFlowProperties instead\nAdditional read only properties about a Data Flow aka Pipeline"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the 
flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"project",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional project or namespace associated with the flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL associated with the DataFlow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))))),(0,r.kt)("h2",{id:"dataflowproperties"},"DataFlowProperties"),(0,r.kt)("p",null,"Additional read only properties about a Data Flow aka Pipeline"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"project",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional project or namespace associated with the 
flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL associated with the DataFlow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))))),(0,r.kt)("h2",{id:"datahubpolicy"},"DataHubPolicy"),(0,r.kt)("p",null,"An DataHub Platform Access Policy - Policies determine who can perform what actions against which resources on the platform"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from the 
Role"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"policyType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policytype"},(0,r.kt)("code",null,"PolicyType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"state",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policystate"},(0,r.kt)("code",null,"PolicyState!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The present state of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#resourcefilter"},(0,r.kt)("code",null,"ResourceFilter"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The resources that the Policy privileges apply to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The privileges that the Policy 
grants"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#actorfilter"},(0,r.kt)("code",null,"ActorFilter!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actors that the Policy grants privileges to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editable",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the Policy is editable, ie system policies, or not"))))),(0,r.kt)("h2",{id:"datahubrole"},"DataHubRole"),(0,r.kt)("p",null,"A DataHub Role is a high-level abstraction on top of Policies that dictates what actions users can take."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the role"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from the 
Role"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Role."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the Role"))))),(0,r.kt)("h2",{id:"datahubview"},"DataHubView"),(0,r.kt)("p",null,"An DataHub View - Filters that are applied across the application automatically."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity 
Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#datahubviewtype"},(0,r.kt)("code",null,"DataHubViewType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datahubviewdefinition"},(0,r.kt)("code",null,"DataHubViewDefinition!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The definition of the View"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from the View"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"datahubviewdefinition"},"DataHubViewDefinition"),(0,r.kt)("p",null,"An DataHub View 
Definition"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of filters to apply. If left empty, then ALL entity types are in scope."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datahubviewfilter"},(0,r.kt)("code",null,"DataHubViewFilter!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of filters to apply. If left empty, then no filters will be applied."))))),(0,r.kt)("h2",{id:"datahubviewfilter"},"DataHubViewFilter"),(0,r.kt)("p",null,"A DataHub View Filter. Note that"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"operator",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#logicaloperator"},(0,r.kt)("code",null,"LogicalOperator!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The operator used to combine the filters."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetfilter"},(0,r.kt)("code",null,"[FacetFilter!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of filters combined using the operator. 
If left empty, then no filters will be applied."))))),(0,r.kt)("h2",{id:"datajob"},"DataJob"),(0,r.kt)("p",null,"A Data Job Metadata Entity, representing an individual unit of computation or Task\nto produce an output Dataset Always part of a parent Data Flow aka Pipeline"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Data Job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataFlow",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataflow"},(0,r.kt)("code",null,"DataFlow"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use relationship IsPartOf instead The associated data 
flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"jobId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Id of the job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datajobproperties"},(0,r.kt)("code",null,"DataJobProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties associated with the Data Job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datajobeditableproperties"},(0,r.kt)("code",null,"DataJobEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read write properties associated with the Data Job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tags associated with the DataJob"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the 
DataJob"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the Data Flow"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dashboard"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Data Job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the data job. If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. 
If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datajobinfo"},(0,r.kt)("code",null,"DataJobInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional read only information about a Data processing job"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"inputOutput",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datajobinputoutput"},(0,r.kt)("code",null,"DataJobInputOutput"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Information about the inputs and outputs of a Data processing job including column-level lineage."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use the tags field instead The structured tags associated with the DataJob"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataprocessinstanceresult"},(0,r.kt)("code",null,"DataProcessInstanceResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"History of runs of this 
task"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprivileges"},(0,r.kt)("code",null,"EntityPrivileges"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Privileges given to a user relevant to this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"datajobeditableproperties"},"DataJobEditableProperties"),(0,r.kt)("p",null,"Data Job properties that are editable via the UI This represents logical metadata,\nas opposed to technical metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Data Job"))))),(0,r.kt)("h2",{id:"datajobinfo"},"DataJobInfo"),(0,r.kt)("p",null,"Deprecated, use DataJobProperties instead\nAdditional read only information about a Data Job aka 
Task"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Job display name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Job description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL associated with the DataJob"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))))),(0,r.kt)("h2",{id:"datajobinputoutput"},"DataJobInputOutput"),(0,r.kt)("p",null,"The lineage information for a DataJob\nTODO Rename this to align with other Lineage models"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"inputDatasets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"[Dataset!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use relationship Consumes instead Input datasets produced by the data job during 
processing"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"outputDatasets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"[Dataset!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use relationship Produces instead Output datasets produced by the data job during processing"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"inputDatajobs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datajob"},(0,r.kt)("code",null,"[DataJob!]"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use relationship DownstreamOf instead Input datajobs that this data job depends on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fineGrainedLineages",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#finegrainedlineage"},(0,r.kt)("code",null,"[FineGrainedLineage!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Lineage information for the column-level. Includes a list of objects detailing which columns are upstream and which are downstream of each other. 
The upstream and downstream columns are from datasets."))))),(0,r.kt)("h2",{id:"datajobproperties"},"DataJobProperties"),(0,r.kt)("p",null,"Additional read only properties about a Data Job aka Task"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Job display name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Job description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL associated with the DataJob"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))))),(0,r.kt)("h2",{id:"dataplatform"},"DataPlatform"),(0,r.kt)("p",null,"A Data Platform represents a specific third party Data System or Tool Examples include\nwarehouses like Snowflake, orchestrators like Airflow, and dashboarding tools like 
Looker"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of the data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatformproperties"},(0,r.kt)("code",null,"DataPlatformProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties associated with a data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties displayName instead Display name of the data 
platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminfo"},(0,r.kt)("code",null,"DataPlatformInfo"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead Additional properties associated with a data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"dataplatforminfo"},"DataPlatformInfo"),(0,r.kt)("p",null,"Deprecated, use DataPlatformProperties instead\nAdditional read only information about a Data Platform"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#platformtype"},(0,r.kt)("code",null,"PlatformType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The platform category"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name associated with the 
platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetNameDelimiter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The delimiter in the dataset names on the data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"logoUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A logo URL associated with the platform"))))),(0,r.kt)("h2",{id:"dataplatforminstance"},"DataPlatformInstance"),(0,r.kt)("p",null,"A Data Platform instance represents an instance of a 3rd party platform like Looker, Snowflake, etc."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of the data 
platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"instanceId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The platform instance id"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstanceproperties"},(0,r.kt)("code",null,"DataPlatformInstanceProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties associated with a data platform instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the data platform instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the data platform 
instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags used for searching the data platform instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the data platform instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the container"))))),(0,r.kt)("h2",{id:"dataplatforminstanceproperties"},"DataPlatformInstanceProperties"),(0,r.kt)("p",null,"Additional read only properties about a DataPlatformInstance"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the data platform instance used in display"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Read only technical description for the data platform instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the data platform 
instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL associated with the data platform instance"))))),(0,r.kt)("h2",{id:"dataplatformproperties"},"DataPlatformProperties"),(0,r.kt)("p",null,"Additional read only properties about a Data Platform"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#platformtype"},(0,r.kt)("code",null,"PlatformType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The platform category"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name associated with the platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetNameDelimiter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The delimiter in the dataset names on the data platform"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"logoUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A logo URL associated with the platform"))))),(0,r.kt)("h2",{id:"dataprocessinstance"},"DataProcessInstance"),(0,r.kt)("p",null,"A DataProcessInstance Metadata Entity, representing an individual run of\na task or 
datajob."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the DataProcessInstance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"state",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataprocessrunevent"},(0,r.kt)("code",null,"[DataProcessRunEvent]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The history of state changes for the 
run"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"When the run was kicked off"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the data process"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity. 
In the UI, used for inputs, outputs and parentTemplate"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The link to view the task run in the source system"))))),(0,r.kt)("h2",{id:"dataprocessinstanceresult"},"DataProcessInstanceResult"),(0,r.kt)("p",null,"Data Process instances that match the provided 
query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities to include in result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of run events returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataprocessinstance"},(0,r.kt)("code",null,"[DataProcessInstance]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The data process instances that produced or consumed the entity"))))),(0,r.kt)("h2",{id:"dataprocessinstancerunresult"},"DataProcessInstanceRunResult"),(0,r.kt)("p",null,"the result of a run, part of the run state"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"resultType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#dataprocessinstancerunresulttype"},(0,r.kt)("code",null,"DataProcessInstanceRunResultType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The outcome of the 
run"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nativeResultType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The outcome of the run in the data platforms native language"))))),(0,r.kt)("h2",{id:"dataprocessrunevent"},"DataProcessRunEvent"),(0,r.kt)("p",null,"A state change event in the data process instance lifecycle"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#timeseriesaspect"},"TimeSeriesAspect"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#dataprocessrunstatus"},(0,r.kt)("code",null,"DataProcessRunStatus"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The status of the data process instance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"attempt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The try number that this instance run is in"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"result",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataprocessinstancerunresult"},(0,r.kt)("code",null,"DataProcessInstanceRunResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The result of a run"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp associated with the run event in milliseconds"))))),(0,r.kt)("h2",{id:"dataproduct"},"DataProduct"),(0,r.kt)("p",null,"A Data Product, or a logical grouping of 
Metadata Entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataproductproperties"},(0,r.kt)("code",null,"DataProductProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties about a Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchresults"},(0,r.kt)("code",null,"SearchResults"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Children entities inside of the DataProduct"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#searchacrossentitiesinput"},(0,r.kt)("code",null,"SearchAcrossEntitiesInput"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags used for searching Data 
Product"))))),(0,r.kt)("h2",{id:"dataproductproperties"},"DataProductProperties"),(0,r.kt)("p",null,"Properties about a domain"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Data Product"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL for the DataProduct (most likely GitHub repo where Data Products are managed as code)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"numAssets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of children entities inside of the Data Product. 
This number includes soft deleted entities."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the Data Product"))))),(0,r.kt)("h2",{id:"dataset"},"Dataset"),(0,r.kt)("p",null,"A Dataset entity, which encompasses Relational Tables, Document store collections, streaming topics, and other sets of data having an independent lifecycle"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was 
ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the dataset is defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"container",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#container"},(0,r.kt)("code",null,"Container"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The parent container in which the entity resides"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentContainers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentcontainersresult"},(0,r.kt)("code",null,"ParentContainersResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of containers for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique guid for dataset No longer to be used as the Dataset display name. 
Use properties.name instead"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datasetproperties"},(0,r.kt)("code",null,"DatasetProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of read only properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataseteditableproperties"},(0,r.kt)("code",null,"DatasetEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schemaMetadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemametadata"},(0,r.kt)("code",null,"SchemaMetadata"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema metadata of the dataset, available by version 
number"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableSchemaMetadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#editableschemametadata"},(0,r.kt)("code",null,"EditableSchemaMetadata"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Editable schema metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"embed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#embed"},(0,r.kt)("code",null,"Embed"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Embed information about the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags used for searching dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"access",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#access"},(0,r.kt)("code",null,"Access"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Roles and the properties to access the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"usageStats",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#usagequeryresult"},(0,r.kt)("code",null,"UsageQueryResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Statistics about how this Dataset is used The first parameter, ",(0,r.kt)("code",null,"resource"),", is deprecated and no longer needs to be provided"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"resource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"range",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#timerange"},(0,r.kt)("code",null,"TimeRange"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"statsSummary",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datasetstatssummary"},(0,r.kt)("code",null,"DatasetStatsSummary"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Experimental - Summary operational & usage statistics about a Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetProfiles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datasetprofile"},(0,r.kt)("code",null,"[DatasetProfile!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Profile Stats resource that 
retrieves the events in a previous unit of time in descending order If no start or end time are provided, the most recent events will be returned"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#filterinput"},(0,r.kt)("code",null,"FilterInput"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"operations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#operation"},(0,r.kt)("code",null,"[Operation!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Operational events for an 
entity."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"endTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#filterinput"},(0,r.kt)("code",null,"FilterInput"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"limit",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"assertions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityassertionsresult"},(0,r.kt)("code",null,"EntityAssertionsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Assertions associated with the 
Dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage 
graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the dataset. If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"health",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#health"},(0,r.kt)("code",null,"[Health!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Experimental! 
The resolved health statuses of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schema"},(0,r.kt)("code",null,"Schema"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: Use `schemaMetadata`"),(0,r.kt)("p",null,"Schema metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead External URL associated with the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"origin",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#fabrictype"},(0,r.kt)("code",null,"FabricType!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, see the properties field instead Environment in which the dataset belongs to or where it was generated Note that this field will soon be deprecated in favor of a more standardized concept of Environment"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use the properties field instead Read only technical description for dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformNativeType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#platformnativetype"},(0,r.kt)("code",null,"PlatformNativeType"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, do not use this field The logical type of the dataset ie table, stream, 
etc"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"uri",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties instead Native Dataset Uri Uri should not include any environment specific properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags field instead The structured tags associated with the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#subtypes"},(0,r.kt)("code",null,"SubTypes"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Sub Types that this entity implements"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#viewproperties"},(0,r.kt)("code",null,"ViewProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"View related properties. Only relevant if subtypes field contains view."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aspects",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#rawaspect"},(0,r.kt)("code",null,"[RawAspect!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Experimental API. 
For fetching extra entities that do not have custom UI code yet"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#aspectparams"},(0,r.kt)("code",null,"AspectParams"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"runs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataprocessinstanceresult"},(0,r.kt)("code",null,"DataProcessInstanceResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"History of datajob runs that either produced or consumed this dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"direction",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#relationshipdirection"},(0,r.kt)("code",null,"RelationshipDirection!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"siblings",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#siblingproperties"},(0,r.kt)("code",null,"SiblingProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Metadata about the datasets 
siblings"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fineGrainedLineages",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#finegrainedlineage"},(0,r.kt)("code",null,"[FineGrainedLineage!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Lineage information for the column-level. Includes a list of objects detailing which columns are upstream and which are downstream of each other. The upstream and downstream columns are from datasets."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprivileges"},(0,r.kt)("code",null,"EntityPrivileges"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Privileges given to a user relevant to this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"testResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#testresults"},(0,r.kt)("code",null,"TestResults"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The results of evaluating tests"))))),(0,r.kt)("h2",{id:"datasetassertioninfo"},"DatasetAssertionInfo"),(0,r.kt)("p",null,"Detailed information about a Dataset Assertion"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the dataset that the assertion is related 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"scope",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#datasetassertionscope"},(0,r.kt)("code",null,"DatasetAssertionScope!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The scope of the Dataset assertion."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldref"},(0,r.kt)("code",null,"[SchemaFieldRef!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The fields serving as input to the assertion. Empty if there are none."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aggregation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertionstdaggregation"},(0,r.kt)("code",null,"AssertionStdAggregation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized assertion operator"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"operator",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#assertionstdoperator"},(0,r.kt)("code",null,"AssertionStdOperator!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized assertion operator"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parameters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertionstdparameters"},(0,r.kt)("code",null,"AssertionStdParameters"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standard parameters required for the assertion. e.g. min_value, max_value, value, columns"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nativeType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The native operator for the assertion. 
For Great Expectations, this will contain the original expectation name."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nativeParameters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native parameters required for the assertion."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"logic",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Logic comprising a raw, unstructured assertion."))))),(0,r.kt)("h2",{id:"datasetdeprecation"},"DatasetDeprecation"),(0,r.kt)("p",null,"Deprecated, use Deprecation instead\nInformation about Dataset deprecation status\nNote that this model will soon be migrated to a more general purpose Entity status"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the dataset has been deprecated by owner"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"decommissionTime",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time user plan to decommission this dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"note",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional information about the dataset deprecation plan"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The 
user who will be credited for modifying this deprecation content"))))),(0,r.kt)("h2",{id:"dataseteditableproperties"},"DatasetEditableProperties"),(0,r.kt)("p",null,"Dataset properties that are editable via the UI This represents logical metadata,\nas opposed to technical metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Dataset"))))),(0,r.kt)("h2",{id:"datasetfieldprofile"},"DatasetFieldProfile"),(0,r.kt)("p",null,"An individual Dataset Field Profile"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standardized path of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique value count for the field across the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueProportion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#float"},(0,r.kt)("code",null,"Float"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The proportion of rows with unique values across the 
Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nullCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of NULL row values across the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nullProportion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#float"},(0,r.kt)("code",null,"Float"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The proportion of rows with NULL values across the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"min",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The min value for the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"max",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The max value for the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"mean",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The mean value for the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"median",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The median value for the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"stdev",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard deviation for the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sampleValues",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of sample values for the field"))))),(0,r.kt)("h2",{id:"datasetprofile"},"DatasetProfile"),(0,r.kt)("p",null,"A Dataset Profile associated with a Dataset, containing profiling 
statistics about the Dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#timeseriesaspect"},"TimeSeriesAspect"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the profile was reported"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"rowCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional row count of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"columnCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional column count of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sizeInBytes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The storage size in bytes"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldProfiles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datasetfieldprofile"},(0,r.kt)("code",null,"[DatasetFieldProfile!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional set of per field statistics obtained in the profile"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"partitionSpec",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#partitionspec"},(0,r.kt)("code",null,"PartitionSpec"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Information about the partition that was 
profiled"))))),(0,r.kt)("h2",{id:"datasetproperties"},"DatasetProperties"),(0,r.kt)("p",null,"Additional read only properties about a Dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the dataset used in display"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"qualifiedName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Fully-qualified name of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"origin",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#fabrictype"},(0,r.kt)("code",null,"FabricType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Environment in which the dataset belongs to or where it was generated Note that this field will soon be deprecated in favor of a more standardized concept of Environment"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Read only technical description for dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom properties of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"External URL associated with the 
Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Created timestamp millis associated with the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdActor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Actor associated with the Dataset's created timestamp"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Last Modified timestamp millis associated with the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModifiedActor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Actor associated with the Dataset's lastModified timestamp"))))),(0,r.kt)("h2",{id:"datasetstatssummary"},"DatasetStatsSummary"),(0,r.kt)("p",null,"Experimental - subject to change. 
A summary of usage metrics about a Dataset."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"queryCountLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query count in the past 30 days"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCountLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique user count in the past 30 days"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"topUsersLast30Days",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The top users in the past 30 days"))))),(0,r.kt)("h2",{id:"daterange"},"DateRange"),(0,r.kt)("p",null,"For consumption by UI only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"end",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"deprecation"},"Deprecation"),(0,r.kt)("p",null,"Information about Metadata Entity deprecation 
status"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the entity has been deprecated by owner"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"decommissionTime",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time user plan to decommission this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"note",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional information about the entity deprecation plan"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The user who will be credited for modifying this deprecation content"))))),(0,r.kt)("h2",{id:"domain"},"Domain"),(0,r.kt)("p",null,"A domain, or a logical grouping of Metadata Entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of 
the domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Id of the domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainproperties"},(0,r.kt)("code",null,"DomainProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties about a domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchresults"},(0,r.kt)("code",null,"SearchResults"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Children entities inside of the 
Domain"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#domainentitiesinput"},(0,r.kt)("code",null,"DomainEntitiesInput"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"domainassociation"},"DomainAssociation"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domain"},(0,r.kt)("code",null,"Domain!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The domain related to the assocaited urn"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Reference back to the tagged urn for tracking purposes e.g. 
when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"domainproperties"},"DomainProperties"),(0,r.kt)("p",null,"Properties about a domain"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Domain"))))),(0,r.kt)("h2",{id:"downstreamentityrelationships"},"DownstreamEntityRelationships"),(0,r.kt)("p",null,"Deprecated, use relationships query instead"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshiplegacy"},(0,r.kt)("code",null,"[EntityRelationshipLegacy]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"editableschemafieldinfo"},"EditableSchemaFieldInfo"),(0,r.kt)("p",null,"Editable schema field metadata ie descriptions, tags, etc"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flattened name of a 
field identifying the field the editable info is applied to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edited description of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags field instead Tags associated with the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags associated with the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Glossary terms associated with the field"))))),(0,r.kt)("h2",{id:"editableschemametadata"},"EditableSchemaMetadata"),(0,r.kt)("p",null,"Information about schema metadata that is editable via the UI"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableSchemaFieldInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#editableschemafieldinfo"},(0,r.kt)("code",null,"[EditableSchemaFieldInfo!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Editable schema field metadata"))))),(0,r.kt)("h2",{id:"editabletagproperties"},"EditableTagProperties"),(0,r.kt)("p",null,"Additional read write Tag properties\nDeprecated! 
Replaced by TagProperties."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A display name for the Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A description of the Tag"))))),(0,r.kt)("h2",{id:"embed"},"Embed"),(0,r.kt)("p",null,"Information required to render an embedded version of an asset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"renderUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A URL which can be rendered inside of an iframe."))))),(0,r.kt)("h2",{id:"entityassertionsresult"},"EntityAssertionsResult"),(0,r.kt)("p",null,"A list of Assertions Associated with an Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set 
returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of assertions in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of assertions in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"assertions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#assertion"},(0,r.kt)("code",null,"[Assertion!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The assertions themselves"))))),(0,r.kt)("h2",{id:"entitycountresult"},"EntityCountResult"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"entitycountresults"},"EntityCountResults"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"counts",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitycountresult"},(0,r.kt)("code",null,"[EntityCountResult!]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"entitylineageresult"},"EntityLineageResult"),(0,r.kt)("p",null,"A list of lineage information associated with a source 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Start offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of results in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filtered",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results that were filtered out of the page (soft-deleted or non-existent)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#lineagerelationship"},(0,r.kt)("code",null,"[LineageRelationship!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Relationships in the result set"))))),(0,r.kt)("h2",{id:"entitypath"},"EntityPath"),(0,r.kt)("p",null,"An overview of the field that was matched in the entity search 
document"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"[Entity]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Path of entities between source and destination nodes"))))),(0,r.kt)("h2",{id:"entityprivileges"},"EntityPrivileges"),(0,r.kt)("p",null,"Shared privileges object across entities. Not all privileges apply to every entity."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"canManageChildren",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not a user can create child entities under a parent entity. 
For example, can one create Terms/Node sunder a Glossary Node."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"canManageEntity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not a user can delete or move this entity."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"canEditLineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not a user can create or delete lineage edges for an entity."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"canEditEmbed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not a user update the embed information"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"canEditQueries",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not a user can update the Queries for the entity (e.g. dataset)"))))),(0,r.kt)("h2",{id:"entityprofileconfig"},"EntityProfileConfig"),(0,r.kt)("p",null,"Configuration for an entity profile"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"defaultTab",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The enum value from EntityProfileTab for which tab should be showed by default on entity profile pages. 
If null, rely on default sorting from React code."))))),(0,r.kt)("h2",{id:"entityprofileparams"},"EntityProfileParams"),(0,r.kt)("p",null,"Context to define the entity profile page"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the entity being shown"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Type of the enity being displayed"))))),(0,r.kt)("h2",{id:"entityprofilesconfig"},"EntityProfilesConfig"),(0,r.kt)("p",null,"Configuration for different entity profiles"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprofileconfig"},(0,r.kt)("code",null,"EntityProfileConfig"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The configurations for a Domain entity profile"))))),(0,r.kt)("h2",{id:"entityrelationship"},"EntityRelationship"),(0,r.kt)("p",null,"A relationship between two entities TODO Migrate all entity relationships to this more generic 
model"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the relationship"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"direction",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#relationshipdirection"},(0,r.kt)("code",null,"RelationshipDirection!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The direction of the relationship relative to the source entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity that is related via lineage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the last modification of this relationship"))))),(0,r.kt)("h2",{id:"entityrelationshiplegacy"},"EntityRelationshipLegacy"),(0,r.kt)("p",null,"Deprecated, use relationships query instead"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entitywithrelationships"},(0,r.kt)("code",null,"EntityWithRelationships"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity that is related via 
lineage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the last modification of this relationship"))))),(0,r.kt)("h2",{id:"entityrelationshipsresult"},"EntityRelationshipsResult"),(0,r.kt)("p",null,"A list of relationship information associated with a source Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Start offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of results in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationship"},(0,r.kt)("code",null,"[EntityRelationship!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Relationships in the result 
set"))))),(0,r.kt)("h2",{id:"ethicalconsiderations"},"EthicalConsiderations"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"data",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Does the model use any sensitive data eg, protected classes"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"humanLife",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Is the model intended to inform decisions about matters central to human life or flourishing eg, health or safety"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"mitigations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"What risk mitigation strategies were used during model development"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"risksAndHarms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"What risks may be present in model usage Try to identify the potential recipients, likelihood, and magnitude of harms If these cannot be determined, note that they were considered but remain unknown"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"useCases",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Are there any known model use cases that are especially fraught This may connect directly to the intended use section"))))),(0,r.kt)("h2",{id:"executionrequest"},"ExecutionRequest"),(0,r.kt)("p",null,"Retrieve an ingestion execution 
request"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Urn of the execution request"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique id for the execution request"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#executionrequestinput"},(0,r.kt)("code",null,"ExecutionRequestInput!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Input provided when creating the Execution Request"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"result",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#executionrequestresult"},(0,r.kt)("code",null,"ExecutionRequestResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Result of the execution request"))))),(0,r.kt)("h2",{id:"executionrequestinput"},"ExecutionRequestInput"),(0,r.kt)("p",null,"Input provided when creating an Execution Request"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"task",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the task to 
executed"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"source",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#executionrequestsource"},(0,r.kt)("code",null,"ExecutionRequestSource!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The source of the execution request"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"arguments",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Arguments provided when creating the execution request"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"requestedAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the request was created"))))),(0,r.kt)("h2",{id:"executionrequestresult"},"ExecutionRequestResult"),(0,r.kt)("p",null,"The result of an ExecutionRequest"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The result of the request, e.g. 
either SUCCEEDED or FAILED"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Time at which the task began"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"durationMs",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Duration of the task"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"report",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A report about the ingestion run"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"structuredReport",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#structuredreport"},(0,r.kt)("code",null,"StructuredReport"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A structured report for this Execution Request"))))),(0,r.kt)("h2",{id:"executionrequestsource"},"ExecutionRequestSource"),(0,r.kt)("p",null,"Information about the source of an execution request"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the source, e.g. 
SCHEDULED_INGESTION_SOURCE"))))),(0,r.kt)("h2",{id:"facetfilter"},"FacetFilter"),(0,r.kt)("p",null,"A single filter value"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of field to filter by"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"condition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#filteroperator"},(0,r.kt)("code",null,"FilterOperator"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Condition for the values."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"values",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Values, one of which the intended field should match."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"negated",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"If the filter should or should not be matched"))))),(0,r.kt)("h2",{id:"facetmetadata"},"FacetMetadata"),(0,r.kt)("p",null,"Contains valid fields to filter search results further on"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of a field present in the search 
entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display name of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aggregations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#aggregationmetadata"},(0,r.kt)("code",null,"[AggregationMetadata!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Aggregated search result counts by value of the field"))))),(0,r.kt)("h2",{id:"featureflagsconfig"},"FeatureFlagsConfig"),(0,r.kt)("p",null,"Configurations related to DataHub Views feature"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"readOnlyModeEnabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether read only mode is enabled on an instance. 
Right now this only affects ability to edit user profile image URL but can be extended."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"showSearchFiltersV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether search filters V2 should be shown or the default filter side-panel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"showBrowseV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether browse V2 sidebar should be shown"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"showAcrylInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether we should show CTAs in the UI related to moving to Managed DataHub by Acryl."))))),(0,r.kt)("h2",{id:"fieldusagecounts"},"FieldUsageCounts"),(0,r.kt)("p",null,"The usage for a particular Dataset field"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The path of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The count of 
usages"))))),(0,r.kt)("h2",{id:"finegrainedlineage"},"FineGrainedLineage"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"upstreams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldref"},(0,r.kt)("code",null,"[SchemaFieldRef!]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"downstreams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldref"},(0,r.kt)("code",null,"[SchemaFieldRef!]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"floatbox"},"FloatBox"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"floatValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#float"},(0,r.kt)("code",null,"Float!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"foreignkeyconstraint"},"ForeignKeyConstraint"),(0,r.kt)("p",null,"Metadata around a foreign key constraint between two datasets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The human-readable name of the constraint"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"foreignFields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldentity"},(0,r.kt)("code",null,"[SchemaFieldEntity]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of fields in the foreign 
dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceFields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldentity"},(0,r.kt)("code",null,"[SchemaFieldEntity]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of fields in this dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"foreignDataset",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"Dataset"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The foreign dataset for easy reference"))))),(0,r.kt)("h2",{id:"freshnessstats"},"FreshnessStats"),(0,r.kt)("p",null,"Freshness stats for a query result.\nCaptures whether the query was served out of a cache, what the staleness was, etc."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"cached",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether a cache was used to respond to this query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"systemFreshness",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#systemfreshness"},(0,r.kt)("code",null,"[SystemFreshness]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The latest timestamp in millis of the system that was used to respond to this query In case a cache was consulted, this reflects the freshness of the cache In case an index was consulted, this reflects the freshness of the index"))))),(0,r.kt)("h2",{id:"getquickfiltersresult"},"GetQuickFiltersResult"),(0,r.kt)("p",null,"The result object when fetching quick 
filters"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"quickFilters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#quickfilter"},(0,r.kt)("code",null,"[QuickFilter]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The list of quick filters to render in the UI"))))),(0,r.kt)("h2",{id:"getrootglossarynodesresult"},"GetRootGlossaryNodesResult"),(0,r.kt)("p",null,"The result when getting Glossary entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"nodes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossarynode"},(0,r.kt)("code",null,"[GlossaryNode!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of Glossary Nodes without a parent node"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of nodes in the returned result"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of nodes in the result set"))))),(0,r.kt)("h2",{id:"getrootglossarytermsresult"},"GetRootGlossaryTermsResult"),(0,r.kt)("p",null,"The result when getting root 
GlossaryTerms"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"terms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterm"},(0,r.kt)("code",null,"[GlossaryTerm!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of Glossary Terms without a parent node"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of terms in the returned result"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of terms in the result set"))))),(0,r.kt)("h2",{id:"getschemablameresult"},"GetSchemaBlameResult"),(0,r.kt)("p",null,"Schema changes computed at a specific version."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#semanticversionstruct"},(0,r.kt)("code",null,"SemanticVersionStruct"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Selected semantic 
version"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schemaFieldBlameList",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldblame"},(0,r.kt)("code",null,"[SchemaFieldBlame!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of schema blame. Absent when there are no fields to return history for."))))),(0,r.kt)("h2",{id:"getschemaversionlistresult"},"GetSchemaVersionListResult"),(0,r.kt)("p",null,"Schema changes computed at a specific version."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"latestVersion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#semanticversionstruct"},(0,r.kt)("code",null,"SemanticVersionStruct"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Latest and current semantic version"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#semanticversionstruct"},(0,r.kt)("code",null,"SemanticVersionStruct"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Selected semantic version"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"semanticVersionList",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#semanticversionstruct"},(0,r.kt)("code",null,"[SemanticVersionStruct!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"All semantic versions. 
Absent when there are no versions."))))),(0,r.kt)("h2",{id:"globaltags"},"GlobalTags"),(0,r.kt)("p",null,"Tags attached to a particular Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#tagassociation"},(0,r.kt)("code",null,"[TagAssociation!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The set of tags attached to the Metadata Entity"))))),(0,r.kt)("h2",{id:"globalviewssettings"},"GlobalViewsSettings"),(0,r.kt)("p",null,"Global (platform-level) settings related to the Views feature"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"defaultView",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The global default View. 
If a user does not have a personal default, then this will be the default view."))))),(0,r.kt)("h2",{id:"glossarynode"},"GlossaryNode"),(0,r.kt)("p",null,"A Glossary Node, or a directory in a Business Glossary represents a container of\nGlossary Terms or other Glossary Nodes"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossarynodeproperties"},(0,r.kt)("code",null,"GlossaryNodeProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional properties associated with the Glossary Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentNodes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentnodesresult"},(0,r.kt)("code",null,"ParentNodesResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of glossary nodes for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprivileges"},(0,r.kt)("code",null,"EntityPrivileges"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Privileges given to a user relevant to this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"glossarynodeproperties"},"GlossaryNodeProperties"),(0,r.kt)("p",null,"Additional read only properties about a Glossary Node"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Glossary 
Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the glossary term"))))),(0,r.kt)("h2",{id:"glossaryterm"},"GlossaryTerm"),(0,r.kt)("p",null,"A Glossary Term, or a node in a Business Glossary representing a standardized domain\ndata type"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the Glossary 
Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"A unique identifier for the Glossary Term. Deprecated - Use properties.name field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"hierarchicalName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"hierarchicalName of glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossarytermproperties"},(0,r.kt)("code",null,"GlossaryTermProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional properties associated with the Glossary Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTermInfo",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterminfo"},(0,r.kt)("code",null,"GlossaryTermInfo"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecated, use properties field instead Details of the Glossary Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the Glossary Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schemaMetadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemametadata"},(0,r.kt)("code",null,"SchemaMetadata"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema metadata of the dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentNodes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentnodesresult"},(0,r.kt)("code",null,"ParentNodesResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of glossary nodes for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprivileges"},(0,r.kt)("code",null,"EntityPrivileges"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Privileges given to a user relevant to this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"glossarytermassociation"},"GlossaryTermAssociation"),(0,r.kt)("p",null,"An edge between a 
Metadata Entity and a Glossary Term Modeled as a struct to permit\nadditional attributes\nTODO Consider whether this query should be serviced by the relationships field"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"term",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterm"},(0,r.kt)("code",null,"GlossaryTerm!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The glossary term itself"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Reference back to the associated urn for tracking purposes e.g. when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"glossaryterminfo"},"GlossaryTermInfo"),(0,r.kt)("p",null,"Deprecated, use GlossaryTermProperties instead\nInformation about a glossary term"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Glossary Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer 
supported"),(0,r.kt)("p",null,"Definition of the glossary term. Deprecated - Use 'description' instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"termSource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Term Source of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceRef",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Ref of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Url of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"rawSchema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema definition of glossary term"))))),(0,r.kt)("h2",{id:"glossarytermproperties"},"GlossaryTermProperties"),(0,r.kt)("p",null,"Additional read only properties about a Glossary Term"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Glossary 
Term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Definition of the glossary term. Deprecated - Use 'description' instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"termSource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Term Source of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceRef",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Ref of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Url of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties of the glossary term"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"rawSchema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema definition of glossary term"))))),(0,r.kt)("h2",{id:"glossaryterms"},"GlossaryTerms"),(0,r.kt)("p",null,"Glossary Terms attached to a particular Metadata 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"terms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossarytermassociation"},(0,r.kt)("code",null,"[GlossaryTermAssociation!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The set of glossary terms attached to the Metadata Entity"))))),(0,r.kt)("h2",{id:"health"},"Health"),(0,r.kt)("p",null,"The resolved Health of an Asset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#healthstatustype"},(0,r.kt)("code",null,"HealthStatusType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An enum representing the type of health indicator"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#healthstatus"},(0,r.kt)("code",null,"HealthStatus!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An enum representing the resolved Health status of an Asset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"message",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional message describing the resolved health status"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"causes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The causes responsible for the health status"))))),(0,r.kt)("h2",{id:"highlight"},"Highlight"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"body",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"hyperparametermap"},"HyperParameterMap"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"key",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#hyperparametervaluetype"},(0,r.kt)("code",null,"HyperParameterValueType!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"identitymanagementconfig"},"IdentityManagementConfig"),(0,r.kt)("p",null,"Configurations related to Identity Management"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether identity management 
screen is able to be shown in the UI"))))),(0,r.kt)("h2",{id:"ingestionconfig"},"IngestionConfig"),(0,r.kt)("p",null,"A set of configurations for an Ingestion Source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"recipe",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The JSON-encoded recipe to use for ingestion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executorId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Advanced: The specific executor that should handle the execution request. Defaults to 'default'."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Advanced: The version of the ingestion framework to use"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"debugMode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Advanced: Whether or not to run ingestion in debug mode"))))),(0,r.kt)("h2",{id:"ingestionrun"},"IngestionRun"),(0,r.kt)("p",null,"The runs associated with an Ingestion Source managed by 
DataHub"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionRequestUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the execution request associated with the user"))))),(0,r.kt)("h2",{id:"ingestionschedule"},"IngestionSchedule"),(0,r.kt)("p",null,"A schedule associated with an Ingestion Source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"timezone",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Time Zone abbreviation (e.g. GMT, EDT). 
Defaults to UTC."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"interval",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The cron-formatted interval to execute the ingestion source on"))))),(0,r.kt)("h2",{id:"ingestionsource"},"IngestionSource"),(0,r.kt)("p",null,"An Ingestion Source Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Ingestion Source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the source itself, e.g. mysql, bigquery, bigquery-usage. 
Should match the recipe."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name of the Ingestion Source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schedule",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ingestionschedule"},(0,r.kt)("code",null,"IngestionSchedule"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional schedule associated with the Ingestion Source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The data platform associated with this ingestion source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"config",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ingestionconfig"},(0,r.kt)("code",null,"IngestionConfig!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An type-specific set of configurations for the ingestion source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ingestionsourceexecutionrequests"},(0,r.kt)("code",null,"IngestionSourceExecutionRequests"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Previous requests to execute the ingestion 
source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"ingestionsourceexecutionrequests"},"IngestionSourceExecutionRequests"),(0,r.kt)("p",null,"Requests for execution associated with an ingestion source"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"executionRequests",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#executionrequest"},(0,r.kt)("code",null,"[ExecutionRequest!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The execution request objects comprising the result 
set"))))),(0,r.kt)("h2",{id:"inputfield"},"InputField"),(0,r.kt)("p",null,"Input field of the chart"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"schemaFieldUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schemaField",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafield"},(0,r.kt)("code",null,"SchemaField"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"inputfields"},"InputFields"),(0,r.kt)("p",null,"Input fields of the chart"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#inputfield"},(0,r.kt)("code",null,"[InputField]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"institutionalmemory"},"InstitutionalMemory"),(0,r.kt)("p",null,"Institutional memory metadata, meaning internal links and pointers related to an Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"elements",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemorymetadata"},(0,r.kt)("code",null,"[InstitutionalMemoryMetadata!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of records that represent the institutional memory or internal documentation of an 
entity"))))),(0,r.kt)("h2",{id:"institutionalmemorymetadata"},"InstitutionalMemoryMetadata"),(0,r.kt)("p",null,"An institutional memory resource about a particular Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"url",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Link to a document or wiki page or another internal resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"label",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Label associated with the URL"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"author",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The author of this metadata"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An AuditStamp corresponding to the creation of this resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use label instead Description of the resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Reference back to the owned urn for tracking purposes e.g. 
when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"intbox"},"IntBox"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"intValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"intendeduse"},"IntendedUse"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"primaryUses",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Primary Use cases for the model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"primaryUsers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#intendedusertype"},(0,r.kt)("code",null,"[IntendedUserType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Primary Intended Users"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"outOfScopeUses",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Out of scope uses of the MLModel"))))),(0,r.kt)("h2",{id:"invitetoken"},"InviteToken"),(0,r.kt)("p",null,"Token that allows users to sign up as a native 
user"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"inviteToken",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The invite token"))))),(0,r.kt)("h2",{id:"keyvalueschema"},"KeyValueSchema"),(0,r.kt)("p",null,"Information about a raw Key Value Schema"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"keySchema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Raw key schema"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"valueSchema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Raw value schema"))))),(0,r.kt)("h2",{id:"lineageconfig"},"LineageConfig"),(0,r.kt)("p",null,"Configurations related to Lineage"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"supportsImpactAnalysis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the backend support impact analysis feature"))))),(0,r.kt)("h2",{id:"lineagerelationship"},"LineageRelationship"),(0,r.kt)("p",null,"Metadata about a lineage relationship between two 
entities"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the relationship"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity that is related via lineage"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"degree",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Degree of relationship (number of hops to get to entity)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdOn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Timestamp for when this lineage relationship was created. Could be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdActor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actor who created this lineage relationship. Could be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"updatedOn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Timestamp for when this lineage relationship was last updated. Could be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"updatedActor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actor who last updated this lineage relationship. 
Could be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"isManual",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether this edge is a manual edge. Could be null."))))),(0,r.kt)("h2",{id:"linkparams"},"LinkParams"),(0,r.kt)("p",null,"Parameters required to specify the page to land once clicked"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchparams"},(0,r.kt)("code",null,"SearchParams"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Context to define the search page"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityProfileParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprofileparams"},(0,r.kt)("code",null,"EntityProfileParams"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Context to define the entity profile page"))))),(0,r.kt)("h2",{id:"listaccesstokenresult"},"ListAccessTokenResult"),(0,r.kt)("p",null,"Results returned when listing access tokens"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be 
returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tokens",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#accesstokenmetadata"},(0,r.kt)("code",null,"[AccessTokenMetadata!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The token metadata themselves"))))),(0,r.kt)("h2",{id:"listdomainsresult"},"ListDomainsResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub Domains"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Domains in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Domains in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domains",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domain"},(0,r.kt)("code",null,"[Domain!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domains themselves"))))),(0,r.kt)("h2",{id:"listgroupsresult"},"ListGroupsResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub 
Groups"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Policies in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Policies in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpgroup"},(0,r.kt)("code",null,"[CorpGroup!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The groups themselves"))))),(0,r.kt)("h2",{id:"listingestionsourcesresult"},"ListIngestionSourcesResult"),(0,r.kt)("p",null,"Results returned when listing ingestion sources"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be 
returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ingestionSources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ingestionsource"},(0,r.kt)("code",null,"[IngestionSource!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Ingestion Sources themselves"))))),(0,r.kt)("h2",{id:"listownershiptypesresult"},"ListOwnershipTypesResult"),(0,r.kt)("p",null,"Results when listing custom ownership types."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownershiptypeentity"},(0,r.kt)("code",null,"[OwnershipTypeEntity!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Custom Ownership Types themselves"))))),(0,r.kt)("h2",{id:"listpoliciesresult"},"ListPoliciesResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub Access 
Policies"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Policies in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Policies in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"policies",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#policy"},(0,r.kt)("code",null,"[Policy!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Policies themselves"))))),(0,r.kt)("h2",{id:"listpostsresult"},"ListPostsResult"),(0,r.kt)("p",null,"The result obtained when listing Posts"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Roles in the returned result 
set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Roles in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"posts",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#post"},(0,r.kt)("code",null,"[Post!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Posts themselves"))))),(0,r.kt)("h2",{id:"listqueriesresult"},"ListQueriesResult"),(0,r.kt)("p",null,"Results when listing entity queries"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"queries",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#queryentity"},(0,r.kt)("code",null,"[QueryEntity!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Queries themselves"))))),(0,r.kt)("h2",{id:"listrecommendationsresult"},"ListRecommendationsResult"),(0,r.kt)("p",null,"Results returned by the ListRecommendations 
query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"modules",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#recommendationmodule"},(0,r.kt)("code",null,"[RecommendationModule!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of modules to show"))))),(0,r.kt)("h2",{id:"listrolesresult"},"ListRolesResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub Roles"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Roles in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Roles in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"roles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datahubrole"},(0,r.kt)("code",null,"[DataHubRole!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Roles themselves"))))),(0,r.kt)("h2",{id:"listsecretsresult"},"ListSecretsResult"),(0,r.kt)("p",null,"Input for listing DataHub 
Secrets"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of results to be returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of results in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"secrets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#secret"},(0,r.kt)("code",null,"[Secret!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The secrets themselves"))))),(0,r.kt)("h2",{id:"listtestsresult"},"ListTestsResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub Tests"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Tests in the returned result 
set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Tests in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tests",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#test"},(0,r.kt)("code",null,"[Test!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Tests themselves"))))),(0,r.kt)("h2",{id:"listusersresult"},"ListUsersResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub Users"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Policies in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Policies in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"[CorpUser!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The users themselves"))))),(0,r.kt)("h2",{id:"listviewsresult"},"ListViewsResult"),(0,r.kt)("p",null,"The result obtained when listing DataHub 
Views"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The starting offset of the result set returned"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of Views in the returned result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of Views in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"views",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datahubview"},(0,r.kt)("code",null,"[DataHubView!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Views themselves"))))),(0,r.kt)("h2",{id:"managedingestionconfig"},"ManagedIngestionConfig"),(0,r.kt)("p",null,"Configurations related to managed, UI based ingestion"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether ingestion screen is enabled in the UI"))))),(0,r.kt)("h2",{id:"matchedfield"},"MatchedField"),(0,r.kt)("p",null,"An overview of the field that was matched in the entity search 
document"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of the field that matched"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Value of the field that matched"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity if the value is an urn"))))),(0,r.kt)("h2",{id:"media"},"Media"),(0,r.kt)("p",null,"Media content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#mediatype"},(0,r.kt)("code",null,"MediaType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of media"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"location",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The location of the media (a 
URL)"))))),(0,r.kt)("h2",{id:"metrics"},"Metrics"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"performanceMeasures",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Measures of ML Model performance"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"decisionThreshold",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Decision Thresholds used if any"))))),(0,r.kt)("h2",{id:"mlfeature"},"MLFeature"),(0,r.kt)("p",null,"An ML Feature Metadata Entity Note that this entity is incubating"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the ML Feature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity 
Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name for the ML Feature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"featureNamespace",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"MLFeature featureNamespace"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description about the ML Feature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#mlfeaturedatatype"},(0,r.kt)("code",null,"MLFeatureDataType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"MLFeature data type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"featureProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeatureproperties"},(0,r.kt)("code",null,"MLFeatureProperties"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"ModelProperties metadata of the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeatureproperties"},(0,r.kt)("code",null,"MLFeatureProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"ModelProperties 
metadata of the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. 
If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage 
graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags applied to entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeatureeditableproperties"},(0,r.kt)("code",null,"MLFeatureEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on 
DataHub"))))),(0,r.kt)("h2",{id:"mlfeatureeditableproperties"},"MLFeatureEditableProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The edited description"))))),(0,r.kt)("h2",{id:"mlfeatureproperties"},"MLFeatureProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#mlfeaturedatatype"},(0,r.kt)("code",null,"MLFeatureDataType"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#versiontag"},(0,r.kt)("code",null,"VersionTag"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"[Dataset]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"mlfeaturetable"},"MLFeatureTable"),(0,r.kt)("p",null,"An ML Feature Table Entity Note that this entity is 
incubating"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the ML Feature Table"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the MLFeatureTable is 
defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"MLFeatureTable description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the MLFeatureTable"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeaturetableproperties"},(0,r.kt)("code",null,"MLFeatureTableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties associated the the ML Feature Table"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"featureTableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeaturetableproperties"},(0,r.kt)("code",null,"MLFeatureTableProperties"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead ModelProperties metadata of the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the MLFeature"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the 
MLFeatureTable"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage 
graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the ML Feature Table. If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. 
If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags applied to entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeaturetableeditableproperties"},(0,r.kt)("code",null,"MLFeatureTableEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"mlfeaturetableeditableproperties"},"MLFeatureTableEditableProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The edited 
description"))))),(0,r.kt)("h2",{id:"mlfeaturetableproperties"},"MLFeatureTableProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"mlFeatures",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlfeature"},(0,r.kt)("code",null,"[MLFeature]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"mlPrimaryKeys",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlprimarykey"},(0,r.kt)("code",null,"[MLPrimaryKey]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"mlhyperparam"},"MLHyperParam"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"
},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"mlmetric"},"MLMetric"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"mlmodel"},"MLModel"),(0,r.kt)("p",null,"An ML Model Metadata Entity Note that this entity is 
incubating"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the ML model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"ML model display name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the MLModel is 
defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"origin",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#fabrictype"},(0,r.kt)("code",null,"FabricType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Fabric type where mlmodel belongs to or where it was generated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Human readable description for mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags field instead The standard tags for the ML Model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard tags for the ML Model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelproperties"},(0,r.kt)("code",null,"MLModelProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only information about the ML Model"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"intendedUse",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#intendeduse"},(0,r.kt)("code",null,"IntendedUse"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Intended use of the 
mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"factorPrompts",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelfactorprompts"},(0,r.kt)("code",null,"MLModelFactorPrompts"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Factors metadata of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metrics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#metrics"},(0,r.kt)("code",null,"Metrics"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Metrics metadata of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"evaluationData",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#basedata"},(0,r.kt)("code",null,"[BaseData!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Evaluation Data of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"trainingData",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#basedata"},(0,r.kt)("code",null,"[BaseData!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Training Data of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"quantitativeAnalyses",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#quantitativeanalyses"},(0,r.kt)("code",null,"QuantitativeAnalyses"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Quantitative Analyses of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ethicalConsiderations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ethicalconsiderations"},(0,r.kt)("code",null,"EthicalConsiderations"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ethical Considerations of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"caveatsAndRecommendations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#caveatsandrecommendations"},(0,r.kt)("code",null,"CaveatsAndRecommendations"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Caveats and Recommendations of the 
mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceCode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#sourcecode"},(0,r.kt)("code",null,"SourceCode"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Code"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cost",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#cost"},(0,r.kt)("code",null,"Cost"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Cost Aspect of the mlmodel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the ML Model. If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. 
If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodeleditableproperties"},(0,r.kt)("code",null,"MLModelEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"mlmodeleditableproperties"},"MLModelEditableProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The edited 
description"))))),(0,r.kt)("h2",{id:"mlmodelfactorprompts"},"MLModelFactorPrompts"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"relevantFactors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelfactors"},(0,r.kt)("code",null,"[MLModelFactors!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"What are foreseeable salient factors for which MLModel performance may vary, and how were these determined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"evaluationFactors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelfactors"},(0,r.kt)("code",null,"[MLModelFactors!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Which factors are being reported, and why were these chosen"))))),(0,r.kt)("h2",{id:"mlmodelfactors"},"MLModelFactors"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Distinct categories with similar characteristics that are present in the evaluation data instances"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"instrumentation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Instrumentation used for MLModel"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"environment",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Environment in which the MLModel is 
deployed"))))),(0,r.kt)("h2",{id:"mlmodelgroup"},"MLModelGroup"),(0,r.kt)("p",null,"An ML Model Group Metadata Entity\nNote that this entity is incubating"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the ML Model Group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name for the 
Entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the MLModelGroup is defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"origin",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#fabrictype"},(0,r.kt)("code",null,"FabricType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Fabric type where MLModelGroup belongs to or where it was generated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Human readable description for MLModelGroup"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelgroupproperties"},(0,r.kt)("code",null,"MLModelGroupProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties about the ML Model Group"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the MLModelGroup"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the MLModelGroup"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The 
specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the ML Model Group. 
If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags applied to entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelgroupeditableproperties"},(0,r.kt)("code",null,"MLModelGroupEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on 
DataHub"))))),(0,r.kt)("h2",{id:"mlmodelgroupeditableproperties"},"MLModelGroupEditableProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The edited description"))))),(0,r.kt)("h2",{id:"mlmodelgroupproperties"},"MLModelGroupProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#versiontag"},(0,r.kt)("code",null,"VersionTag"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"mlmodelproperties"},"MLModelProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"date",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",nu
ll)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"hyperParameters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#hyperparametermap"},(0,r.kt)("code",null,"HyperParameterMap"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"hyperParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlhyperparam"},(0,r.kt)("code",null,"[MLHyperParam]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"trainingMetrics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmetric"},(0,r.kt)("code",null,"[MLMetric]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"mlFeatures",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"groups",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlmodelgroup"},(0,r.kt)("code",null,"[MLModelGroup]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"mlprimarykey"},"MLPrimaryKey"),(0,r.kt)("p",null,"An ML Primary Key Entity Note that this entity is 
incubating"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entitywithrelationships"},"EntityWithRelationships")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the ML Primary Key"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastIngested",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The timestamp for the last time this entity was ingested"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The display name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"featureNamespace",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"MLPrimaryKey featureNamespace"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"MLPrimaryKey 
description"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#mlfeaturedatatype"},(0,r.kt)("code",null,"MLFeatureDataType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"MLPrimaryKey data type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlprimarykeyproperties"},(0,r.kt)("code",null,"MLPrimaryKeyProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only properties of the ML Primary Key"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"primaryKeyProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlprimarykeyproperties"},(0,r.kt)("code",null,"MLPrimaryKeyProperties"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties field instead MLPrimaryKeyProperties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the MLPrimaryKey"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the MLPrimaryKey"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the 
MLPrimaryKey"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Deprecation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lineage",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitylineageresult"},(0,r.kt)("code",null,"EntityLineageResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity grouped by direction in the lineage 
graph"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#lineageinput"},(0,r.kt)("code",null,"LineageInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags applied to entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#mlprimarykeyeditableproperties"},(0,r.kt)("code",null,"MLPrimaryKeyEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on 
DataHub"))))),(0,r.kt)("h2",{id:"mlprimarykeyeditableproperties"},"MLPrimaryKeyEditableProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The edited description"))))),(0,r.kt)("h2",{id:"mlprimarykeyproperties"},"MLPrimaryKeyProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#mlfeaturedatatype"},(0,r.kt)("code",null,"MLFeatureDataType"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#versiontag"},(0,r.kt)("code",null,"VersionTag"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"[Dataset]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"namedbar"},"NamedBar"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"segments",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#barsegment"},(0,r.kt)("code",null,"[BarSegment!]!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"namedline"},"NamedLine"),(0,r.kt)("p",null,"For consumption by UI only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"data",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#numericdatapoint"},(0,r.kt)("code",null,"[NumericDataPoint!]!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"notebook"},"Notebook"),(0,r.kt)("p",null,"A Notebook Metadata 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity")),(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#browsableentity"},"BrowsableEntity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tool",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Notebook tool name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"notebookId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An id unique within the Notebook tool"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#notebookinfo"},(0,r.kt)("code",null,"NotebookInfo"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read only information about the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#notebookeditableproperties"},(0,r.kt)("code",null,"NotebookEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional read write 
properties about the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status metadata of the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"content",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#notebookcontent"},(0,r.kt)("code",null,"NotebookContent!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The content of this Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tags associated with the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataPlatformInstance",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatforminstance"},(0,r.kt)("code",null,"DataPlatformInstance"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific instance of the data platform that this entity belongs 
to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#subtypes"},(0,r.kt)("code",null,"SubTypes"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Sub Types that this entity implements"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePaths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepath"},(0,r.kt)("code",null,"[BrowsePath!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse paths corresponding to the Notebook. 
If no Browse Paths have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"browsePathV2",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#browsepathv2"},(0,r.kt)("code",null,"BrowsePathV2"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The browse path V2 corresponding to an entity. If no Browse Paths V2 have been generated before, this will be null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"exists",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether or not this entity exists on DataHub"))))),(0,r.kt)("h2",{id:"notebookcell"},"NotebookCell"),(0,r.kt)("p",null,"The Union of every NotebookCell"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"chartCell",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#chartcell"},(0,r.kt)("code",null,"ChartCell"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The chart cell content. The will be non-null only when all other cell field is null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"textCell",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#textcell"},(0,r.kt)("code",null,"TextCell"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The text cell content. The will be non-null only when all other cell field is null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"queryChell",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#querycell"},(0,r.kt)("code",null,"QueryCell"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query cell content. 
The will be non-null only when all other cell field is null."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#notebookcelltype"},(0,r.kt)("code",null,"NotebookCellType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of this Notebook cell"))))),(0,r.kt)("h2",{id:"notebookcontent"},"NotebookContent"),(0,r.kt)("p",null,"The actual content in a Notebook"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"cells",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#notebookcell"},(0,r.kt)("code",null,"[NotebookCell!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The content of a Notebook which is composed by a list of NotebookCell"))))),(0,r.kt)("h2",{id:"notebookeditableproperties"},"NotebookEditableProperties"),(0,r.kt)("p",null,"Notebook properties that are editable via the UI This represents logical metadata,\nas opposed to technical metadata"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Notebook"))))),(0,r.kt)("h2",{id:"notebookinfo"},"NotebookInfo"),(0,r.kt)("p",null,"Additional read only information about a 
Notebook"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Display of the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Native platform URL of the Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#custompropertiesentry"},(0,r.kt)("code",null,"[CustomPropertiesEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform specific metadata tuples"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"changeAuditStamps",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#changeauditstamps"},(0,r.kt)("code",null,"ChangeAuditStamps"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Captures information about who created/last modified/deleted this Notebook and when"))))),(0,r.kt)("h2",{id:"numericdatapoint"},"NumericDataPoint"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"x",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"y",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"operation"},"Operation"),(0,r.kt)("p",null,"Operational info for an entity."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#timeseriesaspect"},"TimeSeriesAspect"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the operation was reported"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actor",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Actor who issued this operation."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"operationType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#operationtype"},(0,r.kt)("code",null,"OperationType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Operation type of 
change."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customOperationType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A custom operation type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#operationsourcetype"},(0,r.kt)("code",null,"OperationSourceType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source of the operation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"numAffectedRows",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"How many rows were affected by this operation."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"affectedDatasets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Which other datasets were affected by this operation."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastUpdatedTimestamp",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"When time at which the asset was actually updated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"partition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional partition identifier"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"customProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom operation properties"))))),(0,r.kt)("h2",{id:"origin"},"Origin"),(0,r.kt)("p",null,"Carries information about where an entity originated 
from."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#origintype"},(0,r.kt)("code",null,"OriginType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Where an entity originated from. Either NATIVE or EXTERNAL"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"externalType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Only populated if type is EXTERNAL. The externalType of the entity, such as the name of the identity provider."))))),(0,r.kt)("h2",{id:"owner"},"Owner"),(0,r.kt)("p",null,"An owner of a Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"owner",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#ownertype"},(0,r.kt)("code",null,"OwnerType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Owner object"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownershiptype"},(0,r.kt)("code",null,"OwnershipType"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"The type of the ownership. 
Deprecated - Use ownershipType field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownershipType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownershiptypeentity"},(0,r.kt)("code",null,"OwnershipTypeEntity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership type information"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"source",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownershipsource"},(0,r.kt)("code",null,"OwnershipSource"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source information for the ownership"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Reference back to the owned urn for tracking purposes e.g. when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"ownership"},"Ownership"),(0,r.kt)("p",null,"Ownership information about a Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"owners",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#owner"},(0,r.kt)("code",null,"[Owner!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of owners of the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Audit stamp containing who last modified the record and when"))))),(0,r.kt)("h2",{id:"ownershipsource"},"OwnershipSource"),(0,r.kt)("p",null,"Information about the source of Ownership metadata about a Metadata 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#ownershipsourcetype"},(0,r.kt)("code",null,"OwnershipSourceType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the source"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"url",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional reference URL for the source"))))),(0,r.kt)("h2",{id:"ownershiptypeentity"},"OwnershipTypeEntity"),(0,r.kt)("p",null,"A single Custom Ownership Type"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A primary key associated with the custom ownership type."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"info",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownershiptypeinfo"},(0,r.kt)("code",null,"OwnershipTypeInfo"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Information about the Custom Ownership 
Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status of the Custom Ownership Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from the Custom Ownership Type"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"ownershiptypeinfo"},"OwnershipTypeInfo"),(0,r.kt)("p",null,"Properties about an individual Custom Ownership Type."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Custom Ownership Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the Custom Ownership 
Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An Audit Stamp corresponding to the creation of this resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An Audit Stamp corresponding to the update of this resource"))))),(0,r.kt)("h2",{id:"parentcontainersresult"},"ParentContainersResult"),(0,r.kt)("p",null,"All of the parent containers for a given entity. Returns parents with direct parent first followed by the parent's parent etc."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of containers bubbling up for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"containers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#container"},(0,r.kt)("code",null,"[Container!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of parent containers in order from direct parent, to parent's parent etc. 
If there are no containers, return an emty list"))))),(0,r.kt)("h2",{id:"parentnodesresult"},"ParentNodesResult"),(0,r.kt)("p",null,"All of the parent nodes for GlossaryTerms and GlossaryNodes"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of parent nodes bubbling up for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nodes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossarynode"},(0,r.kt)("code",null,"[GlossaryNode!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of parent nodes in order from direct parent, to parent's parent etc. If there are no nodes, return an empty list"))))),(0,r.kt)("h2",{id:"partitionspec"},"PartitionSpec"),(0,r.kt)("p",null,"Information about the partition being profiled"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#partitiontype"},(0,r.kt)("code",null,"PartitionType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The partition type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"partition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The partition identifier"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"timePartition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#timewindow"},(0,r.kt)("code",null,"TimeWindow"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The 
optional time window partition information"))))),(0,r.kt)("h2",{id:"platformprivileges"},"PlatformPrivileges"),(0,r.kt)("p",null,"The platform privileges that the currently authenticated user has"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewAnalytics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to view analytics"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"managePolicies",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to manage policies"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageIdentities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to manage users & groups"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"generatePersonalAccessTokens",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to generate personal access tokens"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createDomains",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to create new Domains"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageDomains",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to manage 
Domains"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageIngestion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user is able to manage UI-based ingestion"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageSecrets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user is able to manage UI-based secrets"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageTokens",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to manage tokens on behalf of other users."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageTests",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user is able to manage Tests"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageGlossaries",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to manage Glossaries"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageUserCredentials",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user is able to manage user credentials"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to create new 
Tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to create and delete all Tags"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageGlobalViews",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to create, update, and delete global views."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageOwnershipTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user should be able to create, update, and delete ownership types."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"manageGlobalAnnouncements",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the user can create and delete posts pinned to the home page."))))),(0,r.kt)("h2",{id:"policiesconfig"},"PoliciesConfig"),(0,r.kt)("p",null,"Configurations related to the Policies Feature"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the policies feature is enabled and should be displayed in the UI"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformPrivileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#privilege"},(0,r.kt)("code",null,"[Privilege!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of platform privileges to 
display in the Policy Builder experience"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourcePrivileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#resourceprivileges"},(0,r.kt)("code",null,"[ResourcePrivileges!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of resource privileges to display in the Policy Builder experience"))))),(0,r.kt)("h2",{id:"policy"},"Policy"),(0,r.kt)("p",null,"DEPRECATED\nTODO: Eventually get rid of this in favor of DataHub Policy\nAn DataHub Platform Access Policy Access Policies determine who can perform what actions against which resources on the platform"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policytype"},(0,r.kt)("code",null,"PolicyType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"state",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policystate"},(0,r.kt)("code",null,"PolicyState!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The present state of the Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the 
Policy"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#resourcefilter"},(0,r.kt)("code",null,"ResourceFilter"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The resources that the Policy privileges apply to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The privileges that the Policy grants"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#actorfilter"},(0,r.kt)("code",null,"ActorFilter!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actors that the Policy grants privileges to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editable",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the Policy is editable, ie system policies, or not"))))),(0,r.kt)("h2",{id:"policymatchcriterion"},"PolicyMatchCriterion"),(0,r.kt)("p",null,"Criterion to define relationship between field and values"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the field that the criterion refers to e.g. entity_type, entity_urn, domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"values",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#policymatchcriterionvalue"},(0,r.kt)("code",null,"[PolicyMatchCriterionValue!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Values. 
Matches criterion if any one of the values matches condition (OR-relationship)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"condition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#policymatchcondition"},(0,r.kt)("code",null,"PolicyMatchCondition!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the field that the criterion refers to"))))),(0,r.kt)("h2",{id:"policymatchcriterionvalue"},"PolicyMatchCriterionValue"),(0,r.kt)("p",null,"Value in PolicyMatchCriterion with hydrated entity if value is urn"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The value of the field to match"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Hydrated entities of the above values. 
Only set if the value is an urn"))))),(0,r.kt)("h2",{id:"policymatchfilter"},"PolicyMatchFilter"),(0,r.kt)("p",null,"Filter object that encodes a complex filter logic with OR + AND"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"criteria",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#policymatchcriterion"},(0,r.kt)("code",null,"[PolicyMatchCriterion!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of criteria to apply"))))),(0,r.kt)("h2",{id:"post"},"Post"),(0,r.kt)("p",null,"Input provided when creating a Post"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from the 
Post"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"postType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#posttype"},(0,r.kt)("code",null,"PostType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"content",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#postcontent"},(0,r.kt)("code",null,"PostContent!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The content of the post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"When the post was last modified"))))),(0,r.kt)("h2",{id:"postcontent"},"PostContent"),(0,r.kt)("p",null,"Post content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"contentType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#postcontenttype"},(0,r.kt)("code",null,"PostContentType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of post content"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The title of the 
post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional content of the post"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"link",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional link that the post is associated with"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"media",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#media"},(0,r.kt)("code",null,"Media"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional media contained in the post"))))),(0,r.kt)("h2",{id:"privilege"},"Privilege"),(0,r.kt)("p",null,"An individual DataHub Access Privilege"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized privilege type, serving as a unique identifier for a privilege eg EDIT_ENTITY"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"displayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name to appear when displaying the privilege, eg Edit Entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A description of the privilege to display"))))),(0,r.kt)("h2",{id:"privileges"},"Privileges"),(0,r.kt)("p",null,"Object that encodes the privileges the actor has for a given 
resource"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granted Privileges"))))),(0,r.kt)("h2",{id:"quantitativeanalyses"},"QuantitativeAnalyses"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"unitaryResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#resultstype"},(0,r.kt)("code",null,"ResultsType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Link to a dashboard with results showing how the model performed with respect to each factor"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"intersectionalResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#resultstype"},(0,r.kt)("code",null,"ResultsType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Link to a dashboard with results showing how the model performed with respect to the intersection of evaluated factors"))))),(0,r.kt)("h2",{id:"queriestabconfig"},"QueriesTabConfig"),(0,r.kt)("p",null,"Configuration for the queries tab"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"queriesTabResultSize",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Number of queries to show in the queries 
tab"))))),(0,r.kt)("h2",{id:"querycell"},"QueryCell"),(0,r.kt)("p",null,"A Notebook cell which contains Query as content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"cellTitle",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title of the cell"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cellId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique id for the cell."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"changeAuditStamps",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#changeauditstamps"},(0,r.kt)("code",null,"ChangeAuditStamps"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Captures information about who created/last modified/deleted this TextCell and when"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"rawQuery",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Raw query to explain some specific logic in a Notebook"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastExecuted",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Captures information about who last executed this query cell and when"))))),(0,r.kt)("h2",{id:"queryentity"},"QueryEntity"),(0,r.kt)("p",null,"An individual 
Query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A primary key associated with the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#queryproperties"},(0,r.kt)("code",null,"QueryProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Properties about the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subjects",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#querysubject"},(0,r.kt)("code",null,"[QuerySubject!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Subjects for the query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"queryproperties"},"QueryProperties"),(0,r.kt)("p",null,"Properties about an individual Query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"statement",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#querystatement"},(0,r.kt)("code",null,"QueryStatement!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Query statement itself"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"source",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#querysource"},(0,r.kt)("code",null,"QuerySource!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The source of the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The description of the Query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"created",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An Audit Stamp corresponding to the creation of this 
resource"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastModified",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#auditstamp"},(0,r.kt)("code",null,"AuditStamp!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An Audit Stamp corresponding to the update of this resource"))))),(0,r.kt)("h2",{id:"querystatement"},"QueryStatement"),(0,r.kt)("p",null,"An individual Query Statement"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The query statement value"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"language",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#querylanguage"},(0,r.kt)("code",null,"QueryLanguage!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The language for the Query Statement"))))),(0,r.kt)("h2",{id:"querysubject"},"QuerySubject"),(0,r.kt)("p",null,"The subject for a Query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"dataset",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataset"},(0,r.kt)("code",null,"Dataset!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The dataset which is the subject of the Query"))))),(0,r.kt)("h2",{id:"quickfilter"},"QuickFilter"),(0,r.kt)("p",null,"A quick filter in search and 
auto-complete"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"field",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of field to filter by"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Value to filter on"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity that the value maps to if any"))))),(0,r.kt)("h2",{id:"rawaspect"},"RawAspect"),(0,r.kt)("p",null,"Payload representing data about a single aspect"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"aspectName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the aspect"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"payload",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"JSON string containing the aspect's payload"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"renderSpec",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#aspectrenderspec"},(0,r.kt)("code",null,"AspectRenderSpec"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Details for the frontend on how the raw aspect should be 
rendered"))))),(0,r.kt)("h2",{id:"recommendationcontent"},"RecommendationContent"),(0,r.kt)("p",null,"Content to display within each recommendation module"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"String representation of content"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity being recommended. Empty if the content being recommended is not an entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"params",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#recommendationparams"},(0,r.kt)("code",null,"RecommendationParams"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional context required to generate the the recommendation"))))),(0,r.kt)("h2",{id:"recommendationmodule"},"RecommendationModule"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title of the module to display"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"moduleId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique id of the module being 
recommended"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"renderType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#recommendationrendertype"},(0,r.kt)("code",null,"RecommendationRenderType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Type of rendering that defines how the module should be rendered"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"content",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#recommendationcontent"},(0,r.kt)("code",null,"[RecommendationContent!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of content to display inside the module"))))),(0,r.kt)("h2",{id:"recommendationparams"},"RecommendationParams"),(0,r.kt)("p",null,"Parameters required to render a recommendation of a given type"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchparams"},(0,r.kt)("code",null,"SearchParams"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Context to define the search recommendations"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityProfileParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprofileparams"},(0,r.kt)("code",null,"EntityProfileParams"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Context to define the entity profile page"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"contentParams",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#contentparams"},(0,r.kt)("code",null,"ContentParams"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Context about the recommendation"))))),(0,r.kt)("h2",{id:"resettoken"},"ResetToken"),(0,r.kt)("p",null,"Token that allows native users to reset their 
credentials"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"resetToken",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The reset token"))))),(0,r.kt)("h2",{id:"resourcefilter"},"ResourceFilter"),(0,r.kt)("p",null,"The resources that a DataHub Access Policy applies to"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the resource the policy should apply to Not required because in the future we want to support filtering by type OR by domain"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of specific resource urns to apply the filter to"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"allResources",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether of not to apply the filter to all resources of the type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filter",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#policymatchfilter"},(0,r.kt)("code",null,"PolicyMatchFilter"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether of not to apply the filter to all resources of the 
type"))))),(0,r.kt)("h2",{id:"resourceprivileges"},"ResourcePrivileges"),(0,r.kt)("p",null,"A privilege associated with a particular resource type\nA resource is most commonly a DataHub Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Resource type associated with the Access Privilege, eg dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resourceTypeDisplayName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name to used for displaying the resourceType"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional entity type to use when performing search and navigation to the entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"privileges",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#privilege"},(0,r.kt)("code",null,"[Privilege!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A list of privileges that are supported against this 
resource"))))),(0,r.kt)("h2",{id:"role"},"Role"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A primary key of the Metadata Entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"List of relationships between the source Entity and some destination entities with a given types"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Id of the 
Role"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#roleproperties"},(0,r.kt)("code",null,"RoleProperties!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Role properties to include Request Access Url"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"actors",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#actor"},(0,r.kt)("code",null,"Actor!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))))),(0,r.kt)("h2",{id:"roleassociation"},"RoleAssociation"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"role",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#role"},(0,r.kt)("code",null,"Role!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Role entity itself"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Reference back to the tagged urn for tracking purposes e.g. 
when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"roleproperties"},"RoleProperties"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of the Role in an organisation "))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description about the role"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Role type can be READ, WRITE or ADMIN"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"requestUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Url to request a role for a user in an organisation"))))),(0,r.kt)("h2",{id:"roleuser"},"RoleUser"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"user",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Linked corp user of a role"))))),(0,r.kt)("h2",{id:"row"},"Row"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"values",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cells",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#cell"},(0,r.kt)("code",null,"[Cell!]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"schema"},"Schema"),(0,r.kt)("p",null,"Deprecated, use SchemaMetadata instead\nMetadata about a Dataset schema"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Dataset this schema metadata is associated with"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Platform this schema metadata is associated with"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The version of the GMS Schema 
metadata"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cluster",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The cluster this schema metadata is derived from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"hash",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The SHA1 hash of the schema content"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformSchema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#platformschema"},(0,r.kt)("code",null,"PlatformSchema"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The native schema in the datasets platform, schemaless if it was not provided"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafield"},(0,r.kt)("code",null,"[SchemaField!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Client provided a list of fields from value schema"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"primaryKeys",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Client provided list of fields that define primary keys to access record"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"foreignKeys",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#foreignkeyconstraint"},(0,r.kt)("code",null,"[ForeignKeyConstraint]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Client provided list of foreign key constraints"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the schema metadata information was 
created"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastObserved",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the schema metadata information was last ingested"))))),(0,r.kt)("h2",{id:"schemafield"},"SchemaField"),(0,r.kt)("p",null,"Information about an individual field in a Dataset schema"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flattened name of the field computed from jsonPath field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"jsonPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flattened name of a field in JSON Path notation"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"label",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Human readable label for the field. 
Not supplied by all data sources"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nullable",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Indicates if this field is optional or nullable"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#schemafielddatatype"},(0,r.kt)("code",null,"SchemaFieldDataType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Platform independent field type of the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"nativeDataType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The native type of the field in the datasets platform as declared by platform schema"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"recursive",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the field references its own type recursively"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"globalTags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use tags field instead Tags associated with the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags associated with the 
field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Glossary terms associated with the field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"isPartOfKey",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the field is part of a key schema"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"isPartitioningKey",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the field is part of a partitioning key schema"))))),(0,r.kt)("h2",{id:"schemafieldblame"},"SchemaFieldBlame"),(0,r.kt)("p",null,"Blame for a single field"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Flattened name of a schema field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schemaFieldChange",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafieldchange"},(0,r.kt)("code",null,"SchemaFieldChange!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Attributes identifying a field change"))))),(0,r.kt)("h2",{id:"schemafieldchange"},"SchemaFieldChange"),(0,r.kt)("p",null,"Attributes identifying a field 
change"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"timestampMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the schema was updated"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastSemanticVersion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The last semantic version that this schema was changed in"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"versionStamp",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Version stamp of the change"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"changeType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#changeoperationtype"},(0,r.kt)("code",null,"ChangeOperationType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the change"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lastSchemaFieldChange",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Last column update, such as Added/Modified/Removed in v1.2.3."))))),(0,r.kt)("h2",{id:"schemafieldentity"},"SchemaFieldEntity"),(0,r.kt)("p",null,"Standalone schema field entity. 
Differs from the SchemaField struct because it is not directly nested inside a\nschema field"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Primary key of the schema field"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fieldPath",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Field path identifying the field in its dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parent",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The field's parent."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this 
entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"schemafieldref"},"SchemaFieldRef"),(0,r.kt)("p",null,"A Dataset schema field (i.e. column)"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A schema field urn"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"path",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A schema field path"))))),(0,r.kt)("h2",{id:"schemametadata"},"SchemaMetadata"),(0,r.kt)("p",null,"Metadata about a Dataset schema"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#aspect"},"Aspect"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"aspectVersion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The logical version of the schema metadata, where 
zero represents the latest version with otherwise monotonic ordering starting at one"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"datasetUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Dataset this schema metadata is associated with"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema name"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Platform this schema metadata is associated with"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"version",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The version of the GMS Schema metadata"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cluster",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The cluster this schema metadata is derived from"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"hash",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The SHA1 hash of the schema content"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platformSchema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/unions#platformschema"},(0,r.kt)("code",null,"PlatformSchema"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The native schema in the datasets platform, schemaless if it was not provided"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schemafield"},(0,r.kt)("code",null,"[SchemaField!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Client provided 
a list of fields from value schema"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"primaryKeys",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Client provided list of fields that define primary keys to access record"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"foreignKeys",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#foreignkeyconstraint"},(0,r.kt)("code",null,"[ForeignKeyConstraint]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Client provided list of foreign key constraints"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"createdAt",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time at which the schema metadata information was created"))))),(0,r.kt)("h2",{id:"scrollacrosslineageresults"},"ScrollAcrossLineageResults"),(0,r.kt)("p",null,"Results returned by issueing a search across relationships query using scroll API"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"nextScrollId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Opaque ID to pass to the next request to the server"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities included in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of search results matching the query and 
filters"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchacrosslineageresult"},(0,r.kt)("code",null,"[SearchAcrossLineageResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The search result entities"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"facets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetmetadata"},(0,r.kt)("code",null,"[FacetMetadata!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Candidate facet aggregations used for search filtering"))))),(0,r.kt)("h2",{id:"scrollresults"},"ScrollResults"),(0,r.kt)("p",null,"Results returned by issuing a search query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"nextScrollId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Opaque ID to pass to the next request to the server"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities included in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of search results matching the query and filters"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchresult"},(0,r.kt)("code",null,"[SearchResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The search result entities for a scroll 
request"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"facets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetmetadata"},(0,r.kt)("code",null,"[FacetMetadata!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Candidate facet aggregations used for search filtering"))))),(0,r.kt)("h2",{id:"searchacrosslineageresult"},"SearchAcrossLineageResult"),(0,r.kt)("p",null,"Individual search result from a search across relationships query (has added metadata about the path)"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The resolved DataHub Metadata Entity matching the search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"insights",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchinsight"},(0,r.kt)("code",null,"[SearchInsight!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Insights about why the search result was matched"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"matchedFields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#matchedfield"},(0,r.kt)("code",null,"[MatchedField!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Matched field hint"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"paths",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entitypath"},(0,r.kt)("code",null,"[EntityPath]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional list of entities between the source and destination node"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"degree",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Degree of relationship (number of hops to get to 
entity)"))))),(0,r.kt)("h2",{id:"searchacrosslineageresults"},"SearchAcrossLineageResults"),(0,r.kt)("p",null,"Results returned by issueing a search across relationships query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities included in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of search results matching the query and filters"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchacrosslineageresult"},(0,r.kt)("code",null,"[SearchAcrossLineageResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The search result entities"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"facets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetmetadata"},(0,r.kt)("code",null,"[FacetMetadata!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Candidate facet aggregations used for search filtering"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"freshness",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#freshnessstats"},(0,r.kt)("code",null,"FreshnessStats"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Optional freshness characteristics of this query (cached, staleness 
etc.)"))))),(0,r.kt)("h2",{id:"searchinsight"},"SearchInsight"),(0,r.kt)("p",null,"Insights about why a search result was returned or ranked in the way that it was"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"text",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The insight to display"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"icon",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional emoji to display in front of the text"))))),(0,r.kt)("h2",{id:"searchparams"},"SearchParams"),(0,r.kt)("p",null,"Context to define the search recommendations"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"types",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"[EntityType!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Entity types to be searched. 
If this is not provided, all entities will be searched."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"query",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"filters",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetfilter"},(0,r.kt)("code",null,"[FacetFilter!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Filters"))))),(0,r.kt)("h2",{id:"searchresult"},"SearchResult"),(0,r.kt)("p",null,"An individual search result hit"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entity",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"Entity!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The resolved DataHub Metadata Entity matching the search query"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"insights",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchinsight"},(0,r.kt)("code",null,"[SearchInsight!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Insights about why the search result was matched"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"matchedFields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#matchedfield"},(0,r.kt)("code",null,"[MatchedField!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Matched field hint"))))),(0,r.kt)("h2",{id:"searchresults"},"SearchResults"),(0,r.kt)("p",null,"Results returned by issuing a search 
query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"start",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The offset of the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities included in the result set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"total",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of search results matching the query and filters"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchResults",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchresult"},(0,r.kt)("code",null,"[SearchResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The search result entities"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"facets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#facetmetadata"},(0,r.kt)("code",null,"[FacetMetadata!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Candidate facet aggregations used for search filtering"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"suggestions",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchsuggestion"},(0,r.kt)("code",null,"[SearchSuggestion!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Search suggestions based on the query provided for alternate query texts"))))),(0,r.kt)("h2",{id:"searchresultsvisualconfig"},"SearchResultsVisualConfig"),(0,r.kt)("p",null,"Configuration for a search 
result"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enableNameHighlight",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether a search result should highlight the name/description if it was matched on those fields."))))),(0,r.kt)("h2",{id:"searchsuggestion"},"SearchSuggestion"),(0,r.kt)("p",null,"A suggestion for an alternate search query given an original query compared to all\nof the entity names in our search index."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"text",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The suggested text based on the provided query text compared to the entity name field in the search index."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"score",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#float"},(0,r.kt)("code",null,"Float"))),(0,r.kt)("td",null,(0,r.kt)("p",null,'The "edit distance" for this suggestion. The closer this number is to 1, the closer the suggested text is to the original text. 
The closer it is to 0, the further from the original text it is.'))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"frequency",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of entities that would match on the name field given the suggested text"))))),(0,r.kt)("h2",{id:"secret"},"Secret"),(0,r.kt)("p",null,"A referencible secret stored in DataHub's system. Notice that we do not return the actual secret value."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The urn of the secret"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the secret"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional description for the secret"))))),(0,r.kt)("h2",{id:"secretvalue"},"SecretValue"),(0,r.kt)("p",null,"A plaintext secret value"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the 
secret"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The plaintext value of the secret."))))),(0,r.kt)("h2",{id:"semanticversionstruct"},"SemanticVersionStruct"),(0,r.kt)("p",null,"Properties identify a semantic version"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"semanticVersion",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Semantic version of the change"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"semanticVersionTimestamp",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Semantic version timestamp"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"versionStamp",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Version stamp of the change"))))),(0,r.kt)("h2",{id:"siblingproperties"},"SiblingProperties"),(0,r.kt)("p",null,"Metadata about the entity's siblings"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"isPrimary",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"If this entity is the primary sibling among the sibling 
set"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"siblings",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/interfaces#entity"},(0,r.kt)("code",null,"[Entity]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The sibling entities"))))),(0,r.kt)("h2",{id:"sourcecode"},"SourceCode"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceCode",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#sourcecodeurl"},(0,r.kt)("code",null,"[SourceCodeUrl!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Code along with types"))))),(0,r.kt)("h2",{id:"sourcecodeurl"},"SourceCodeUrl"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#sourcecodeurltype"},(0,r.kt)("code",null,"SourceCodeUrlType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Code Url Types"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"sourceCodeUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Source Code Url"))))),(0,r.kt)("h2",{id:"status"},"Status"),(0,r.kt)("p",null,"The status of a particular Metadata 
Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"removed",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the entity is removed or not"))))),(0,r.kt)("h2",{id:"stepstateresult"},"StepStateResult"),(0,r.kt)("p",null,"A single step state"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique id of the step"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#stringmapentry"},(0,r.kt)("code",null,"[StringMapEntry!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The properties for the step state"))))),(0,r.kt)("h2",{id:"stringbox"},"StringBox"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"stringValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"stringmapentry"},"StringMapEntry"),(0,r.kt)("p",null,"An entry in a string string map represented as a 
tuple"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"key",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The key of the map entry"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"value",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The value fo the map entry"))))),(0,r.kt)("h2",{id:"structuredreport"},"StructuredReport"),(0,r.kt)("p",null,"A flexible carrier for structured results of an execution request."),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The type of the structured report. (e.g. INGESTION_REPORT, TEST_CONNECTION_REPORT, etc.)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"serializedValue",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The serialized value of the structured report"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"contentType",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The content-type of the serialized value (e.g. 
application/json, application/json;gzip etc.)"))))),(0,r.kt)("h2",{id:"subtypes"},"SubTypes"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"typeNames",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,'The sub-types that this entity implements. e.g. Datasets that are views will implement the "view" subtype'))))),(0,r.kt)("h2",{id:"systemfreshness"},"SystemFreshness"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"systemName",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Name of the system"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"freshnessMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The latest timestamp in millis of the system that was used to respond to this query In case a cache was consulted, this reflects the freshness of the cache In case an index was consulted, this reflects the freshness of the index"))))),(0,r.kt)("h2",{id:"tablechart"},"TableChart"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"columns",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String!]!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"rows",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#row"},(0,r.kt)("code",null,"[Row!]!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"tableschema"},"TableSchema"),(0,r.kt)("p",null,"Information about a raw Table Schema"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"schema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Raw table schema"))))),(0,r.kt)("h2",{id:"tag"},"Tag"),(0,r.kt)("p",null,"A Tag Entity, which can be associated with other Metadata Entities and 
subresources"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the TAG"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"A unique identifier for the Tag. Deprecated - Use properties.name field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#tagproperties"},(0,r.kt)("code",null,"TagProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Additional properties about the Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#editabletagproperties"},(0,r.kt)("code",null,"EditableTagProperties"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Additional read write properties about the Tag Deprecated! 
Use 'properties' field instead."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Granular API for querying edges extending from this entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, use properties.description field instead"))))),(0,r.kt)("h2",{id:"tagassociation"},"TagAssociation"),(0,r.kt)("p",null,"An edge between a Metadata Entity and a Tag Modeled as a struct to permit\nadditional attributes\nTODO Consider whether this query should be serviced by the relationships field"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"tag",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#tag"},(0,r.kt)("code",null,"Tag!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tag 
itself"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"associatedUrn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Reference back to the tagged urn for tracking purposes e.g. when sibling nodes are merged together"))))),(0,r.kt)("h2",{id:"tagproperties"},"TagProperties"),(0,r.kt)("p",null,"Properties for a DataHub Tag"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A display name for the Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A description of the Tag"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"colorHex",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An optional RGB hex code for a Tag color, e.g. 
#FFFFFF"))))),(0,r.kt)("h2",{id:"telemetryconfig"},"TelemetryConfig"),(0,r.kt)("p",null,"Configurations related to tracking users in the app"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enableThirdPartyLogging",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Env variable for whether or not third party logging should be enabled for this instance"))))),(0,r.kt)("h2",{id:"test"},"Test"),(0,r.kt)("p",null,"A metadata entity representing a DataHub Test"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Test itself"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The name of the 
Test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"category",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The category of the Test (user defined)"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"description",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Description of the test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"definition",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#testdefinition"},(0,r.kt)("code",null,"TestDefinition!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Definition for the test"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unused for tests"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"testdefinition"},"TestDefinition"),(0,r.kt)("p",null,"Definition of the test"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"json",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"JSON-based def for the 
test"))))),(0,r.kt)("h2",{id:"testresult"},"TestResult"),(0,r.kt)("p",null,"The result of running a test"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"test",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#test"},(0,r.kt)("code",null,"Test"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The test itself, or null if the test has been deleted"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#testresulttype"},(0,r.kt)("code",null,"TestResultType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The final result, e.g. either SUCCESS or FAILURE."))))),(0,r.kt)("h2",{id:"testresults"},"TestResults"),(0,r.kt)("p",null,"A set of test results"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"passing",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#testresult"},(0,r.kt)("code",null,"[TestResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tests passing"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"failing",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#testresult"},(0,r.kt)("code",null,"[TestResult!]!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The tests failing"))))),(0,r.kt)("h2",{id:"testsconfig"},"TestsConfig"),(0,r.kt)("p",null,"Configurations related to DataHub Tests 
feature"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether Tests feature is enabled"))))),(0,r.kt)("h2",{id:"textcell"},"TextCell"),(0,r.kt)("p",null,"A Notebook cell which contains text as content"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"cellTitle",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Title of the cell"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"cellId",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique id for the cell."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"changeAuditStamps",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#changeauditstamps"},(0,r.kt)("code",null,"ChangeAuditStamps"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Captures information about who created/last modified/deleted this TextCell and when"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"text",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The actual text in a TextCell in a Notebook"))))),(0,r.kt)("h2",{id:"timeserieschart"},"TimeSeriesChart"),(0,r.kt)("p",null,"For consumption by UI 
only"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"title",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"lines",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#namedline"},(0,r.kt)("code",null,"[NamedLine!]!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"dateRange",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#daterange"},(0,r.kt)("code",null,"DateRange!"))),(0,r.kt)("td",null)),(0,r.kt)("tr",null,(0,r.kt)("td",null,"interval",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#dateinterval"},(0,r.kt)("code",null,"DateInterval!"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"timewindow"},"TimeWindow"),(0,r.kt)("p",null,"A time window with a finite start and end time"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"startTimeMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The start time of the time window"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"durationMillis",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The end time of the time window"))))),(0,r.kt)("h2",{id:"updatestepstateresult"},"UpdateStepStateResult"),(0,r.kt)("p",null,"Result returned when fetching step 
state"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"id",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Id of the step"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"succeeded",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the update succeeded."))))),(0,r.kt)("h2",{id:"upstreamentityrelationships"},"UpstreamEntityRelationships"),(0,r.kt)("p",null,"Deprecated, use relationships query instead"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"entities",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshiplegacy"},(0,r.kt)("code",null,"[EntityRelationshipLegacy]"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"usageaggregation"},"UsageAggregation"),(0,r.kt)("p",null,"An aggregation of Dataset usage statistics"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"bucket",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#long"},(0,r.kt)("code",null,"Long"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time window start 
time"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"duration",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#windowduration"},(0,r.kt)("code",null,"WindowDuration"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The time window span"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"resource",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The resource urn associated with the usage information, eg a Dataset urn"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"metrics",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#usageaggregationmetrics"},(0,r.kt)("code",null,"UsageAggregationMetrics"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The rolled up usage metrics"))))),(0,r.kt)("h2",{id:"usageaggregationmetrics"},"UsageAggregationMetrics"),(0,r.kt)("p",null,"Rolled up metrics about Dataset usage over time"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The unique number of users who have queried the dataset within the time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#userusagecounts"},(0,r.kt)("code",null,"[UserUsageCounts]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Usage statistics within the time range by user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"totalSqlQueries",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of queries issued against the dataset within the time 
range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"topSqlQueries",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"[String]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of common queries issued against the dataset within the time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#fieldusagecounts"},(0,r.kt)("code",null,"[FieldUsageCounts]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Per field usage statistics within the time range"))))),(0,r.kt)("h2",{id:"usagequeryresult"},"UsageQueryResult"),(0,r.kt)("p",null,"The result of a Dataset usage query"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"buckets",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#usageaggregation"},(0,r.kt)("code",null,"[UsageAggregation]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of relevant time windows for use in displaying usage statistics"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"aggregations",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#usagequeryresultaggregations"},(0,r.kt)("code",null,"UsageQueryResultAggregations"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"A set of rolled up aggregations about the Dataset usage"))))),(0,r.kt)("h2",{id:"usagequeryresultaggregations"},"UsageQueryResultAggregations"),(0,r.kt)("p",null,"A set of rolled up aggregations about the Dataset 
usage"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"uniqueUserCount",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The count of unique Dataset users within the queried time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"users",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#userusagecounts"},(0,r.kt)("code",null,"[UserUsageCounts]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific per user usage counts within the queried time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"fields",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#fieldusagecounts"},(0,r.kt)("code",null,"[FieldUsageCounts]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The specific per field usage counts within the queried time range"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"totalSqlQueries",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The total number of queries executed within the queried time range Note that this field will likely be deprecated in favor of a totalQueries field"))))),(0,r.kt)("h2",{id:"userusagecounts"},"UserUsageCounts"),(0,r.kt)("p",null,"Information about individual user usage of a Dataset"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"user",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#corpuser"},(0,r.kt)("code",null,"CorpUser"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The user of the 
Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"count",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#int"},(0,r.kt)("code",null,"Int"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The number of queries issued by the user"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"userEmail",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The extracted user email Note that this field will soon be deprecated and merged with user"))))),(0,r.kt)("h2",{id:"versioneddataset"},"VersionedDataset"),(0,r.kt)("p",null,"A Dataset entity, which encompasses Relational Tables, Document store collections, streaming topics, and other sets of data having an independent lifecycle"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Implements")),(0,r.kt)("ul",null,(0,r.kt)("li",{parentName:"ul"},(0,r.kt)("a",{parentName:"li",href:"/docs/graphql/interfaces#entity"},"Entity"))),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"urn",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The primary key of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"type",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#entitytype"},(0,r.kt)("code",null,"EntityType!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The standard Entity Type"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"platform",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataplatform"},(0,r.kt)("code",null,"DataPlatform!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Standardized platform urn where the dataset is 
defined"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"container",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#container"},(0,r.kt)("code",null,"Container"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The parent container in which the entity resides"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"parentContainers",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#parentcontainersresult"},(0,r.kt)("code",null,"ParentContainersResult"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Recursively get the lineage of containers for this entity"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"name",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Unique guid for dataset No longer to be used as the Dataset display name. Use properties.name instead"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"properties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#datasetproperties"},(0,r.kt)("code",null,"DatasetProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of read only properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#dataseteditableproperties"},(0,r.kt)("code",null,"DatasetEditableProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"An additional set of of read write properties"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"ownership",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#ownership"},(0,r.kt)("code",null,"Ownership"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Ownership metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"deprecation",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#deprecation"},(0,r.kt)("code",null,"Deprecation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The deprecation status of the 
dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"institutionalMemory",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#institutionalmemory"},(0,r.kt)("code",null,"InstitutionalMemory"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"References to internal resources related to the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"editableSchemaMetadata",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#editableschemametadata"},(0,r.kt)("code",null,"EditableSchemaMetadata"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Editable schema metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"status",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#status"},(0,r.kt)("code",null,"Status"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Status of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"tags",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#globaltags"},(0,r.kt)("code",null,"GlobalTags"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Tags used for searching dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"glossaryTerms",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#glossaryterms"},(0,r.kt)("code",null,"GlossaryTerms"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The structured glossary terms associated with the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"domain",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#domainassociation"},(0,r.kt)("code",null,"DomainAssociation"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The Domain associated with the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"health",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#health"},(0,r.kt)("code",null,"[Health!]"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Experimental! 
The resolved health status of the Dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"schema",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#schema"},(0,r.kt)("code",null,"Schema"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Schema metadata of the dataset"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"subTypes",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#subtypes"},(0,r.kt)("code",null,"SubTypes"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Sub Types that this entity implements"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"viewProperties",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#viewproperties"},(0,r.kt)("code",null,"ViewProperties"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"View related properties. Only relevant if subtypes field contains view."))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"origin",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/enums#fabrictype"},(0,r.kt)("code",null,"FabricType!"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"Deprecated, see the properties field instead Environment in which the dataset belongs to or where it was generated Note that this field will soon be deprecated in favor of a more standardized concept of Environment"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"relationships",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityrelationshipsresult"},(0,r.kt)("code",null,"EntityRelationshipsResult"))),(0,r.kt)("td",null,(0,r.kt)("blockquote",null,"Deprecated: No longer supported"),(0,r.kt)("p",null,"No-op, has to be included due to 
model"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Arguments")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"input",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/inputObjects#relationshipsinput"},(0,r.kt)("code",null,"RelationshipsInput!"))),(0,r.kt)("td",null)))))))),(0,r.kt)("h2",{id:"versiontag"},"VersionTag"),(0,r.kt)("p",null,"The technical version associated with a given Metadata Entity"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"versionTag",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null)))),(0,r.kt)("h2",{id:"viewproperties"},"ViewProperties"),(0,r.kt)("p",null,"Properties about a Dataset of type view"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"materialized",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether the view is materialized or not"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"logic",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The logic associated with the view, most commonly a SQL 
statement"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"language",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"The language in which the view logic is written, for example SQL"))))),(0,r.kt)("h2",{id:"viewsconfig"},"ViewsConfig"),(0,r.kt)("p",null,"Configurations related to DataHub Views feature"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"enabled",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#boolean"},(0,r.kt)("code",null,"Boolean!"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Whether Views feature is enabled"))))),(0,r.kt)("h2",{id:"visualconfig"},"VisualConfig"),(0,r.kt)("p",null,"Configurations related to visual appearance of the app"),(0,r.kt)("p",{style:{marginBottom:"0.4em"}},(0,r.kt)("strong",null,"Fields")),(0,r.kt)("table",null,(0,r.kt)("thead",null,(0,r.kt)("tr",null,(0,r.kt)("th",null,"Name"),(0,r.kt)("th",null,"Description"))),(0,r.kt)("tbody",null,(0,r.kt)("tr",null,(0,r.kt)("td",null,"logoUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom logo url for the homepage & top banner"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"faviconUrl",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/scalars#string"},(0,r.kt)("code",null,"String"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Custom favicon url for the homepage & top banner"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"queriesTab",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#queriestabconfig"},(0,r.kt)("code",null,"QueriesTabConfig"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configuration for the queries 
tab"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"entityProfiles",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#entityprofilesconfig"},(0,r.kt)("code",null,"EntityProfilesConfig"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configuration for the queries tab"))),(0,r.kt)("tr",null,(0,r.kt)("td",null,"searchResult",(0,r.kt)("br",null),(0,r.kt)("a",{href:"/docs/graphql/objects#searchresultsvisualconfig"},(0,r.kt)("code",null,"SearchResultsVisualConfig"))),(0,r.kt)("td",null,(0,r.kt)("p",null,"Configuration for search results"))))))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/4d58b9df.bd6b464d.js b/assets/js/4d58b9df.da938499.js similarity index 99% rename from assets/js/4d58b9df.bd6b464d.js rename to assets/js/4d58b9df.da938499.js index b4fc41e99fe42..05a4148ae059b 100644 --- a/assets/js/4d58b9df.bd6b464d.js +++ b/assets/js/4d58b9df.da938499.js @@ -1 +1 @@ -"use strict";(self.webpackChunkdocs_website=self.webpackChunkdocs_website||[]).push([[701],{3905:(e,t,a)=>{a.d(t,{Zo:()=>d,kt:()=>u});var n=a(67294);function l(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function i(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function s(e){for(var t=1;t=0||(l[a]=e[a]);return l}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(l[a]=e[a])}return l}var o=n.createContext({}),p=function(e){var t=n.useContext(o),a=t;return e&&(a="function"==typeof e?e(t):s(s({},t),e)),a},d=function(e){var t=p(e.components);return n.createElement(o.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},c=n.forwardRef((function(e,t){var 
a=e.components,l=e.mdxType,i=e.originalType,o=e.parentName,d=r(e,["components","mdxType","originalType","parentName"]),c=p(a),u=l,f=c["".concat(o,".").concat(u)]||c[u]||m[u]||i;return a?n.createElement(f,s(s({ref:t},d),{},{components:a})):n.createElement(f,s({ref:t},d))}));function u(e,t){var a=arguments,l=t&&t.mdxType;if("string"==typeof e||l){var i=a.length,s=new Array(i);s[0]=c;var r={};for(var o in t)hasOwnProperty.call(t,o)&&(r[o]=t[o]);r.originalType=e,r.mdxType="string"==typeof e?e:l,s[1]=r;for(var p=2;p{a.d(t,{Z:()=>s});var n=a(67294),l=a(86010);const i="tabItem_Ymn6";function s(e){let{children:t,hidden:a,className:s}=e;return n.createElement("div",{role:"tabpanel",className:(0,l.Z)(i,s),hidden:a},t)}},34259:(e,t,a)=>{a.d(t,{Z:()=>u});var n=a(83117),l=a(67294),i=a(86010),s=a(51048),r=a(33609),o=a(1943),p=a(72957);const d="tabList__CuJ",m="tabItem_LNqP";function c(e){const{lazy:t,block:a,defaultValue:s,values:c,groupId:u,className:f}=e,k=l.Children.map(e.children,(e=>{if((0,l.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),g=c??k.map((e=>{let{props:{value:t,label:a,attributes:n}}=e;return{value:t,label:a,attributes:n}})),N=(0,r.l)(g,((e,t)=>e.value===t.value));if(N.length>0)throw new Error(`Docusaurus error: Duplicate values "${N.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const h=null===s?s:s??k.find((e=>e.props.default))?.props.value??k[0].props.value;if(null!==h&&!g.some((e=>e.value===h)))throw new Error(`Docusaurus error: The has a defaultValue "${h}" but none of its children has the corresponding value. Available values are: ${g.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:y,setTabGroupChoices:b}=(0,o.U)(),[v,_]=(0,l.useState)(h),w=[],{blockElementScrollPositionUntilNextRender:x}=(0,p.o5)();if(null!=u){const e=y[u];null!=e&&e!==v&&g.some((t=>t.value===e))&&_(e)}const q=e=>{const t=e.currentTarget,a=w.indexOf(t),n=g[a].value;n!==v&&(x(t),_(n),null!=u&&b(u,String(n)))},D=e=>{let t=null;switch(e.key){case"Enter":q(e);break;case"ArrowRight":{const a=w.indexOf(e.currentTarget)+1;t=w[a]??w[0];break}case"ArrowLeft":{const a=w.indexOf(e.currentTarget)-1;t=w[a]??w[w.length-1];break}}t?.focus()};return l.createElement("div",{className:(0,i.Z)("tabs-container",d)},l.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,i.Z)("tabs",{"tabs--block":a},f)},g.map((e=>{let{value:t,label:a,attributes:s}=e;return l.createElement("li",(0,n.Z)({role:"tab",tabIndex:v===t?0:-1,"aria-selected":v===t,key:t,ref:e=>w.push(e),onKeyDown:D,onClick:q},s,{className:(0,i.Z)("tabs__item",m,s?.className,{"tabs__item--active":v===t})}),a??t)}))),t?(0,l.cloneElement)(k.filter((e=>e.props.value===v))[0],{className:"margin-top--md"}):l.createElement("div",{className:"margin-top--md"},k.map(((e,t)=>(0,l.cloneElement)(e,{key:t,hidden:e.props.value!==v})))))}function u(e){const t=(0,s.Z)();return l.createElement(c,(0,n.Z)({key:String(t)},e))}},10328:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>d,contentTitle:()=>o,default:()=>u,frontMatter:()=>r,metadata:()=>p,toc:()=>m});var n=a(83117),l=(a(67294),a(3905)),i=a(34259),s=a(18679);const r={sidebar_position:3,title:"BigQuery",slug:"/generated/ingestion/sources/bigquery",custom_edit_url:"https://github.com/datahub-project/datahub/blob/master/docs/generated/ingestion/sources/bigquery.md"},o="BigQuery",p={unversionedId:"docs/generated/ingestion/sources/bigquery",id:"docs/generated/ingestion/sources/bigquery",title:"BigQuery",description:"Ingesting metadata from BigQuery requires using the bigquery 
module.",source:"@site/genDocs/docs/generated/ingestion/sources/bigquery.md",sourceDirName:"docs/generated/ingestion/sources",slug:"/generated/ingestion/sources/bigquery",permalink:"/docs/generated/ingestion/sources/bigquery",draft:!1,editUrl:"https://github.com/datahub-project/datahub/blob/master/docs/generated/ingestion/sources/bigquery.md",tags:[],version:"current",sidebarPosition:3,frontMatter:{sidebar_position:3,title:"BigQuery",slug:"/generated/ingestion/sources/bigquery",custom_edit_url:"https://github.com/datahub-project/datahub/blob/master/docs/generated/ingestion/sources/bigquery.md"},sidebar:"overviewSidebar",previous:{title:"Azure AD",permalink:"/docs/generated/ingestion/sources/azure-ad"},next:{title:"Business Glossary",permalink:"/docs/generated/ingestion/sources/business-glossary"}},d={},m=[{value:"Important Capabilities",id:"important-capabilities",level:3},{value:"Prerequisites",id:"prerequisites",level:3},{value:"Create a datahub profile in GCP",id:"create-a-datahub-profile-in-gcp",level:4},{value:"Basic Requirements (needed for metadata ingestion)",id:"basic-requirements-needed-for-metadata-ingestion",level:5},{value:"Create a service account in the Extractor Project",id:"create-a-service-account-in-the-extractor-project",level:4},{value:"Profiling Requirements",id:"profiling-requirements",level:5},{value:"Lineage Computation Details",id:"lineage-computation-details",level:3},{value:"Profiling Details",id:"profiling-details",level:3},{value:"Caveats",id:"caveats",level:3},{value:"CLI based Ingestion",id:"cli-based-ingestion",level:3},{value:"Install the Plugin",id:"install-the-plugin",level:4},{value:"Starter Recipe",id:"starter-recipe",level:3},{value:"Config Details",id:"config-details",level:3},{value:"Code Coordinates",id:"code-coordinates",level:3}],c={toc:m};function u(e){let{components:t,...a}=e;return(0,l.kt)("wrapper",(0,n.Z)({},c,a,{components:t,mdxType:"MDXLayout"}),(0,l.kt)("h1",{id:"bigquery"},"BigQuery"),(0,l.kt)("p",null,"Ingesting 
metadata from BigQuery requires using the ",(0,l.kt)("strong",{parentName:"p"},"bigquery")," module.\n",(0,l.kt)("img",{parentName:"p",src:"https://img.shields.io/badge/support%20status-certified-brightgreen",alt:"Certified"})),(0,l.kt)("h3",{id:"important-capabilities"},"Important Capabilities"),(0,l.kt)("table",null,(0,l.kt)("thead",{parentName:"table"},(0,l.kt)("tr",{parentName:"thead"},(0,l.kt)("th",{parentName:"tr",align:null},"Capability"),(0,l.kt)("th",{parentName:"tr",align:null},"Status"),(0,l.kt)("th",{parentName:"tr",align:null},"Notes"))),(0,l.kt)("tbody",{parentName:"table"},(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},"Asset Containers"),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Enabled by default")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"/docs/metadata-ingestion/docs/dev_guides/sql_profiles"},"Data Profiling")),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Optionally enabled via configuration")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},"Dataset Usage"),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Enabled by default, can be disabled via configuration ",(0,l.kt)("inlineCode",{parentName:"td"},"include_usage_statistics"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},"Descriptions"),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Enabled by default")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"/docs/metadata-ingestion/docs/dev_guides/stateful#stale-entity-removal"},"Detect Deleted Entities")),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Optionally enabled 
via ",(0,l.kt)("inlineCode",{parentName:"td"},"stateful_ingestion.remove_stale_metadata"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"/docs/domains"},"Domains")),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Supported via the ",(0,l.kt)("inlineCode",{parentName:"td"},"domain")," config field")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"/docs/platform-instances"},"Platform Instance")),(0,l.kt)("td",{parentName:"tr",align:null},"\u274c"),(0,l.kt)("td",{parentName:"tr",align:null},"Platform instance is pre-set to the BigQuery project id")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},"Schema Metadata"),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Enabled by default")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},"Table-Level Lineage"),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Optionally enabled via configuration")))),(0,l.kt)("h3",{id:"prerequisites"},"Prerequisites"),(0,l.kt)("p",null,"To understand how BigQuery ingestion needs to be set up, first familiarize yourself with the concepts in the diagram below:"),(0,l.kt)("p",{align:"center"},(0,l.kt)("img",{width:"70%",src:"https://github.com/datahub-project/static-assets/raw/main/imgs/integrations/bigquery/source-bigquery-setup.png"})),(0,l.kt)("p",null,"There are two important concepts to understand and identify:"),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("em",{parentName:"li"},"Extractor Project"),": This is the project associated with a service-account, whose credentials you will be configuring in the connector. 
The connector uses this service-account to run jobs (including queries) within the project."),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("em",{parentName:"li"},"Bigquery Projects")," are the projects from which table metadata, lineage, usage, and profiling data need to be collected. By default, the extractor project is included in the list of projects that DataHub collects metadata from, but you can control that by passing in a specific list of project ids that you want to collect metadata from. Read the configuration section below to understand how to limit the list of projects that DataHub extracts metadata from. ")),(0,l.kt)("h4",{id:"create-a-datahub-profile-in-gcp"},"Create a datahub profile in GCP"),(0,l.kt)("ol",null,(0,l.kt)("li",{parentName:"ol"},"Create a custom role for datahub as per ",(0,l.kt)("a",{parentName:"li",href:"https://cloud.google.com/iam/docs/creating-custom-roles#creating_a_custom_role"},"BigQuery docs"),"."),(0,l.kt)("li",{parentName:"ol"},"Follow the sections below to grant permissions to this role on this project and other projects.")),(0,l.kt)("h5",{id:"basic-requirements-needed-for-metadata-ingestion"},"Basic Requirements (needed for metadata ingestion)"),(0,l.kt)("ol",null,(0,l.kt)("li",{parentName:"ol"},"Identify your Extractor Project where the service account will run queries to extract metadata.")),(0,l.kt)("table",null,(0,l.kt)("thead",{parentName:"table"},(0,l.kt)("tr",{parentName:"thead"},(0,l.kt)("th",{parentName:"tr",align:null},"permission \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("th",{parentName:"tr",align:null},"Description \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("th",{parentName:"tr",align:null},"Capability \xa0 \xa0 \xa0 \xa0 \xa0 
\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"))),(0,l.kt)("tbody",{parentName:"table"},(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.jobs.create")," \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Run jobs (e.g. queries) within the project. ",(0,l.kt)("em",{parentName:"td"},"This only needs for the extractor project where the service account belongs")," \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.jobs.list")," \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Manage the queries that the service account has sent. ",(0,l.kt)("em",{parentName:"td"},"This only needs for the extractor project where the service account belongs")),(0,l.kt)("td",{parentName:"tr",align:null},"\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.readsessions.create")," \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Create a session for streaming large results. 
",(0,l.kt)("em",{parentName:"td"},"This only needs for the extractor project where the service account belongs")," \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.readsessions.getData")),(0,l.kt)("td",{parentName:"tr",align:null},"Get data from the read session. ",(0,l.kt)("em",{parentName:"td"},"This only needs for the extractor project where the service account belongs")," \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null})))),(0,l.kt)("ol",{start:2},(0,l.kt)("li",{parentName:"ol"},"Grant the following permissions to the Service Account on every project where you would like to extract metadata from")),(0,l.kt)("admonition",{type:"info"},(0,l.kt)("p",{parentName:"admonition"},"If you have multiple projects in your BigQuery setup, the role should be granted these permissions in each of the projects.")),(0,l.kt)("table",null,(0,l.kt)("thead",{parentName:"table"},(0,l.kt)("tr",{parentName:"thead"},(0,l.kt)("th",{parentName:"tr",align:null},"permission \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("th",{parentName:"tr",align:null},"Description \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("th",{parentName:"tr",align:null},"Capability \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("th",{parentName:"tr",align:null},"Default GCP role which contains this permission \xa0 \xa0 \xa0 \xa0 
\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"))),(0,l.kt)("tbody",{parentName:"table"},(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.datasets.get")," \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Retrieve metadata about a dataset. \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.datasets.getIamPolicy")),(0,l.kt)("td",{parentName:"tr",align:null},"Read a dataset's IAM permissions. \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.tables.list")," \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"List BigQuery tables. 
\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.tables.get")," \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Retrieve metadata for a table. \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.routines.get")," \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Get Routines. Needs to retrieve metadata for a table from system table. 
\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.routines.list")," \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"List Routines. Needs to retrieve metadata for a table from system table \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"resourcemanager.projects.get")," \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Retrieve project names and metadata. 
\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.jobs.listAll")," \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"List all jobs (queries) submitted by any user. Needs for Lineage extraction. \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Lineage Extraction/Usage extraction"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.resourceViewer"},"roles/bigquery.resourceViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"logging.logEntries.list")," \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Fetch log entries for lineage/usage data. Not required if ",(0,l.kt)("inlineCode",{parentName:"td"},"use_exported_bigquery_audit_metadata")," is enabled."),(0,l.kt)("td",{parentName:"tr",align:null},"Lineage Extraction/Usage extraction"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/logging/docs/access-control#logging.privateLogViewer"},"roles/logging.privateLogViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"logging.privateLogEntries.list")),(0,l.kt)("td",{parentName:"tr",align:null},"Fetch log entries for lineage/usage data. 
Not required if ",(0,l.kt)("inlineCode",{parentName:"td"},"use_exported_bigquery_audit_metadata")," is enabled."),(0,l.kt)("td",{parentName:"tr",align:null},"Lineage Extraction/Usage extraction"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/logging/docs/access-control#logging.privateLogViewer"},"roles/logging.privateLogViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.tables.getData")," \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Access table data to extract storage size, last updated at, data profiles etc."),(0,l.kt)("td",{parentName:"tr",align:null},"Profiling \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0")))),(0,l.kt)("h4",{id:"create-a-service-account-in-the-extractor-project"},"Create a service account in the Extractor Project"),(0,l.kt)("ol",null,(0,l.kt)("li",{parentName:"ol"},"Setup a ServiceAccount as per ",(0,l.kt)("a",{parentName:"li",href:"https://cloud.google.com/iam/docs/creating-managing-service-accounts#iam-service-accounts-create-console"},"BigQuery docs"),"\nand assign the previously created role to this service account."),(0,l.kt)("li",{parentName:"ol"},"Download a service account JSON keyfile.\nExample credential file:")),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-json"},'{\n "type": "service_account",\n "project_id": "project-id-1234567",\n "private_key_id": "d0121d0000882411234e11166c6aaa23ed5d74e0",\n "private_key": "-----BEGIN PRIVATE KEY-----\\nMIIyourkey\\n-----END PRIVATE KEY-----",\n "client_email": 
"test@suppproject-id-1234567.iam.gserviceaccount.com",\n "client_id": "113545814931671546333",\n "auth_uri": "https://accounts.google.com/o/oauth2/auth",\n "token_uri": "https://oauth2.googleapis.com/token",\n "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",\n "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%suppproject-id-1234567.iam.gserviceaccount.com"\n}\n')),(0,l.kt)("ol",{start:3},(0,l.kt)("li",{parentName:"ol"},(0,l.kt)("p",{parentName:"li"},"To provide credentials to the source, you can either:"),(0,l.kt)("p",{parentName:"li"},"Set an environment variable:"),(0,l.kt)("pre",{parentName:"li"},(0,l.kt)("code",{parentName:"pre",className:"language-sh"},'$ export GOOGLE_APPLICATION_CREDENTIALS="/path/to/keyfile.json"\n')),(0,l.kt)("p",{parentName:"li"},(0,l.kt)("em",{parentName:"p"},"or")),(0,l.kt)("p",{parentName:"li"},"Set credential config in your source based on the credential json file. For example:"),(0,l.kt)("pre",{parentName:"li"},(0,l.kt)("code",{parentName:"pre",className:"language-yml"},'credential:\n project_id: project-id-1234567\n private_key_id: "d0121d0000882411234e11166c6aaa23ed5d74e0"\n private_key: "-----BEGIN PRIVATE KEY-----\\nMIIyourkey\\n-----END PRIVATE KEY-----\\n"\n client_email: "test@suppproject-id-1234567.iam.gserviceaccount.com"\n client_id: "123456678890"\n')))),(0,l.kt)("h5",{id:"profiling-requirements"},"Profiling Requirements"),(0,l.kt)("p",null,"To profile BigQuery external tables backed by Google Drive document, you need to grant document's \"Viewer\" access to service account's email address (",(0,l.kt)("inlineCode",{parentName:"p"},"client_email"),' in credentials json file). To find the Google Drive document linked to BigQuery table, open the BigQuery console, locate the needed table, select "Details" from the drop-down menu in the top-right corner and refer "Source" field . 
To share access of Google Drive document, open the document, click "Share" in the top-right corner, add the service account\'s email address that needs "Viewer" access. ',(0,l.kt)("img",{parentName:"p",src:"https://github.com/datahub-project/static-assets/raw/main/imgs/integrations/bigquery/google_drive_share.png",alt:"Google Drive Sharing Dialog"})),(0,l.kt)("h3",{id:"lineage-computation-details"},"Lineage Computation Details"),(0,l.kt)("p",null,"When ",(0,l.kt)("inlineCode",{parentName:"p"},"use_exported_bigquery_audit_metadata")," is set to ",(0,l.kt)("inlineCode",{parentName:"p"},"true"),", lineage information will be computed using exported bigquery logs. On how to setup exported bigquery audit logs, refer to the following ",(0,l.kt)("a",{parentName:"p",href:"https://cloud.google.com/bigquery/docs/reference/auditlogs#defining_a_bigquery_log_sink_using_gcloud"},"docs"),' on BigQuery audit logs. Note that only protoPayloads with "type.googleapis.com/google.cloud.audit.BigQueryAuditMetadata" are supported by the current ingestion version. The ',(0,l.kt)("inlineCode",{parentName:"p"},"bigquery_audit_metadata_datasets")," parameter will be used only if ",(0,l.kt)("inlineCode",{parentName:"p"},"use_exported_bigquery_audit_metadat")," is set to ",(0,l.kt)("inlineCode",{parentName:"p"},"true"),"."),(0,l.kt)("p",null,"Note: the ",(0,l.kt)("inlineCode",{parentName:"p"},"bigquery_audit_metadata_datasets")," parameter receives a list of datasets, in the format $PROJECT.$DATASET. This way queries from a multiple number of projects can be used to compute lineage information."),(0,l.kt)("p",null,"Note: Since bigquery source also supports dataset level lineage, the auth client will require additional permissions to be able to access the google audit logs. 
Refer the permissions section in bigquery-usage section below which also accesses the audit logs."),(0,l.kt)("h3",{id:"profiling-details"},"Profiling Details"),(0,l.kt)("p",null,"For performance reasons, we only profile the latest partition for partitioned tables and the latest shard for sharded tables.\nYou can set partition explicitly with ",(0,l.kt)("inlineCode",{parentName:"p"},"partition.partition_datetime")," property if you want, though note that partition config will be applied to all partitioned tables."),(0,l.kt)("h3",{id:"caveats"},"Caveats"),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},"For materialized views, lineage is dependent on logs being retained. If your GCP logging is retained for 30 days (default) and 30 days have passed since the creation of the materialized view we won't be able to get lineage for them.")),(0,l.kt)("h3",{id:"cli-based-ingestion"},"CLI based Ingestion"),(0,l.kt)("h4",{id:"install-the-plugin"},"Install the Plugin"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},"pip install 'acryl-datahub[bigquery]'\n")),(0,l.kt)("h3",{id:"starter-recipe"},"Starter Recipe"),(0,l.kt)("p",null,"Check out the following recipe to get started with ingestion! 
See ",(0,l.kt)("a",{parentName:"p",href:"#config-details"},"below")," for full configuration options."),(0,l.kt)("p",null,"For general pointers on writing and running a recipe, see our ",(0,l.kt)("a",{parentName:"p",href:"/docs/metadata-ingestion#recipes"},"main recipe guide"),"."),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-yaml"},"source:\n type: bigquery\n config:\n # `schema_pattern` for BQ Datasets\n schema_pattern:\n allow:\n - finance_bq_dataset\n table_pattern:\n deny:\n # The exact name of the table is revenue_table_name\n # The reason we have this `.*` at the beginning is because the current implmenetation of table_pattern is testing\n # project_id.dataset_name.table_name\n # We will improve this in the future\n - .*revenue_table_name\n include_table_lineage: true\n include_usage_statistics: true\n profiling:\n enabled: true\n profile_table_level_only: true\n\nsink:\n # sink configs\n\n")),(0,l.kt)("h3",{id:"config-details"},"Config Details"),(0,l.kt)(i.Z,{mdxType:"Tabs"},(0,l.kt)(s.Z,{value:"options",label:"Options",default:!0,mdxType:"TabItem"},(0,l.kt)("p",null,"Note that a ",(0,l.kt)("inlineCode",{parentName:"p"},".")," is used to denote nested fields in the YAML recipe."),(0,l.kt)("div",{className:"config-table"},(0,l.kt)("table",null,(0,l.kt)("thead",{parentName:"table"},(0,l.kt)("tr",{parentName:"thead"},(0,l.kt)("th",{parentName:"tr",align:"left"},"Field"),(0,l.kt)("th",{parentName:"tr",align:"left"},"Description"))),(0,l.kt)("tbody",{parentName:"table"},(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"bigquery_audit_metadata_datasets"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"bucket_duration"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"Enum"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Size of the time window to aggregate usage stats. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"DAY")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"capture_dataset_label_as_tag"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Capture BigQuery dataset labels as DataHub tag ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"capture_table_label_as_tag"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Capture BigQuery table labels as DataHub tag ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"column_limit"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Maximum number of columns to process in a table. This is a low level config property which should be touched with care. This restriction is needed because excessively wide tables can result in failure to ingest the schema. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"300")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"convert_urns_to_lowercase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Convert urns to lowercase. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"debug_include_full_payloads"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Include full payload into events. It is only for debugging and internal use. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"enable_legacy_sharded_table_support"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Use the legacy sharded table urn suffix added. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"end_time"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string(date-time)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Latest date of lineage/usage to consider. Default: Current time in UTC")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"extra_client_options"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"object"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Additional options to pass to google.cloud.logging_v2.client.Client. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"extract_column_lineage"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"If enabled, generate column level lineage. Requires lineage_use_sql_parser to be enabled. This and ",(0,l.kt)("inlineCode",{parentName:"td"},"incremental_lineage")," cannot both be enabled. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"extract_lineage_from_catalog"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"This flag enables the data lineage extraction from Data Lineage API exposed by Google Data Catalog. NOTE: This extractor can't build views lineage. It's recommended to enable the view's DDL parsing. Read the docs to have more information about: ",(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/data-catalog/docs/concepts/about-data-lineage"},"https://cloud.google.com/data-catalog/docs/concepts/about-data-lineage")," ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_data_platform_instance"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to create a DataPlatformInstance aspect, equal to the BigQuery project id. If enabled, will cause redundancy in the browse path for BigQuery entities in the UI, because the project id is represented as the top-level container. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_external_url"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to populate BigQuery Console url to Datasets/Tables ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_table_lineage"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Option to enable/disable lineage generation. Is enabled by default. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_table_location_lineage"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"If the source supports it, include table lineage to the underlying storage location. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_tables"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether tables should be ingested. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_usage_statistics"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Generate usage statistic ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_views"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether views should be ingested. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"incremental_lineage"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"When enabled, emits lineage as incremental to existing lineage already in DataHub. When disabled, re-states lineage on each run. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"lineage_parse_view_ddl"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Sql parse view ddl to get lineage. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"lineage_sql_parser_use_raw_names"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"This parameter ignores the lowercase pattern stipulated in the SQLParser. NOTE: Ignored if lineage_use_sql_parser is False. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"lineage_use_sql_parser"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Use sql parser to resolve view/table lineage. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"log_page_size"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"The number of log item will be queried per page for lineage collection ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"1000")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"match_fully_qualified_names"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether ",(0,l.kt)("inlineCode",{parentName:"td"},"dataset_pattern")," is matched against fully qualified dataset name ",(0,l.kt)("inlineCode",{parentName:"td"},"."),". 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"max_query_duration"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"number(time-delta)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Correction to pad start_time and end_time with. For handling the case where the read happens within our time range but the query completion event is delayed and happens after the configured end time. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"900.0")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"number_of_datasets_process_in_batch_if_profiling_enabled"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Number of partitioned table queried in batch when getting metadata. This is a low level config property which should be touched with care. This restriction is needed because we query partitions system view which throws error if we try to touch too many tables. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"200")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"options"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"object"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Any options specified here will be passed to ",(0,l.kt)("a",{parentName:"td",href:"https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine"},"SQLAlchemy.create_engine")," as kwargs.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"platform_instance"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"The instance of the platform that all assets produced by this recipe belong to")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"project_id"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"[deprecated]"," Use project_id_pattern or project_ids instead.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"project_ids"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"project_on_behalf"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"[Advanced]"," The BigQuery project in which queries are executed. Will be passed when creating a job. If not passed, falls back to the project associated with the service account.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"rate_limit"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Should we rate limit requests made to API. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"requests_per_min"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Used to control number of API calls made per min. Only used when ",(0,l.kt)("inlineCode",{parentName:"td"},"rate_limit")," is set to ",(0,l.kt)("inlineCode",{parentName:"td"},"True"),". 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"60")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"scheme"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"default-line "},"Default: ",(0,l.kt)("span",{className:"default-value"},"bigquery")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"sharded_table_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"The regex pattern to match sharded tables and group as one table. This is a very low level config parameter, only change if you know what you are doing, ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"((.+)","[","_","$","]",")?(\\d","{","8","}",")$")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"sql_parser_use_external_process"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"When enabled, sql parser will run in isolated in a separate process. This can affect processing time but can protect from sql parser's mem leak. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"start_time"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string(date-time)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on ",(0,l.kt)("inlineCode",{parentName:"td"},"bucket_duration"),"). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"store_last_lineage_extraction_timestamp"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Enable checking last lineage extraction date in store. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"store_last_profiling_timestamps"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Enable storing last profile timestamp in store. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"store_last_usage_extraction_timestamp"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Enable checking last usage timestamp in store. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"temp_table_dataset_prefix"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"If you are creating temp tables in a dataset with a particular prefix you can use this config to set the prefix for the dataset. This is to support workflows from before bigquery's introduction of temp tables. By default we use ",(0,l.kt)("inlineCode",{parentName:"td"},"_")," because of datasets that begin with an underscore are hidden by default ",(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/datasets#dataset-naming"},"https://cloud.google.com/bigquery/docs/datasets#dataset-naming"),". 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"_")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"upstream_lineage_in_report"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Useful for debugging lineage information. Set to True to see the raw lineage created internally. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"use_date_sharded_audit_log_tables"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to read date sharded tables or time partitioned tables when extracting usage from exported audit logs. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"use_exported_bigquery_audit_metadata"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"When configured, use BigQueryAuditMetadata in bigquery_audit_metadata_datasets to compute lineage information. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"env"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"The environment that all assets produced by this connector belong to ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"PROD")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"credential"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"BigQueryCredential"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"BigQuery credential informations")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"client_email"),"\xa0",(0,l.kt)("abbr",{title:"Required if credential is set"},"\u2753"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Client email")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"client_id"),"\xa0",(0,l.kt)("abbr",{title:"Required if credential is set"},"\u2753"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Client 
Id")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"private_key"),"\xa0",(0,l.kt)("abbr",{title:"Required if credential is set"},"\u2753"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Private key in a form of '-----BEGIN PRIVATE KEY-----\\nprivate-key\\n-----END PRIVATE KEY-----\\n'")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"private_key_id"),"\xa0",(0,l.kt)("abbr",{title:"Required if credential is set"},"\u2753"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Private key id")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"project_id"),"\xa0",(0,l.kt)("abbr",{title:"Required if credential is set"},"\u2753"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Project id to set the credentials")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"auth_provider_x509_cert_url"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Auth provider x509 certificate url ",(0,l.kt)("div",{className:"default-line 
default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},(0,l.kt)("a",{parentName:"td",href:"https://www.googleapis.com/oauth2/v1/certs"},"https://www.googleapis.com/oauth2/v1/certs"))))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"auth_uri"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Authentication uri ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},(0,l.kt)("a",{parentName:"td",href:"https://accounts.google.com/o/oauth2/auth"},"https://accounts.google.com/o/oauth2/auth"))))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"client_x509_cert_url"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"If not set it will be default to ",(0,l.kt)("a",{parentName:"td",href:"https://www.googleapis.com/robot/v1/metadata/x509/client_email"},"https://www.googleapis.com/robot/v1/metadata/x509/client_email"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"token_uri"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Token uri ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: 
",(0,l.kt)("span",{className:"default-value"},(0,l.kt)("a",{parentName:"td",href:"https://oauth2.googleapis.com/token"},"https://oauth2.googleapis.com/token"))))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"type"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Authentication type ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"service","_","account")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"dataset_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Regex patterns for dataset to filter in ingestion. Specify regex to only match the schema name. e.g. 
to match all tables in schema analytics, use the regex 'analytics' ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"dataset_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"dataset_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"dataset_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"domain"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"map(str,AllowDenyPattern)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"A class to store allow deny regexes")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"domain.",(0,l.kt)("inlineCode",{parentName:"td"},"key"),"."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"domain.",(0,l.kt)("inlineCode",{parentName:"td"},"key"),"."),(0,l.kt)("span",{className:"path-main"},"deny"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"domain.",(0,l.kt)("inlineCode",{parentName:"td"},"key"),"."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"profile_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the ",(0,l.kt)("inlineCode",{parentName:"td"},"table_pattern")," will be considered. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profile_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profile_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profile_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"project_id_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Regex patterns for project_id to filter in ingestion. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"project_id_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"project_id_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"project_id_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"schema_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics' ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"schema_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"schema_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"schema_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"table_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. 
to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*' ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"table_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"table_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"table_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"usage"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"BigQueryUsageConfig"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Usage related configs ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","bucket","_","duration","'",": ","'","DAY","'",", ","'","end","_","time","'",": ","'","2023-08-24...")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"apply_view_usage_to_tables"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to apply view's usage to its base tables. If set to False, uses sql parser and applies usage to views / tables mentioned in the query. If set to True, usage is applied to base tables only. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"bucket_duration"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"Enum"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Size of the time window to aggregate usage stats. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"DAY")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"end_time"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string(date-time)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Latest date of lineage/usage to consider. Default: Current time in UTC")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"format_sql_queries"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to format sql queries ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"include_operational_stats"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to display operational stats. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"include_read_operational_stats"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to report read operational stats. Experimental. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"include_top_n_queries"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ingest the top_n_queries. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"max_query_duration"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"number(time-delta)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Correction to pad start_time and end_time with. For handling the case where the read happens within our time range but the query completion event is delayed and happens after the configured end time. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"900.0")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"start_time"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string(date-time)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on ",(0,l.kt)("inlineCode",{parentName:"td"},"bucket_duration"),"). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"top_n_queries"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Number of top queries to save to each table. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"10")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"user_email_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"regex patterns for user emails to filter in usage. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage.user_email_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage.user_email_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage.user_email_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"view_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*' ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"view_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"view_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"view_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"profiling"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"GEProfilingConfig"))),(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"default-line "},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","enabled","'",": False, ","'","operation","_","config","'",": ","{","'","lower","_","fre...")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"catch_exceptions"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"default-line "},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"enabled"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether profiling should be done. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"field_sample_values_limit"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Upper limit for number of sample values to collect for all columns. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"20")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_distinct_count"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the number of distinct values for each column. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_distinct_value_frequencies"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for distinct value frequencies. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_histogram"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the histogram for numeric fields. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_max_value"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the max value of numeric columns. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_mean_value"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the mean value of numeric columns. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_median_value"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the median value of numeric columns. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_min_value"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the min value of numeric columns. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_null_count"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the number of nulls for each column. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_quantiles"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the quantiles of numeric columns. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_sample_values"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the sample values for all columns. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_stddev_value"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the standard deviation of numeric columns. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"limit"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Max number of documents to profile. By default, profiles all documents.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"max_number_of_fields_to_profile"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"A positive integer that specifies the maximum number of columns to profile for any table. ",(0,l.kt)("inlineCode",{parentName:"td"},"None")," implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"max_workers"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Number of worker threads to use for profiling. Set to 1 to disable. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"10")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"offset"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Offset in documents to profile. By default, uses no offset.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"partition_datetime"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string(date-time)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. 
Only Bigquery supports this.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"partition_profiling_enabled"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"default-line "},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"profile_if_updated_since_days"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"number"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Profile table only if it has been updated since these many number of days. If set to ",(0,l.kt)("inlineCode",{parentName:"td"},"null"),", no constraint of last modified time for tables to profile. Supported only in ",(0,l.kt)("inlineCode",{parentName:"td"},"snowflake")," and ",(0,l.kt)("inlineCode",{parentName:"td"},"BigQuery"),".")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"profile_table_level_only"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to perform profiling at table-level only, or include column-level profiling as well. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"profile_table_row_count_estimate_only"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"profile_table_row_limit"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Profile tables only if their row count is less then specified count. If set to ",(0,l.kt)("inlineCode",{parentName:"td"},"null"),", no limit on the row count of tables to profile. 
Supported only in ",(0,l.kt)("inlineCode",{parentName:"td"},"snowflake")," and ",(0,l.kt)("inlineCode",{parentName:"td"},"BigQuery")," ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"5000000")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"profile_table_size_limit"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Profile tables only if their size is less then specified GBs. If set to ",(0,l.kt)("inlineCode",{parentName:"td"},"null"),", no limit on the size of tables to profile. Supported only in ",(0,l.kt)("inlineCode",{parentName:"td"},"snowflake")," and ",(0,l.kt)("inlineCode",{parentName:"td"},"BigQuery")," ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"5")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"query_combiner_enabled"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("em",{parentName:"td"},"This feature is still experimental and can be disabled if it causes issues.")," Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"report_dropped_profiles"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to report datasets or dataset columns which were not profiled. Set to ",(0,l.kt)("inlineCode",{parentName:"td"},"True")," for debugging purposes. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"turn_off_expensive_profiling_metrics"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"operation_config"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"OperationConfig"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Experimental feature. 
To specify operation configs.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling.operation_config."),(0,l.kt)("span",{className:"path-main"},"lower_freq_profile_enabled"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling.operation_config."),(0,l.kt)("span",{className:"path-main"},"profile_date_of_month"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Number between 1 to 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take affect.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling.operation_config."),(0,l.kt)("span",{className:"path-main"},"profile_day_of_week"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Number between 0 to 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. 
If not specified, defaults to Nothing and this field does not take affect.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"stateful_ingestion"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"StatefulStaleMetadataRemovalConfig"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Base specialized config for Stateful Ingestion with stale metadata removal capability.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"stateful_ingestion."),(0,l.kt)("span",{className:"path-main"},"enabled"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"The type of the ingestion state provider registered with datahub. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"stateful_ingestion."),(0,l.kt)("span",{className:"path-main"},"remove_stale_metadata"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))))))),(0,l.kt)(s.Z,{value:"schema",label:"Schema",mdxType:"TabItem"},(0,l.kt)("p",null,"The ",(0,l.kt)("a",{parentName:"p",href:"https://json-schema.org/"},"JSONSchema")," for this configuration is inlined below."),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-javascript"},'{\n "title": "BigQueryV2Config",\n "description": "Base configuration class for stateful ingestion for source configs to inherit from.",\n "type": "object",\n "properties": {\n "store_last_profiling_timestamps": {\n "title": "Store Last Profiling Timestamps",\n "description": "Enable storing last profile timestamp in store.",\n "default": false,\n "type": "boolean"\n },\n "incremental_lineage": {\n "title": "Incremental Lineage",\n "description": "When enabled, emits lineage as incremental to existing lineage already in DataHub. When disabled, re-states lineage on each run.",\n "default": true,\n "type": "boolean"\n },\n "sql_parser_use_external_process": {\n "title": "Sql Parser Use External Process",\n "description": "When enabled, sql parser will run in isolated in a separate process. This can affect processing time but can protect from sql parser\'s mem leak.",\n "default": false,\n "type": "boolean"\n },\n "store_last_lineage_extraction_timestamp": {\n "title": "Store Last Lineage Extraction Timestamp",\n "description": "Enable checking last lineage extraction date in store.",\n "default": false,\n "type": "boolean"\n },\n "bucket_duration": {\n "description": "Size of the time window to aggregate usage stats.",\n "default": "DAY",\n "allOf": [\n {\n "$ref": "#/definitions/BucketDuration"\n }\n ]\n },\n "end_time": {\n "title": "End Time",\n "description": "Latest date of lineage/usage to consider. 
Default: Current time in UTC",\n "type": "string",\n "format": "date-time"\n },\n "start_time": {\n "title": "Start Time",\n "description": "Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on `bucket_duration`). You can also specify relative time with respect to end_time such as \'-7 days\' Or \'-7d\'.",\n "type": "string",\n "format": "date-time"\n },\n "store_last_usage_extraction_timestamp": {\n "title": "Store Last Usage Extraction Timestamp",\n "description": "Enable checking last usage timestamp in store.",\n "default": true,\n "type": "boolean"\n },\n "env": {\n "title": "Env",\n "description": "The environment that all assets produced by this connector belong to",\n "default": "PROD",\n "type": "string"\n },\n "platform_instance": {\n "title": "Platform Instance",\n "description": "The instance of the platform that all assets produced by this recipe belong to",\n "type": "string"\n },\n "stateful_ingestion": {\n "$ref": "#/definitions/StatefulStaleMetadataRemovalConfig"\n },\n "options": {\n "title": "Options",\n "description": "Any options specified here will be passed to [SQLAlchemy.create_engine](https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine) as kwargs.",\n "type": "object"\n },\n "schema_pattern": {\n "title": "Schema Pattern",\n "description": "Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex \'analytics\'",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "table_pattern": {\n "title": "Table Pattern",\n "description": "Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. 
to match all tables starting with customer in Customer database and public schema, use the regex \'Customer.public.customer.*\'",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "view_pattern": {\n "title": "View Pattern",\n "description": "Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex \'Customer.public.customer.*\'",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "profile_pattern": {\n "title": "Profile Pattern",\n "description": "Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the `table_pattern` will be considered.",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "domain": {\n "title": "Domain",\n "description": "Attach domains to databases, schemas or tables during ingestion using regex patterns. Domain key can be a guid like *urn:li:domain:ec428203-ce86-4db3-985d-5a8ee6df32ba* or a string like \\"Marketing\\".) If you provide strings, then datahub will attempt to resolve this name to a guid, and will error out if this fails. 
There can be multiple domain keys specified.",\n "default": {},\n "type": "object",\n "additionalProperties": {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n },\n "include_views": {\n "title": "Include Views",\n "description": "Whether views should be ingested.",\n "default": true,\n "type": "boolean"\n },\n "include_tables": {\n "title": "Include Tables",\n "description": "Whether tables should be ingested.",\n "default": true,\n "type": "boolean"\n },\n "include_table_location_lineage": {\n "title": "Include Table Location Lineage",\n "description": "If the source supports it, include table lineage to the underlying storage location.",\n "default": true,\n "type": "boolean"\n },\n "profiling": {\n "title": "Profiling",\n "default": {\n "enabled": false,\n "operation_config": {\n "lower_freq_profile_enabled": false,\n "profile_day_of_week": null,\n "profile_date_of_month": null\n },\n "limit": null,\n "offset": null,\n "report_dropped_profiles": false,\n "turn_off_expensive_profiling_metrics": false,\n "profile_table_level_only": false,\n "include_field_null_count": true,\n "include_field_distinct_count": true,\n "include_field_min_value": true,\n "include_field_max_value": true,\n "include_field_mean_value": true,\n "include_field_median_value": true,\n "include_field_stddev_value": true,\n "include_field_quantiles": false,\n "include_field_distinct_value_frequencies": false,\n "include_field_histogram": false,\n "include_field_sample_values": true,\n "field_sample_values_limit": 20,\n "max_number_of_fields_to_profile": null,\n "profile_if_updated_since_days": null,\n "profile_table_size_limit": 5,\n "profile_table_row_limit": 5000000,\n "profile_table_row_count_estimate_only": false,\n "max_workers": 10,\n "query_combiner_enabled": true,\n "catch_exceptions": true,\n "partition_profiling_enabled": true,\n "partition_datetime": null\n },\n "allOf": [\n {\n "$ref": "#/definitions/GEProfilingConfig"\n }\n ]\n },\n "rate_limit": {\n "title": "Rate Limit",\n 
"description": "Should we rate limit requests made to API.",\n "default": false,\n "type": "boolean"\n },\n "requests_per_min": {\n "title": "Requests Per Min",\n "description": "Used to control number of API calls made per min. Only used when `rate_limit` is set to `True`.",\n "default": 60,\n "type": "integer"\n },\n "temp_table_dataset_prefix": {\n "title": "Temp Table Dataset Prefix",\n "description": "If you are creating temp tables in a dataset with a particular prefix you can use this config to set the prefix for the dataset. This is to support workflows from before bigquery\'s introduction of temp tables. By default we use `_` because of datasets that begin with an underscore are hidden by default https://cloud.google.com/bigquery/docs/datasets#dataset-naming.",\n "default": "_",\n "type": "string"\n },\n "sharded_table_pattern": {\n "title": "Sharded Table Pattern",\n "description": "The regex pattern to match sharded tables and group as one table. This is a very low level config parameter, only change if you know what you are doing, ",\n "default": "((.+)[_$])?(\\\\d{8})$",\n "deprecated": true,\n "type": "string"\n },\n "project_id_pattern": {\n "title": "Project Id Pattern",\n "description": "Regex patterns for project_id to filter in ingestion.",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "usage": {\n "title": "Usage",\n "description": "Usage related configs",\n "default": {\n "bucket_duration": "DAY",\n "end_time": "2023-08-24T21:05:43.557107+00:00",\n "start_time": "2023-08-23T00:00:00+00:00",\n "top_n_queries": 10,\n "user_email_pattern": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "include_operational_stats": true,\n "include_read_operational_stats": false,\n "format_sql_queries": false,\n "include_top_n_queries": true,\n "max_query_duration": 900.0,\n "apply_view_usage_to_tables": false\n },\n "allOf": [\n {\n 
"$ref": "#/definitions/BigQueryUsageConfig"\n }\n ]\n },\n "include_usage_statistics": {\n "title": "Include Usage Statistics",\n "description": "Generate usage statistic",\n "default": true,\n "type": "boolean"\n },\n "capture_table_label_as_tag": {\n "title": "Capture Table Label As Tag",\n "description": "Capture BigQuery table labels as DataHub tag",\n "default": false,\n "type": "boolean"\n },\n "capture_dataset_label_as_tag": {\n "title": "Capture Dataset Label As Tag",\n "description": "Capture BigQuery dataset labels as DataHub tag",\n "default": false,\n "type": "boolean"\n },\n "dataset_pattern": {\n "title": "Dataset Pattern",\n "description": "Regex patterns for dataset to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex \'analytics\'",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "match_fully_qualified_names": {\n "title": "Match Fully Qualified Names",\n "description": "Whether `dataset_pattern` is matched against fully qualified dataset name `.`.",\n "default": false,\n "type": "boolean"\n },\n "include_external_url": {\n "title": "Include External Url",\n "description": "Whether to populate BigQuery Console url to Datasets/Tables",\n "default": true,\n "type": "boolean"\n },\n "include_data_platform_instance": {\n "title": "Include Data Platform Instance",\n "description": "Whether to create a DataPlatformInstance aspect, equal to the BigQuery project id. If enabled, will cause redundancy in the browse path for BigQuery entities in the UI, because the project id is represented as the top-level container.",\n "default": false,\n "type": "boolean"\n },\n "debug_include_full_payloads": {\n "title": "Debug Include Full Payloads",\n "description": "Include full payload into events. 
It is only for debugging and internal use.",\n "default": false,\n "type": "boolean"\n },\n "number_of_datasets_process_in_batch_if_profiling_enabled": {\n "title": "Number Of Datasets Process In Batch If Profiling Enabled",\n "description": "Number of partitioned table queried in batch when getting metadata. This is a low level config property which should be touched with care. This restriction is needed because we query partitions system view which throws error if we try to touch too many tables.",\n "default": 200,\n "type": "integer"\n },\n "column_limit": {\n "title": "Column Limit",\n "description": "Maximum number of columns to process in a table. This is a low level config property which should be touched with care. This restriction is needed because excessively wide tables can result in failure to ingest the schema.",\n "default": 300,\n "type": "integer"\n },\n "project_id": {\n "title": "Project Id",\n "description": "[deprecated] Use project_id_pattern or project_ids instead.",\n "type": "string"\n },\n "project_ids": {\n "title": "Project Ids",\n "description": "Ingests specified project_ids. Use this property if you want to specify what projects to ingest or don\'t want to give project resourcemanager.projects.list to your service account. Overrides `project_id_pattern`.",\n "type": "array",\n "items": {\n "type": "string"\n }\n },\n "project_on_behalf": {\n "title": "Project On Behalf",\n "description": "[Advanced] The BigQuery project in which queries are executed. Will be passed when creating a job. 
If not passed, falls back to the project associated with the service account.",\n "type": "string"\n },\n "lineage_use_sql_parser": {\n "title": "Lineage Use Sql Parser",\n "description": "Use sql parser to resolve view/table lineage.",\n "default": true,\n "type": "boolean"\n },\n "lineage_parse_view_ddl": {\n "title": "Lineage Parse View Ddl",\n "description": "Sql parse view ddl to get lineage.",\n "default": true,\n "type": "boolean"\n },\n "lineage_sql_parser_use_raw_names": {\n "title": "Lineage Sql Parser Use Raw Names",\n "description": "This parameter ignores the lowercase pattern stipulated in the SQLParser. NOTE: Ignored if lineage_use_sql_parser is False.",\n "default": false,\n "type": "boolean"\n },\n "extract_column_lineage": {\n "title": "Extract Column Lineage",\n "description": "If enabled, generate column level lineage. Requires lineage_use_sql_parser to be enabled. This and `incremental_lineage` cannot both be enabled.",\n "default": false,\n "type": "boolean"\n },\n "extract_lineage_from_catalog": {\n "title": "Extract Lineage From Catalog",\n "description": "This flag enables the data lineage extraction from Data Lineage API exposed by Google Data Catalog. NOTE: This extractor can\'t build views lineage. It\'s recommended to enable the view\'s DDL parsing. 
Read the docs to have more information about: https://cloud.google.com/data-catalog/docs/concepts/about-data-lineage",\n "default": false,\n "type": "boolean"\n },\n "convert_urns_to_lowercase": {\n "title": "Convert Urns To Lowercase",\n "description": "Convert urns to lowercase.",\n "default": false,\n "type": "boolean"\n },\n "enable_legacy_sharded_table_support": {\n "title": "Enable Legacy Sharded Table Support",\n "description": "Use the legacy sharded table urn suffix added.",\n "default": true,\n "type": "boolean"\n },\n "scheme": {\n "title": "Scheme",\n "default": "bigquery",\n "type": "string"\n },\n "log_page_size": {\n "title": "Log Page Size",\n "description": "The number of log item will be queried per page for lineage collection",\n "default": 1000,\n "exclusiveMinimum": 0,\n "type": "integer"\n },\n "credential": {\n "title": "Credential",\n "description": "BigQuery credential informations",\n "allOf": [\n {\n "$ref": "#/definitions/BigQueryCredential"\n }\n ]\n },\n "extra_client_options": {\n "title": "Extra Client Options",\n "description": "Additional options to pass to google.cloud.logging_v2.client.Client.",\n "default": {},\n "type": "object"\n },\n "include_table_lineage": {\n "title": "Include Table Lineage",\n "description": "Option to enable/disable lineage generation. Is enabled by default.",\n "default": true,\n "type": "boolean"\n },\n "max_query_duration": {\n "title": "Max Query Duration",\n "description": "Correction to pad start_time and end_time with. 
For handling the case where the read happens within our time range but the query completion event is delayed and happens after the configured end time.",\n "default": 900.0,\n "type": "number",\n "format": "time-delta"\n },\n "bigquery_audit_metadata_datasets": {\n "title": "Bigquery Audit Metadata Datasets",\n "description": "A list of datasets that contain a table named cloudaudit_googleapis_com_data_access which contain BigQuery audit logs, specifically, those containing BigQueryAuditMetadata. It is recommended that the project of the dataset is also specified, for example, projectA.datasetB.",\n "type": "array",\n "items": {\n "type": "string"\n }\n },\n "use_exported_bigquery_audit_metadata": {\n "title": "Use Exported Bigquery Audit Metadata",\n "description": "When configured, use BigQueryAuditMetadata in bigquery_audit_metadata_datasets to compute lineage information.",\n "default": false,\n "type": "boolean"\n },\n "use_date_sharded_audit_log_tables": {\n "title": "Use Date Sharded Audit Log Tables",\n "description": "Whether to read date sharded tables or time partitioned tables when extracting usage from exported audit logs.",\n "default": false,\n "type": "boolean"\n },\n "upstream_lineage_in_report": {\n "title": "Upstream Lineage In Report",\n "description": "Useful for debugging lineage information. Set to True to see the raw lineage created internally.",\n "default": false,\n "type": "boolean"\n }\n },\n "additionalProperties": false,\n "definitions": {\n "BucketDuration": {\n "title": "BucketDuration",\n "description": "An enumeration.",\n "enum": [\n "DAY",\n "HOUR"\n ],\n "type": "string"\n },\n "DynamicTypedStateProviderConfig": {\n "title": "DynamicTypedStateProviderConfig",\n "type": "object",\n "properties": {\n "type": {\n "title": "Type",\n "description": "The type of the state provider to use. 
For DataHub use `datahub`",\n "type": "string"\n },\n "config": {\n "title": "Config",\n "description": "The configuration required for initializing the state provider. Default: The datahub_api config if set at pipeline level. Otherwise, the default DatahubClientConfig. See the defaults (https://github.com/datahub-project/datahub/blob/master/metadata-ingestion/src/datahub/ingestion/graph/client.py#L19)."\n }\n },\n "required": [\n "type"\n ],\n "additionalProperties": false\n },\n "StatefulStaleMetadataRemovalConfig": {\n "title": "StatefulStaleMetadataRemovalConfig",\n "description": "Base specialized config for Stateful Ingestion with stale metadata removal capability.",\n "type": "object",\n "properties": {\n "enabled": {\n "title": "Enabled",\n "description": "The type of the ingestion state provider registered with datahub.",\n "default": false,\n "type": "boolean"\n },\n "remove_stale_metadata": {\n "title": "Remove Stale Metadata",\n "description": "Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.",\n "default": true,\n "type": "boolean"\n }\n },\n "additionalProperties": false\n },\n "AllowDenyPattern": {\n "title": "AllowDenyPattern",\n "description": "A class to store allow deny regexes",\n "type": "object",\n "properties": {\n "allow": {\n "title": "Allow",\n "description": "List of regex patterns to include in ingestion",\n "default": [\n ".*"\n ],\n "type": "array",\n "items": {\n "type": "string"\n }\n },\n "deny": {\n "title": "Deny",\n "description": "List of regex patterns to exclude from ingestion.",\n "default": [],\n "type": "array",\n "items": {\n "type": "string"\n }\n },\n "ignoreCase": {\n "title": "Ignorecase",\n "description": "Whether to ignore case sensitivity during pattern matching.",\n "default": true,\n "type": "boolean"\n }\n },\n "additionalProperties": false\n },\n "OperationConfig": {\n "title": "OperationConfig",\n "type": "object",\n "properties": {\n 
"lower_freq_profile_enabled": {\n "title": "Lower Freq Profile Enabled",\n "description": "Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.",\n "default": false,\n "type": "boolean"\n },\n "profile_day_of_week": {\n "title": "Profile Day Of Week",\n "description": "Number between 0 to 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take affect.",\n "type": "integer"\n },\n "profile_date_of_month": {\n "title": "Profile Date Of Month",\n "description": "Number between 1 to 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take affect.",\n "type": "integer"\n }\n },\n "additionalProperties": false\n },\n "GEProfilingConfig": {\n "title": "GEProfilingConfig",\n "type": "object",\n "properties": {\n "enabled": {\n "title": "Enabled",\n "description": "Whether profiling should be done.",\n "default": false,\n "type": "boolean"\n },\n "operation_config": {\n "title": "Operation Config",\n "description": "Experimental feature. To specify operation configs.",\n "allOf": [\n {\n "$ref": "#/definitions/OperationConfig"\n }\n ]\n },\n "limit": {\n "title": "Limit",\n "description": "Max number of documents to profile. By default, profiles all documents.",\n "type": "integer"\n },\n "offset": {\n "title": "Offset",\n "description": "Offset in documents to profile. By default, uses no offset.",\n "type": "integer"\n },\n "report_dropped_profiles": {\n "title": "Report Dropped Profiles",\n "description": "Whether to report datasets or dataset columns which were not profiled. Set to `True` for debugging purposes.",\n "default": false,\n "type": "boolean"\n },\n "turn_off_expensive_profiling_metrics": {\n "title": "Turn Off Expensive Profiling Metrics",\n "description": "Whether to turn off expensive profiling or not. 
This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.",\n "default": false,\n "type": "boolean"\n },\n "profile_table_level_only": {\n "title": "Profile Table Level Only",\n "description": "Whether to perform profiling at table-level only, or include column-level profiling as well.",\n "default": false,\n "type": "boolean"\n },\n "include_field_null_count": {\n "title": "Include Field Null Count",\n "description": "Whether to profile for the number of nulls for each column.",\n "default": true,\n "type": "boolean"\n },\n "include_field_distinct_count": {\n "title": "Include Field Distinct Count",\n "description": "Whether to profile for the number of distinct values for each column.",\n "default": true,\n "type": "boolean"\n },\n "include_field_min_value": {\n "title": "Include Field Min Value",\n "description": "Whether to profile for the min value of numeric columns.",\n "default": true,\n "type": "boolean"\n },\n "include_field_max_value": {\n "title": "Include Field Max Value",\n "description": "Whether to profile for the max value of numeric columns.",\n "default": true,\n "type": "boolean"\n },\n "include_field_mean_value": {\n "title": "Include Field Mean Value",\n "description": "Whether to profile for the mean value of numeric columns.",\n "default": true,\n "type": "boolean"\n },\n "include_field_median_value": {\n "title": "Include Field Median Value",\n "description": "Whether to profile for the median value of numeric columns.",\n "default": true,\n "type": "boolean"\n },\n "include_field_stddev_value": {\n "title": "Include Field Stddev Value",\n "description": "Whether to profile for the standard deviation of numeric columns.",\n "default": true,\n "type": "boolean"\n },\n "include_field_quantiles": {\n "title": "Include Field Quantiles",\n "description": "Whether to profile for the quantiles of numeric columns.",\n "default": false,\n "type": 
"boolean"\n },\n "include_field_distinct_value_frequencies": {\n "title": "Include Field Distinct Value Frequencies",\n "description": "Whether to profile for distinct value frequencies.",\n "default": false,\n "type": "boolean"\n },\n "include_field_histogram": {\n "title": "Include Field Histogram",\n "description": "Whether to profile for the histogram for numeric fields.",\n "default": false,\n "type": "boolean"\n },\n "include_field_sample_values": {\n "title": "Include Field Sample Values",\n "description": "Whether to profile for the sample values for all columns.",\n "default": true,\n "type": "boolean"\n },\n "field_sample_values_limit": {\n "title": "Field Sample Values Limit",\n "description": "Upper limit for number of sample values to collect for all columns.",\n "default": 20,\n "type": "integer"\n },\n "max_number_of_fields_to_profile": {\n "title": "Max Number Of Fields To Profile",\n "description": "A positive integer that specifies the maximum number of columns to profile for any table. `None` implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.",\n "exclusiveMinimum": 0,\n "type": "integer"\n },\n "profile_if_updated_since_days": {\n "title": "Profile If Updated Since Days",\n "description": "Profile table only if it has been updated since these many number of days. If set to `null`, no constraint of last modified time for tables to profile. Supported only in `snowflake` and `BigQuery`.",\n "exclusiveMinimum": 0,\n "type": "number"\n },\n "profile_table_size_limit": {\n "title": "Profile Table Size Limit",\n "description": "Profile tables only if their size is less then specified GBs. If set to `null`, no limit on the size of tables to profile. Supported only in `snowflake` and `BigQuery`",\n "default": 5,\n "type": "integer"\n },\n "profile_table_row_limit": {\n "title": "Profile Table Row Limit",\n "description": "Profile tables only if their row count is less then specified count. 
If set to `null`, no limit on the row count of tables to profile. Supported only in `snowflake` and `BigQuery`",\n "default": 5000000,\n "type": "integer"\n },\n "profile_table_row_count_estimate_only": {\n "title": "Profile Table Row Count Estimate Only",\n "description": "Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL. ",\n "default": false,\n "type": "boolean"\n },\n "max_workers": {\n "title": "Max Workers",\n "description": "Number of worker threads to use for profiling. Set to 1 to disable.",\n "default": 10,\n "type": "integer"\n },\n "query_combiner_enabled": {\n "title": "Query Combiner Enabled",\n "description": "*This feature is still experimental and can be disabled if it causes issues.* Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.",\n "default": true,\n "type": "boolean"\n },\n "catch_exceptions": {\n "title": "Catch Exceptions",\n "default": true,\n "type": "boolean"\n },\n "partition_profiling_enabled": {\n "title": "Partition Profiling Enabled",\n "default": true,\n "type": "boolean"\n },\n "partition_datetime": {\n "title": "Partition Datetime",\n "description": "For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.",\n "type": "string",\n "format": "date-time"\n }\n },\n "additionalProperties": false\n },\n "BigQueryUsageConfig": {\n "title": "BigQueryUsageConfig",\n "type": "object",\n "properties": {\n "bucket_duration": {\n "description": "Size of the time window to aggregate usage stats.",\n "default": "DAY",\n "allOf": [\n {\n "$ref": "#/definitions/BucketDuration"\n }\n ]\n },\n "end_time": {\n "title": "End Time",\n "description": "Latest date of lineage/usage to consider. 
Default: Current time in UTC",\n "type": "string",\n "format": "date-time"\n },\n "start_time": {\n "title": "Start Time",\n "description": "Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on `bucket_duration`). You can also specify relative time with respect to end_time such as \'-7 days\' Or \'-7d\'.",\n "type": "string",\n "format": "date-time"\n },\n "top_n_queries": {\n "title": "Top N Queries",\n "description": "Number of top queries to save to each table.",\n "default": 10,\n "exclusiveMinimum": 0,\n "type": "integer"\n },\n "user_email_pattern": {\n "title": "User Email Pattern",\n "description": "regex patterns for user emails to filter in usage.",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "include_operational_stats": {\n "title": "Include Operational Stats",\n "description": "Whether to display operational stats.",\n "default": true,\n "type": "boolean"\n },\n "include_read_operational_stats": {\n "title": "Include Read Operational Stats",\n "description": "Whether to report read operational stats. Experimental.",\n "default": false,\n "type": "boolean"\n },\n "format_sql_queries": {\n "title": "Format Sql Queries",\n "description": "Whether to format sql queries",\n "default": false,\n "type": "boolean"\n },\n "include_top_n_queries": {\n "title": "Include Top N Queries",\n "description": "Whether to ingest the top_n_queries.",\n "default": true,\n "type": "boolean"\n },\n "max_query_duration": {\n "title": "Max Query Duration",\n "description": "Correction to pad start_time and end_time with. 
For handling the case where the read happens within our time range but the query completion event is delayed and happens after the configured end time.",\n "default": 900.0,\n "type": "number",\n "format": "time-delta"\n },\n "apply_view_usage_to_tables": {\n "title": "Apply View Usage To Tables",\n "description": "Whether to apply view\'s usage to its base tables. If set to False, uses sql parser and applies usage to views / tables mentioned in the query. If set to True, usage is applied to base tables only.",\n "default": false,\n "type": "boolean"\n }\n },\n "additionalProperties": false\n },\n "BigQueryCredential": {\n "title": "BigQueryCredential",\n "type": "object",\n "properties": {\n "project_id": {\n "title": "Project Id",\n "description": "Project id to set the credentials",\n "type": "string"\n },\n "private_key_id": {\n "title": "Private Key Id",\n "description": "Private key id",\n "type": "string"\n },\n "private_key": {\n "title": "Private Key",\n "description": "Private key in a form of \'-----BEGIN PRIVATE KEY-----\\\\nprivate-key\\\\n-----END PRIVATE KEY-----\\\\n\'",\n "type": "string"\n },\n "client_email": {\n "title": "Client Email",\n "description": "Client email",\n "type": "string"\n },\n "client_id": {\n "title": "Client Id",\n "description": "Client Id",\n "type": "string"\n },\n "auth_uri": {\n "title": "Auth Uri",\n "description": "Authentication uri",\n "default": "https://accounts.google.com/o/oauth2/auth",\n "type": "string"\n },\n "token_uri": {\n "title": "Token Uri",\n "description": "Token uri",\n "default": "https://oauth2.googleapis.com/token",\n "type": "string"\n },\n "auth_provider_x509_cert_url": {\n "title": "Auth Provider X509 Cert Url",\n "description": "Auth provider x509 certificate url",\n "default": "https://www.googleapis.com/oauth2/v1/certs",\n "type": "string"\n },\n "type": {\n "title": "Type",\n "description": "Authentication type",\n "default": "service_account",\n "type": "string"\n },\n 
"client_x509_cert_url": {\n "title": "Client X509 Cert Url",\n "description": "If not set it will be default to https://www.googleapis.com/robot/v1/metadata/x509/client_email",\n "type": "string"\n }\n },\n "required": [\n "project_id",\n "private_key_id",\n "private_key",\n "client_email",\n "client_id"\n ],\n "additionalProperties": false\n }\n }\n}\n')))),(0,l.kt)("h3",{id:"code-coordinates"},"Code Coordinates"),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},"Class Name: ",(0,l.kt)("inlineCode",{parentName:"li"},"datahub.ingestion.source.bigquery_v2.bigquery.BigqueryV2Source")),(0,l.kt)("li",{parentName:"ul"},"Browse on ",(0,l.kt)("a",{parentName:"li",href:"https://github.com/datahub-project/datahub/blob/master/metadata-ingestion/src/datahub/ingestion/source/bigquery_v2/bigquery.py"},"GitHub"))),(0,l.kt)("h2",null,"Questions"),(0,l.kt)("p",null,"If you've got any questions on configuring ingestion for BigQuery, feel free to ping us on ",(0,l.kt)("a",{parentName:"p",href:"https://slack.datahubproject.io"},"our Slack"),"."))}u.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkdocs_website=self.webpackChunkdocs_website||[]).push([[701],{3905:(e,t,a)=>{a.d(t,{Zo:()=>d,kt:()=>u});var n=a(67294);function l(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function i(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function s(e){for(var t=1;t=0||(l[a]=e[a]);return l}(e,t);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(l[a]=e[a])}return l}var o=n.createContext({}),p=function(e){var t=n.useContext(o),a=t;return e&&(a="function"==typeof e?e(t):s(s({},t),e)),a},d=function(e){var t=p(e.components);return 
n.createElement(o.Provider,{value:t},e.children)},m={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},c=n.forwardRef((function(e,t){var a=e.components,l=e.mdxType,i=e.originalType,o=e.parentName,d=r(e,["components","mdxType","originalType","parentName"]),c=p(a),u=l,f=c["".concat(o,".").concat(u)]||c[u]||m[u]||i;return a?n.createElement(f,s(s({ref:t},d),{},{components:a})):n.createElement(f,s({ref:t},d))}));function u(e,t){var a=arguments,l=t&&t.mdxType;if("string"==typeof e||l){var i=a.length,s=new Array(i);s[0]=c;var r={};for(var o in t)hasOwnProperty.call(t,o)&&(r[o]=t[o]);r.originalType=e,r.mdxType="string"==typeof e?e:l,s[1]=r;for(var p=2;p{a.d(t,{Z:()=>s});var n=a(67294),l=a(86010);const i="tabItem_Ymn6";function s(e){let{children:t,hidden:a,className:s}=e;return n.createElement("div",{role:"tabpanel",className:(0,l.Z)(i,s),hidden:a},t)}},34259:(e,t,a)=>{a.d(t,{Z:()=>u});var n=a(83117),l=a(67294),i=a(86010),s=a(51048),r=a(33609),o=a(1943),p=a(72957);const d="tabList__CuJ",m="tabItem_LNqP";function c(e){const{lazy:t,block:a,defaultValue:s,values:c,groupId:u,className:f}=e,k=l.Children.map(e.children,(e=>{if((0,l.isValidElement)(e)&&"value"in e.props)return e;throw new Error(`Docusaurus error: Bad child <${"string"==typeof e.type?e.type:e.type.name}>: all children of the component should be , and every should have a unique "value" prop.`)})),g=c??k.map((e=>{let{props:{value:t,label:a,attributes:n}}=e;return{value:t,label:a,attributes:n}})),N=(0,r.l)(g,((e,t)=>e.value===t.value));if(N.length>0)throw new Error(`Docusaurus error: Duplicate values "${N.map((e=>e.value)).join(", ")}" found in . Every value needs to be unique.`);const h=null===s?s:s??k.find((e=>e.props.default))?.props.value??k[0].props.value;if(null!==h&&!g.some((e=>e.value===h)))throw new Error(`Docusaurus error: The has a defaultValue "${h}" but none of its children has the corresponding value. Available values are: ${g.map((e=>e.value)).join(", ")}. 
If you intend to show no default tab, use defaultValue={null} instead.`);const{tabGroupChoices:y,setTabGroupChoices:b}=(0,o.U)(),[v,_]=(0,l.useState)(h),w=[],{blockElementScrollPositionUntilNextRender:x}=(0,p.o5)();if(null!=u){const e=y[u];null!=e&&e!==v&&g.some((t=>t.value===e))&&_(e)}const q=e=>{const t=e.currentTarget,a=w.indexOf(t),n=g[a].value;n!==v&&(x(t),_(n),null!=u&&b(u,String(n)))},D=e=>{let t=null;switch(e.key){case"Enter":q(e);break;case"ArrowRight":{const a=w.indexOf(e.currentTarget)+1;t=w[a]??w[0];break}case"ArrowLeft":{const a=w.indexOf(e.currentTarget)-1;t=w[a]??w[w.length-1];break}}t?.focus()};return l.createElement("div",{className:(0,i.Z)("tabs-container",d)},l.createElement("ul",{role:"tablist","aria-orientation":"horizontal",className:(0,i.Z)("tabs",{"tabs--block":a},f)},g.map((e=>{let{value:t,label:a,attributes:s}=e;return l.createElement("li",(0,n.Z)({role:"tab",tabIndex:v===t?0:-1,"aria-selected":v===t,key:t,ref:e=>w.push(e),onKeyDown:D,onClick:q},s,{className:(0,i.Z)("tabs__item",m,s?.className,{"tabs__item--active":v===t})}),a??t)}))),t?(0,l.cloneElement)(k.filter((e=>e.props.value===v))[0],{className:"margin-top--md"}):l.createElement("div",{className:"margin-top--md"},k.map(((e,t)=>(0,l.cloneElement)(e,{key:t,hidden:e.props.value!==v})))))}function u(e){const t=(0,s.Z)();return l.createElement(c,(0,n.Z)({key:String(t)},e))}},10328:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>d,contentTitle:()=>o,default:()=>u,frontMatter:()=>r,metadata:()=>p,toc:()=>m});var n=a(83117),l=(a(67294),a(3905)),i=a(34259),s=a(18679);const r={sidebar_position:3,title:"BigQuery",slug:"/generated/ingestion/sources/bigquery",custom_edit_url:"https://github.com/datahub-project/datahub/blob/master/docs/generated/ingestion/sources/bigquery.md"},o="BigQuery",p={unversionedId:"docs/generated/ingestion/sources/bigquery",id:"docs/generated/ingestion/sources/bigquery",title:"BigQuery",description:"Ingesting metadata from BigQuery requires using the bigquery 
module.",source:"@site/genDocs/docs/generated/ingestion/sources/bigquery.md",sourceDirName:"docs/generated/ingestion/sources",slug:"/generated/ingestion/sources/bigquery",permalink:"/docs/generated/ingestion/sources/bigquery",draft:!1,editUrl:"https://github.com/datahub-project/datahub/blob/master/docs/generated/ingestion/sources/bigquery.md",tags:[],version:"current",sidebarPosition:3,frontMatter:{sidebar_position:3,title:"BigQuery",slug:"/generated/ingestion/sources/bigquery",custom_edit_url:"https://github.com/datahub-project/datahub/blob/master/docs/generated/ingestion/sources/bigquery.md"},sidebar:"overviewSidebar",previous:{title:"Azure AD",permalink:"/docs/generated/ingestion/sources/azure-ad"},next:{title:"Business Glossary",permalink:"/docs/generated/ingestion/sources/business-glossary"}},d={},m=[{value:"Important Capabilities",id:"important-capabilities",level:3},{value:"Prerequisites",id:"prerequisites",level:3},{value:"Create a datahub profile in GCP",id:"create-a-datahub-profile-in-gcp",level:4},{value:"Basic Requirements (needed for metadata ingestion)",id:"basic-requirements-needed-for-metadata-ingestion",level:5},{value:"Create a service account in the Extractor Project",id:"create-a-service-account-in-the-extractor-project",level:4},{value:"Profiling Requirements",id:"profiling-requirements",level:5},{value:"Lineage Computation Details",id:"lineage-computation-details",level:3},{value:"Profiling Details",id:"profiling-details",level:3},{value:"Caveats",id:"caveats",level:3},{value:"CLI based Ingestion",id:"cli-based-ingestion",level:3},{value:"Install the Plugin",id:"install-the-plugin",level:4},{value:"Starter Recipe",id:"starter-recipe",level:3},{value:"Config Details",id:"config-details",level:3},{value:"Code Coordinates",id:"code-coordinates",level:3}],c={toc:m};function u(e){let{components:t,...a}=e;return(0,l.kt)("wrapper",(0,n.Z)({},c,a,{components:t,mdxType:"MDXLayout"}),(0,l.kt)("h1",{id:"bigquery"},"BigQuery"),(0,l.kt)("p",null,"Ingesting 
metadata from BigQuery requires using the ",(0,l.kt)("strong",{parentName:"p"},"bigquery")," module.\n",(0,l.kt)("img",{parentName:"p",src:"https://img.shields.io/badge/support%20status-certified-brightgreen",alt:"Certified"})),(0,l.kt)("h3",{id:"important-capabilities"},"Important Capabilities"),(0,l.kt)("table",null,(0,l.kt)("thead",{parentName:"table"},(0,l.kt)("tr",{parentName:"thead"},(0,l.kt)("th",{parentName:"tr",align:null},"Capability"),(0,l.kt)("th",{parentName:"tr",align:null},"Status"),(0,l.kt)("th",{parentName:"tr",align:null},"Notes"))),(0,l.kt)("tbody",{parentName:"table"},(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},"Asset Containers"),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Enabled by default")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"/docs/metadata-ingestion/docs/dev_guides/sql_profiles"},"Data Profiling")),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Optionally enabled via configuration")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},"Dataset Usage"),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Enabled by default, can be disabled via configuration ",(0,l.kt)("inlineCode",{parentName:"td"},"include_usage_statistics"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},"Descriptions"),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Enabled by default")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"/docs/metadata-ingestion/docs/dev_guides/stateful#stale-entity-removal"},"Detect Deleted Entities")),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Optionally enabled 
via ",(0,l.kt)("inlineCode",{parentName:"td"},"stateful_ingestion.remove_stale_metadata"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"/docs/domains"},"Domains")),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Supported via the ",(0,l.kt)("inlineCode",{parentName:"td"},"domain")," config field")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"/docs/platform-instances"},"Platform Instance")),(0,l.kt)("td",{parentName:"tr",align:null},"\u274c"),(0,l.kt)("td",{parentName:"tr",align:null},"Platform instance is pre-set to the BigQuery project id")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},"Schema Metadata"),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Enabled by default")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},"Table-Level Lineage"),(0,l.kt)("td",{parentName:"tr",align:null},"\u2705"),(0,l.kt)("td",{parentName:"tr",align:null},"Optionally enabled via configuration")))),(0,l.kt)("h3",{id:"prerequisites"},"Prerequisites"),(0,l.kt)("p",null,"To understand how BigQuery ingestion needs to be set up, first familiarize yourself with the concepts in the diagram below:"),(0,l.kt)("p",{align:"center"},(0,l.kt)("img",{width:"70%",src:"https://github.com/datahub-project/static-assets/raw/main/imgs/integrations/bigquery/source-bigquery-setup.png"})),(0,l.kt)("p",null,"There are two important concepts to understand and identify:"),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("em",{parentName:"li"},"Extractor Project"),": This is the project associated with a service-account, whose credentials you will be configuring in the connector. 
The connector uses this service-account to run jobs (including queries) within the project."),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("em",{parentName:"li"},"Bigquery Projects")," are the projects from which table metadata, lineage, usage, and profiling data need to be collected. By default, the extractor project is included in the list of projects that DataHub collects metadata from, but you can control that by passing in a specific list of project ids that you want to collect metadata from. Read the configuration section below to understand how to limit the list of projects that DataHub extracts metadata from. ")),(0,l.kt)("h4",{id:"create-a-datahub-profile-in-gcp"},"Create a datahub profile in GCP"),(0,l.kt)("ol",null,(0,l.kt)("li",{parentName:"ol"},"Create a custom role for datahub as per ",(0,l.kt)("a",{parentName:"li",href:"https://cloud.google.com/iam/docs/creating-custom-roles#creating_a_custom_role"},"BigQuery docs"),"."),(0,l.kt)("li",{parentName:"ol"},"Follow the sections below to grant permissions to this role on this project and other projects.")),(0,l.kt)("h5",{id:"basic-requirements-needed-for-metadata-ingestion"},"Basic Requirements (needed for metadata ingestion)"),(0,l.kt)("ol",null,(0,l.kt)("li",{parentName:"ol"},"Identify your Extractor Project where the service account will run queries to extract metadata.")),(0,l.kt)("table",null,(0,l.kt)("thead",{parentName:"table"},(0,l.kt)("tr",{parentName:"thead"},(0,l.kt)("th",{parentName:"tr",align:null},"permission \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("th",{parentName:"tr",align:null},"Description \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("th",{parentName:"tr",align:null},"Capability \xa0 \xa0 \xa0 \xa0 \xa0 
\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"))),(0,l.kt)("tbody",{parentName:"table"},(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.jobs.create")," \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Run jobs (e.g. queries) within the project. ",(0,l.kt)("em",{parentName:"td"},"This only needs for the extractor project where the service account belongs")," \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.jobs.list")," \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Manage the queries that the service account has sent. ",(0,l.kt)("em",{parentName:"td"},"This only needs for the extractor project where the service account belongs")),(0,l.kt)("td",{parentName:"tr",align:null},"\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.readsessions.create")," \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Create a session for streaming large results. 
",(0,l.kt)("em",{parentName:"td"},"This only needs for the extractor project where the service account belongs")," \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.readsessions.getData")),(0,l.kt)("td",{parentName:"tr",align:null},"Get data from the read session. ",(0,l.kt)("em",{parentName:"td"},"This only needs for the extractor project where the service account belongs")," \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null})))),(0,l.kt)("ol",{start:2},(0,l.kt)("li",{parentName:"ol"},"Grant the following permissions to the Service Account on every project where you would like to extract metadata from")),(0,l.kt)("admonition",{type:"info"},(0,l.kt)("p",{parentName:"admonition"},"If you have multiple projects in your BigQuery setup, the role should be granted these permissions in each of the projects.")),(0,l.kt)("table",null,(0,l.kt)("thead",{parentName:"table"},(0,l.kt)("tr",{parentName:"thead"},(0,l.kt)("th",{parentName:"tr",align:null},"permission \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("th",{parentName:"tr",align:null},"Description \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("th",{parentName:"tr",align:null},"Capability \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("th",{parentName:"tr",align:null},"Default GCP role which contains this permission \xa0 \xa0 \xa0 \xa0 
\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"))),(0,l.kt)("tbody",{parentName:"table"},(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.datasets.get")," \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Retrieve metadata about a dataset. \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.datasets.getIamPolicy")),(0,l.kt)("td",{parentName:"tr",align:null},"Read a dataset's IAM permissions. \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.tables.list")," \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"List BigQuery tables. 
\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.tables.get")," \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Retrieve metadata for a table. \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.routines.get")," \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Get Routines. Needs to retrieve metadata for a table from system table. 
\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.routines.list")," \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"List Routines. Needs to retrieve metadata for a table from system table \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"resourcemanager.projects.get")," \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Retrieve project names and metadata. 
\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Table Metadata Extraction \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.metadataViewer"},"roles/bigquery.metadataViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.jobs.listAll")," \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"List all jobs (queries) submitted by any user. Needs for Lineage extraction. \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Lineage Extraction/Usage extraction"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/access-control#bigquery.resourceViewer"},"roles/bigquery.resourceViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"logging.logEntries.list")," \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Fetch log entries for lineage/usage data. Not required if ",(0,l.kt)("inlineCode",{parentName:"td"},"use_exported_bigquery_audit_metadata")," is enabled."),(0,l.kt)("td",{parentName:"tr",align:null},"Lineage Extraction/Usage extraction"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/logging/docs/access-control#logging.privateLogViewer"},"roles/logging.privateLogViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"logging.privateLogEntries.list")),(0,l.kt)("td",{parentName:"tr",align:null},"Fetch log entries for lineage/usage data. 
Not required if ",(0,l.kt)("inlineCode",{parentName:"td"},"use_exported_bigquery_audit_metadata")," is enabled."),(0,l.kt)("td",{parentName:"tr",align:null},"Lineage Extraction/Usage extraction"),(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/logging/docs/access-control#logging.privateLogViewer"},"roles/logging.privateLogViewer"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:null},(0,l.kt)("inlineCode",{parentName:"td"},"bigquery.tables.getData")," \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"Access table data to extract storage size, last updated at, data profiles etc."),(0,l.kt)("td",{parentName:"tr",align:null},"Profiling \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0"),(0,l.kt)("td",{parentName:"tr",align:null},"\xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0 \xa0")))),(0,l.kt)("h4",{id:"create-a-service-account-in-the-extractor-project"},"Create a service account in the Extractor Project"),(0,l.kt)("ol",null,(0,l.kt)("li",{parentName:"ol"},"Setup a ServiceAccount as per ",(0,l.kt)("a",{parentName:"li",href:"https://cloud.google.com/iam/docs/creating-managing-service-accounts#iam-service-accounts-create-console"},"BigQuery docs"),"\nand assign the previously created role to this service account."),(0,l.kt)("li",{parentName:"ol"},"Download a service account JSON keyfile.\nExample credential file:")),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-json"},'{\n "type": "service_account",\n "project_id": "project-id-1234567",\n "private_key_id": "d0121d0000882411234e11166c6aaa23ed5d74e0",\n "private_key": "-----BEGIN PRIVATE KEY-----\\nMIIyourkey\\n-----END PRIVATE KEY-----",\n "client_email": 
"test@suppproject-id-1234567.iam.gserviceaccount.com",\n "client_id": "113545814931671546333",\n "auth_uri": "https://accounts.google.com/o/oauth2/auth",\n "token_uri": "https://oauth2.googleapis.com/token",\n "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",\n "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%suppproject-id-1234567.iam.gserviceaccount.com"\n}\n')),(0,l.kt)("ol",{start:3},(0,l.kt)("li",{parentName:"ol"},(0,l.kt)("p",{parentName:"li"},"To provide credentials to the source, you can either:"),(0,l.kt)("p",{parentName:"li"},"Set an environment variable:"),(0,l.kt)("pre",{parentName:"li"},(0,l.kt)("code",{parentName:"pre",className:"language-sh"},'$ export GOOGLE_APPLICATION_CREDENTIALS="/path/to/keyfile.json"\n')),(0,l.kt)("p",{parentName:"li"},(0,l.kt)("em",{parentName:"p"},"or")),(0,l.kt)("p",{parentName:"li"},"Set credential config in your source based on the credential json file. For example:"),(0,l.kt)("pre",{parentName:"li"},(0,l.kt)("code",{parentName:"pre",className:"language-yml"},'credential:\n project_id: project-id-1234567\n private_key_id: "d0121d0000882411234e11166c6aaa23ed5d74e0"\n private_key: "-----BEGIN PRIVATE KEY-----\\nMIIyourkey\\n-----END PRIVATE KEY-----\\n"\n client_email: "test@suppproject-id-1234567.iam.gserviceaccount.com"\n client_id: "123456678890"\n')))),(0,l.kt)("h5",{id:"profiling-requirements"},"Profiling Requirements"),(0,l.kt)("p",null,"To profile BigQuery external tables backed by Google Drive document, you need to grant document's \"Viewer\" access to service account's email address (",(0,l.kt)("inlineCode",{parentName:"p"},"client_email"),' in credentials json file). To find the Google Drive document linked to BigQuery table, open the BigQuery console, locate the needed table, select "Details" from the drop-down menu in the top-right corner and refer "Source" field . 
To share access of Google Drive document, open the document, click "Share" in the top-right corner, add the service account\'s email address that needs "Viewer" access. ',(0,l.kt)("img",{parentName:"p",src:"https://github.com/datahub-project/static-assets/raw/main/imgs/integrations/bigquery/google_drive_share.png",alt:"Google Drive Sharing Dialog"})),(0,l.kt)("h3",{id:"lineage-computation-details"},"Lineage Computation Details"),(0,l.kt)("p",null,"When ",(0,l.kt)("inlineCode",{parentName:"p"},"use_exported_bigquery_audit_metadata")," is set to ",(0,l.kt)("inlineCode",{parentName:"p"},"true"),", lineage information will be computed using exported bigquery logs. On how to setup exported bigquery audit logs, refer to the following ",(0,l.kt)("a",{parentName:"p",href:"https://cloud.google.com/bigquery/docs/reference/auditlogs#defining_a_bigquery_log_sink_using_gcloud"},"docs"),' on BigQuery audit logs. Note that only protoPayloads with "type.googleapis.com/google.cloud.audit.BigQueryAuditMetadata" are supported by the current ingestion version. The ',(0,l.kt)("inlineCode",{parentName:"p"},"bigquery_audit_metadata_datasets")," parameter will be used only if ",(0,l.kt)("inlineCode",{parentName:"p"},"use_exported_bigquery_audit_metadat")," is set to ",(0,l.kt)("inlineCode",{parentName:"p"},"true"),"."),(0,l.kt)("p",null,"Note: the ",(0,l.kt)("inlineCode",{parentName:"p"},"bigquery_audit_metadata_datasets")," parameter receives a list of datasets, in the format $PROJECT.$DATASET. This way queries from a multiple number of projects can be used to compute lineage information."),(0,l.kt)("p",null,"Note: Since bigquery source also supports dataset level lineage, the auth client will require additional permissions to be able to access the google audit logs. 
Refer the permissions section in bigquery-usage section below which also accesses the audit logs."),(0,l.kt)("h3",{id:"profiling-details"},"Profiling Details"),(0,l.kt)("p",null,"For performance reasons, we only profile the latest partition for partitioned tables and the latest shard for sharded tables.\nYou can set partition explicitly with ",(0,l.kt)("inlineCode",{parentName:"p"},"partition.partition_datetime")," property if you want, though note that partition config will be applied to all partitioned tables."),(0,l.kt)("h3",{id:"caveats"},"Caveats"),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},"For materialized views, lineage is dependent on logs being retained. If your GCP logging is retained for 30 days (default) and 30 days have passed since the creation of the materialized view we won't be able to get lineage for them.")),(0,l.kt)("h3",{id:"cli-based-ingestion"},"CLI based Ingestion"),(0,l.kt)("h4",{id:"install-the-plugin"},"Install the Plugin"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},"pip install 'acryl-datahub[bigquery]'\n")),(0,l.kt)("h3",{id:"starter-recipe"},"Starter Recipe"),(0,l.kt)("p",null,"Check out the following recipe to get started with ingestion! 
See ",(0,l.kt)("a",{parentName:"p",href:"#config-details"},"below")," for full configuration options."),(0,l.kt)("p",null,"For general pointers on writing and running a recipe, see our ",(0,l.kt)("a",{parentName:"p",href:"/docs/metadata-ingestion#recipes"},"main recipe guide"),"."),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-yaml"},"source:\n type: bigquery\n config:\n # `schema_pattern` for BQ Datasets\n schema_pattern:\n allow:\n - finance_bq_dataset\n table_pattern:\n deny:\n # The exact name of the table is revenue_table_name\n # The reason we have this `.*` at the beginning is because the current implmenetation of table_pattern is testing\n # project_id.dataset_name.table_name\n # We will improve this in the future\n - .*revenue_table_name\n include_table_lineage: true\n include_usage_statistics: true\n profiling:\n enabled: true\n profile_table_level_only: true\n\nsink:\n # sink configs\n\n")),(0,l.kt)("h3",{id:"config-details"},"Config Details"),(0,l.kt)(i.Z,{mdxType:"Tabs"},(0,l.kt)(s.Z,{value:"options",label:"Options",default:!0,mdxType:"TabItem"},(0,l.kt)("p",null,"Note that a ",(0,l.kt)("inlineCode",{parentName:"p"},".")," is used to denote nested fields in the YAML recipe."),(0,l.kt)("div",{className:"config-table"},(0,l.kt)("table",null,(0,l.kt)("thead",{parentName:"table"},(0,l.kt)("tr",{parentName:"thead"},(0,l.kt)("th",{parentName:"tr",align:"left"},"Field"),(0,l.kt)("th",{parentName:"tr",align:"left"},"Description"))),(0,l.kt)("tbody",{parentName:"table"},(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"bigquery_audit_metadata_datasets"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"bucket_duration"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"Enum"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Size of the time window to aggregate usage stats. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"DAY")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"capture_dataset_label_as_tag"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Capture BigQuery dataset labels as DataHub tag ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"capture_table_label_as_tag"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Capture BigQuery table labels as DataHub tag ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"column_limit"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Maximum number of columns to process in a table. This is a low level config property which should be touched with care. This restriction is needed because excessively wide tables can result in failure to ingest the schema. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"300")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"convert_urns_to_lowercase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Convert urns to lowercase. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"debug_include_full_payloads"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Include full payload into events. It is only for debugging and internal use. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"enable_legacy_sharded_table_support"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Use the legacy sharded table urn suffix added. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"end_time"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string(date-time)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Latest date of lineage/usage to consider. Default: Current time in UTC")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"extra_client_options"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"object"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Additional options to pass to google.cloud.logging_v2.client.Client. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"extract_column_lineage"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"If enabled, generate column level lineage. Requires lineage_use_sql_parser to be enabled. This and ",(0,l.kt)("inlineCode",{parentName:"td"},"incremental_lineage")," cannot both be enabled. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"extract_lineage_from_catalog"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"This flag enables the data lineage extraction from Data Lineage API exposed by Google Data Catalog. NOTE: This extractor can't build views lineage. It's recommended to enable the view's DDL parsing. Read the docs to have more information about: ",(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/data-catalog/docs/concepts/about-data-lineage"},"https://cloud.google.com/data-catalog/docs/concepts/about-data-lineage")," ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_data_platform_instance"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to create a DataPlatformInstance aspect, equal to the BigQuery project id. If enabled, will cause redundancy in the browse path for BigQuery entities in the UI, because the project id is represented as the top-level container. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_external_url"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to populate BigQuery Console url to Datasets/Tables ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_table_lineage"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Option to enable/disable lineage generation. Is enabled by default. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_table_location_lineage"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"If the source supports it, include table lineage to the underlying storage location. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_tables"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether tables should be ingested. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_usage_statistics"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Generate usage statistic ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"include_views"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether views should be ingested. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"incremental_lineage"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"When enabled, emits lineage as incremental to existing lineage already in DataHub. When disabled, re-states lineage on each run. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"lineage_parse_view_ddl"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Sql parse view ddl to get lineage. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"lineage_sql_parser_use_raw_names"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"This parameter ignores the lowercase pattern stipulated in the SQLParser. NOTE: Ignored if lineage_use_sql_parser is False. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"lineage_use_sql_parser"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Use sql parser to resolve view/table lineage. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"log_page_size"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"The number of log item will be queried per page for lineage collection ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"1000")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"match_fully_qualified_names"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether ",(0,l.kt)("inlineCode",{parentName:"td"},"dataset_pattern")," is matched against fully qualified dataset name ",(0,l.kt)("inlineCode",{parentName:"td"},"."),". 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"max_query_duration"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"number(time-delta)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Correction to pad start_time and end_time with. For handling the case where the read happens within our time range but the query completion event is delayed and happens after the configured end time. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"900.0")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"number_of_datasets_process_in_batch_if_profiling_enabled"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Number of partitioned table queried in batch when getting metadata. This is a low level config property which should be touched with care. This restriction is needed because we query partitions system view which throws error if we try to touch too many tables. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"200")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"options"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"object"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Any options specified here will be passed to ",(0,l.kt)("a",{parentName:"td",href:"https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine"},"SQLAlchemy.create_engine")," as kwargs.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"platform_instance"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"The instance of the platform that all assets produced by this recipe belong to")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"project_id"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"[deprecated]"," Use project_id_pattern or project_ids instead.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"project_ids"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"project_on_behalf"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"[Advanced]"," The BigQuery project in which queries are executed. Will be passed when creating a job. If not passed, falls back to the project associated with the service account.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"rate_limit"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Should we rate limit requests made to API. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"requests_per_min"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Used to control number of API calls made per min. Only used when ",(0,l.kt)("inlineCode",{parentName:"td"},"rate_limit")," is set to ",(0,l.kt)("inlineCode",{parentName:"td"},"True"),". 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"60")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"scheme"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"default-line "},"Default: ",(0,l.kt)("span",{className:"default-value"},"bigquery")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"sharded_table_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"The regex pattern to match sharded tables and group as one table. This is a very low level config parameter, only change if you know what you are doing, ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"((.+)","[","_","$","]",")?(\\d","{","8","}",")$")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"sql_parser_use_external_process"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"When enabled, sql parser will run in isolated in a separate process. This can affect processing time but can protect from sql parser's mem leak. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"start_time"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string(date-time)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on ",(0,l.kt)("inlineCode",{parentName:"td"},"bucket_duration"),"). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"store_last_lineage_extraction_timestamp"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Enable checking last lineage extraction date in store. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"store_last_profiling_timestamps"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Enable storing last profile timestamp in store. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"store_last_usage_extraction_timestamp"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Enable checking last usage timestamp in store. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"temp_table_dataset_prefix"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"If you are creating temp tables in a dataset with a particular prefix you can use this config to set the prefix for the dataset. This is to support workflows from before bigquery's introduction of temp tables. By default we use ",(0,l.kt)("inlineCode",{parentName:"td"},"_")," because of datasets that begin with an underscore are hidden by default ",(0,l.kt)("a",{parentName:"td",href:"https://cloud.google.com/bigquery/docs/datasets#dataset-naming"},"https://cloud.google.com/bigquery/docs/datasets#dataset-naming"),". 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"_")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"upstream_lineage_in_report"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Useful for debugging lineage information. Set to True to see the raw lineage created internally. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"use_date_sharded_audit_log_tables"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to read date sharded tables or time partitioned tables when extracting usage from exported audit logs. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"use_exported_bigquery_audit_metadata"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"When configured, use BigQueryAuditMetadata in bigquery_audit_metadata_datasets to compute lineage information. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"env"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"The environment that all assets produced by this connector belong to ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"PROD")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"credential"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"BigQueryCredential"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"BigQuery credential informations")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"client_email"),"\xa0",(0,l.kt)("abbr",{title:"Required if credential is set"},"\u2753"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Client email")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"client_id"),"\xa0",(0,l.kt)("abbr",{title:"Required if credential is set"},"\u2753"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Client 
Id")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"private_key"),"\xa0",(0,l.kt)("abbr",{title:"Required if credential is set"},"\u2753"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Private key in a form of '-----BEGIN PRIVATE KEY-----\\nprivate-key\\n-----END PRIVATE KEY-----\\n'")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"private_key_id"),"\xa0",(0,l.kt)("abbr",{title:"Required if credential is set"},"\u2753"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Private key id")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"project_id"),"\xa0",(0,l.kt)("abbr",{title:"Required if credential is set"},"\u2753"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Project id to set the credentials")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"auth_provider_x509_cert_url"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Auth provider x509 certificate url ",(0,l.kt)("div",{className:"default-line 
default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},(0,l.kt)("a",{parentName:"td",href:"https://www.googleapis.com/oauth2/v1/certs"},"https://www.googleapis.com/oauth2/v1/certs"))))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"auth_uri"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Authentication uri ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},(0,l.kt)("a",{parentName:"td",href:"https://accounts.google.com/o/oauth2/auth"},"https://accounts.google.com/o/oauth2/auth"))))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"client_x509_cert_url"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"If not set it will be default to ",(0,l.kt)("a",{parentName:"td",href:"https://www.googleapis.com/robot/v1/metadata/x509/client_email"},"https://www.googleapis.com/robot/v1/metadata/x509/client_email"))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"token_uri"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Token uri ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: 
",(0,l.kt)("span",{className:"default-value"},(0,l.kt)("a",{parentName:"td",href:"https://oauth2.googleapis.com/token"},"https://oauth2.googleapis.com/token"))))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"credential."),(0,l.kt)("span",{className:"path-main"},"type"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Authentication type ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"service","_","account")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"dataset_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Regex patterns for dataset to filter in ingestion. Specify regex to only match the schema name. e.g. 
to match all tables in schema analytics, use the regex 'analytics' ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"dataset_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"dataset_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"dataset_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"domain"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"map(str,AllowDenyPattern)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"A class to store allow deny regexes")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"domain.",(0,l.kt)("inlineCode",{parentName:"td"},"key"),"."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"domain.",(0,l.kt)("inlineCode",{parentName:"td"},"key"),"."),(0,l.kt)("span",{className:"path-main"},"deny"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"domain.",(0,l.kt)("inlineCode",{parentName:"td"},"key"),"."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"profile_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the ",(0,l.kt)("inlineCode",{parentName:"td"},"table_pattern")," will be considered. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profile_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profile_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profile_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"project_id_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Regex patterns for project_id to filter in ingestion. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"project_id_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"project_id_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"project_id_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"schema_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics' ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"schema_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"schema_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"schema_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"table_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. 
to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*' ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"table_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"table_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"table_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"usage"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"BigQueryUsageConfig"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Usage related configs ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","bucket","_","duration","'",": ","'","DAY","'",", ","'","end","_","time","'",": ","'","2023-08-24...")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"apply_view_usage_to_tables"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to apply view's usage to its base tables. If set to False, uses sql parser and applies usage to views / tables mentioned in the query. If set to True, usage is applied to base tables only. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"bucket_duration"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"Enum"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Size of the time window to aggregate usage stats. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"DAY")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"end_time"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string(date-time)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Latest date of lineage/usage to consider. Default: Current time in UTC")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"format_sql_queries"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to format sql queries ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"include_operational_stats"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to display operational stats. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"include_read_operational_stats"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to report read operational stats. Experimental. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"include_top_n_queries"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ingest the top_n_queries. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"max_query_duration"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"number(time-delta)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Correction to pad start_time and end_time with. For handling the case where the read happens within our time range but the query completion event is delayed and happens after the configured end time. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"900.0")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"start_time"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string(date-time)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on ",(0,l.kt)("inlineCode",{parentName:"td"},"bucket_duration"),"). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"top_n_queries"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Number of top queries to save to each table. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"10")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage."),(0,l.kt)("span",{className:"path-main"},"user_email_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"regex patterns for user emails to filter in usage. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage.user_email_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage.user_email_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"usage.user_email_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"view_pattern"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"AllowDenyPattern"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*' ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","allow","'",": ","[","'",".","*","'","]",", ","'","deny","'",": ","[","]",", ","'","ignoreCase","'",": True","}")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"view_pattern."),(0,l.kt)("span",{className:"path-main"},"allow"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"view_pattern."),(0,l.kt)("span",{className:"path-main"},"deny"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"array(string)"))),(0,l.kt)("td",{parentName:"tr",align:"left"})),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"view_pattern."),(0,l.kt)("span",{className:"path-main"},"ignoreCase"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to ignore case sensitivity during pattern matching. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"profiling"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"GEProfilingConfig"))),(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"default-line "},"Default: ",(0,l.kt)("span",{className:"default-value"},"{","'","enabled","'",": False, ","'","operation","_","config","'",": ","{","'","lower","_","fre...")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"catch_exceptions"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"default-line "},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"enabled"))," 
",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether profiling should be done. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"field_sample_values_limit"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Upper limit for number of sample values to collect for all columns. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"20")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_distinct_count"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the number of distinct values for each column. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_distinct_value_frequencies"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for distinct value frequencies. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_histogram"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the histogram for numeric fields. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_max_value"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the max value of numeric columns. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_mean_value"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the mean value of numeric columns. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_median_value"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the median value of numeric columns. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_min_value"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the min value of numeric columns. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_null_count"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the number of nulls for each column. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_quantiles"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the quantiles of numeric columns. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_sample_values"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the sample values for all columns. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"include_field_stddev_value"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to profile for the standard deviation of numeric columns. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"limit"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Max number of documents to profile. By default, profiles all documents.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"max_number_of_fields_to_profile"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"A positive integer that specifies the maximum number of columns to profile for any table. ",(0,l.kt)("inlineCode",{parentName:"td"},"None")," implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"max_workers"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Number of worker threads to use for profiling. Set to 1 to disable. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"10")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"offset"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Offset in documents to profile. By default, uses no offset.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"partition_datetime"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"string(date-time)"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. 
Only Bigquery supports this.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"partition_profiling_enabled"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"default-line "},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"profile_if_updated_since_days"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"number"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Profile table only if it has been updated since these many number of days. If set to ",(0,l.kt)("inlineCode",{parentName:"td"},"null"),", no constraint of last modified time for tables to profile. Supported only in ",(0,l.kt)("inlineCode",{parentName:"td"},"snowflake")," and ",(0,l.kt)("inlineCode",{parentName:"td"},"BigQuery"),".")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"profile_table_level_only"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to perform profiling at table-level only, or include column-level profiling as well. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"profile_table_row_count_estimate_only"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"profile_table_row_limit"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Profile tables only if their row count is less then specified count. If set to ",(0,l.kt)("inlineCode",{parentName:"td"},"null"),", no limit on the row count of tables to profile. 
Supported only in ",(0,l.kt)("inlineCode",{parentName:"td"},"snowflake")," and ",(0,l.kt)("inlineCode",{parentName:"td"},"BigQuery")," ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"5000000")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"profile_table_size_limit"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Profile tables only if their size is less then specified GBs. If set to ",(0,l.kt)("inlineCode",{parentName:"td"},"null"),", no limit on the size of tables to profile. Supported only in ",(0,l.kt)("inlineCode",{parentName:"td"},"snowflake")," and ",(0,l.kt)("inlineCode",{parentName:"td"},"BigQuery")," ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"5")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"query_combiner_enabled"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("em",{parentName:"td"},"This feature is still experimental and can be disabled if it causes issues.")," Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"report_dropped_profiles"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to report datasets or dataset columns which were not profiled. Set to ",(0,l.kt)("inlineCode",{parentName:"td"},"True")," for debugging purposes. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"turn_off_expensive_profiling_metrics"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling."),(0,l.kt)("span",{className:"path-main"},"operation_config"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"OperationConfig"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Experimental feature. 
To specify operation configs.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling.operation_config."),(0,l.kt)("span",{className:"path-main"},"lower_freq_profile_enabled"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling.operation_config."),(0,l.kt)("span",{className:"path-main"},"profile_date_of_month"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Number between 1 to 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take affect.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"profiling.operation_config."),(0,l.kt)("span",{className:"path-main"},"profile_day_of_week"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"integer"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Number between 0 to 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. 
If not specified, defaults to Nothing and this field does not take affect.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-main"},"stateful_ingestion"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"StatefulStaleMetadataRemovalConfig"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Base specialized config for Stateful Ingestion with stale metadata removal capability.")),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"stateful_ingestion."),(0,l.kt)("span",{className:"path-main"},"enabled"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"The type of the ingestion state provider registered with datahub. ",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"False")))),(0,l.kt)("tr",{parentName:"tbody"},(0,l.kt)("td",{parentName:"tr",align:"left"},(0,l.kt)("div",{className:"path-line"},(0,l.kt)("span",{className:"path-prefix"},"stateful_ingestion."),(0,l.kt)("span",{className:"path-main"},"remove_stale_metadata"))," ",(0,l.kt)("div",{className:"type-name-line"},(0,l.kt)("span",{className:"type-name"},"boolean"))),(0,l.kt)("td",{parentName:"tr",align:"left"},"Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled. 
",(0,l.kt)("div",{className:"default-line default-line-with-docs"},"Default: ",(0,l.kt)("span",{className:"default-value"},"True")))))))),(0,l.kt)(s.Z,{value:"schema",label:"Schema",mdxType:"TabItem"},(0,l.kt)("p",null,"The ",(0,l.kt)("a",{parentName:"p",href:"https://json-schema.org/"},"JSONSchema")," for this configuration is inlined below."),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-javascript"},'{\n "title": "BigQueryV2Config",\n "description": "Base configuration class for stateful ingestion for source configs to inherit from.",\n "type": "object",\n "properties": {\n "store_last_profiling_timestamps": {\n "title": "Store Last Profiling Timestamps",\n "description": "Enable storing last profile timestamp in store.",\n "default": false,\n "type": "boolean"\n },\n "incremental_lineage": {\n "title": "Incremental Lineage",\n "description": "When enabled, emits lineage as incremental to existing lineage already in DataHub. When disabled, re-states lineage on each run.",\n "default": true,\n "type": "boolean"\n },\n "sql_parser_use_external_process": {\n "title": "Sql Parser Use External Process",\n "description": "When enabled, sql parser will run in isolated in a separate process. This can affect processing time but can protect from sql parser\'s mem leak.",\n "default": false,\n "type": "boolean"\n },\n "store_last_lineage_extraction_timestamp": {\n "title": "Store Last Lineage Extraction Timestamp",\n "description": "Enable checking last lineage extraction date in store.",\n "default": false,\n "type": "boolean"\n },\n "bucket_duration": {\n "description": "Size of the time window to aggregate usage stats.",\n "default": "DAY",\n "allOf": [\n {\n "$ref": "#/definitions/BucketDuration"\n }\n ]\n },\n "end_time": {\n "title": "End Time",\n "description": "Latest date of lineage/usage to consider. 
Default: Current time in UTC",\n "type": "string",\n "format": "date-time"\n },\n "start_time": {\n "title": "Start Time",\n "description": "Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on `bucket_duration`). You can also specify relative time with respect to end_time such as \'-7 days\' Or \'-7d\'.",\n "type": "string",\n "format": "date-time"\n },\n "store_last_usage_extraction_timestamp": {\n "title": "Store Last Usage Extraction Timestamp",\n "description": "Enable checking last usage timestamp in store.",\n "default": true,\n "type": "boolean"\n },\n "env": {\n "title": "Env",\n "description": "The environment that all assets produced by this connector belong to",\n "default": "PROD",\n "type": "string"\n },\n "platform_instance": {\n "title": "Platform Instance",\n "description": "The instance of the platform that all assets produced by this recipe belong to",\n "type": "string"\n },\n "stateful_ingestion": {\n "$ref": "#/definitions/StatefulStaleMetadataRemovalConfig"\n },\n "options": {\n "title": "Options",\n "description": "Any options specified here will be passed to [SQLAlchemy.create_engine](https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine) as kwargs.",\n "type": "object"\n },\n "schema_pattern": {\n "title": "Schema Pattern",\n "description": "Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex \'analytics\'",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "table_pattern": {\n "title": "Table Pattern",\n "description": "Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. 
to match all tables starting with customer in Customer database and public schema, use the regex \'Customer.public.customer.*\'",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "view_pattern": {\n "title": "View Pattern",\n "description": "Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex \'Customer.public.customer.*\'",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "profile_pattern": {\n "title": "Profile Pattern",\n "description": "Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the `table_pattern` will be considered.",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "domain": {\n "title": "Domain",\n "description": "Attach domains to databases, schemas or tables during ingestion using regex patterns. Domain key can be a guid like *urn:li:domain:ec428203-ce86-4db3-985d-5a8ee6df32ba* or a string like \\"Marketing\\".) If you provide strings, then datahub will attempt to resolve this name to a guid, and will error out if this fails. 
There can be multiple domain keys specified.",\n "default": {},\n "type": "object",\n "additionalProperties": {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n },\n "include_views": {\n "title": "Include Views",\n "description": "Whether views should be ingested.",\n "default": true,\n "type": "boolean"\n },\n "include_tables": {\n "title": "Include Tables",\n "description": "Whether tables should be ingested.",\n "default": true,\n "type": "boolean"\n },\n "include_table_location_lineage": {\n "title": "Include Table Location Lineage",\n "description": "If the source supports it, include table lineage to the underlying storage location.",\n "default": true,\n "type": "boolean"\n },\n "profiling": {\n "title": "Profiling",\n "default": {\n "enabled": false,\n "operation_config": {\n "lower_freq_profile_enabled": false,\n "profile_day_of_week": null,\n "profile_date_of_month": null\n },\n "limit": null,\n "offset": null,\n "report_dropped_profiles": false,\n "turn_off_expensive_profiling_metrics": false,\n "profile_table_level_only": false,\n "include_field_null_count": true,\n "include_field_distinct_count": true,\n "include_field_min_value": true,\n "include_field_max_value": true,\n "include_field_mean_value": true,\n "include_field_median_value": true,\n "include_field_stddev_value": true,\n "include_field_quantiles": false,\n "include_field_distinct_value_frequencies": false,\n "include_field_histogram": false,\n "include_field_sample_values": true,\n "field_sample_values_limit": 20,\n "max_number_of_fields_to_profile": null,\n "profile_if_updated_since_days": null,\n "profile_table_size_limit": 5,\n "profile_table_row_limit": 5000000,\n "profile_table_row_count_estimate_only": false,\n "max_workers": 10,\n "query_combiner_enabled": true,\n "catch_exceptions": true,\n "partition_profiling_enabled": true,\n "partition_datetime": null\n },\n "allOf": [\n {\n "$ref": "#/definitions/GEProfilingConfig"\n }\n ]\n },\n "rate_limit": {\n "title": "Rate Limit",\n 
"description": "Should we rate limit requests made to API.",\n "default": false,\n "type": "boolean"\n },\n "requests_per_min": {\n "title": "Requests Per Min",\n "description": "Used to control number of API calls made per min. Only used when `rate_limit` is set to `True`.",\n "default": 60,\n "type": "integer"\n },\n "temp_table_dataset_prefix": {\n "title": "Temp Table Dataset Prefix",\n "description": "If you are creating temp tables in a dataset with a particular prefix you can use this config to set the prefix for the dataset. This is to support workflows from before bigquery\'s introduction of temp tables. By default we use `_` because of datasets that begin with an underscore are hidden by default https://cloud.google.com/bigquery/docs/datasets#dataset-naming.",\n "default": "_",\n "type": "string"\n },\n "sharded_table_pattern": {\n "title": "Sharded Table Pattern",\n "description": "The regex pattern to match sharded tables and group as one table. This is a very low level config parameter, only change if you know what you are doing, ",\n "default": "((.+)[_$])?(\\\\d{8})$",\n "deprecated": true,\n "type": "string"\n },\n "project_id_pattern": {\n "title": "Project Id Pattern",\n "description": "Regex patterns for project_id to filter in ingestion.",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "usage": {\n "title": "Usage",\n "description": "Usage related configs",\n "default": {\n "bucket_duration": "DAY",\n "end_time": "2023-08-24T22:32:27.545907+00:00",\n "start_time": "2023-08-23T00:00:00+00:00",\n "top_n_queries": 10,\n "user_email_pattern": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "include_operational_stats": true,\n "include_read_operational_stats": false,\n "format_sql_queries": false,\n "include_top_n_queries": true,\n "max_query_duration": 900.0,\n "apply_view_usage_to_tables": false\n },\n "allOf": [\n {\n 
"$ref": "#/definitions/BigQueryUsageConfig"\n }\n ]\n },\n "include_usage_statistics": {\n "title": "Include Usage Statistics",\n "description": "Generate usage statistic",\n "default": true,\n "type": "boolean"\n },\n "capture_table_label_as_tag": {\n "title": "Capture Table Label As Tag",\n "description": "Capture BigQuery table labels as DataHub tag",\n "default": false,\n "type": "boolean"\n },\n "capture_dataset_label_as_tag": {\n "title": "Capture Dataset Label As Tag",\n "description": "Capture BigQuery dataset labels as DataHub tag",\n "default": false,\n "type": "boolean"\n },\n "dataset_pattern": {\n "title": "Dataset Pattern",\n "description": "Regex patterns for dataset to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex \'analytics\'",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "match_fully_qualified_names": {\n "title": "Match Fully Qualified Names",\n "description": "Whether `dataset_pattern` is matched against fully qualified dataset name `.`.",\n "default": false,\n "type": "boolean"\n },\n "include_external_url": {\n "title": "Include External Url",\n "description": "Whether to populate BigQuery Console url to Datasets/Tables",\n "default": true,\n "type": "boolean"\n },\n "include_data_platform_instance": {\n "title": "Include Data Platform Instance",\n "description": "Whether to create a DataPlatformInstance aspect, equal to the BigQuery project id. If enabled, will cause redundancy in the browse path for BigQuery entities in the UI, because the project id is represented as the top-level container.",\n "default": false,\n "type": "boolean"\n },\n "debug_include_full_payloads": {\n "title": "Debug Include Full Payloads",\n "description": "Include full payload into events. 
It is only for debugging and internal use.",\n "default": false,\n "type": "boolean"\n },\n "number_of_datasets_process_in_batch_if_profiling_enabled": {\n "title": "Number Of Datasets Process In Batch If Profiling Enabled",\n "description": "Number of partitioned table queried in batch when getting metadata. This is a low level config property which should be touched with care. This restriction is needed because we query partitions system view which throws error if we try to touch too many tables.",\n "default": 200,\n "type": "integer"\n },\n "column_limit": {\n "title": "Column Limit",\n "description": "Maximum number of columns to process in a table. This is a low level config property which should be touched with care. This restriction is needed because excessively wide tables can result in failure to ingest the schema.",\n "default": 300,\n "type": "integer"\n },\n "project_id": {\n "title": "Project Id",\n "description": "[deprecated] Use project_id_pattern or project_ids instead.",\n "type": "string"\n },\n "project_ids": {\n "title": "Project Ids",\n "description": "Ingests specified project_ids. Use this property if you want to specify what projects to ingest or don\'t want to give project resourcemanager.projects.list to your service account. Overrides `project_id_pattern`.",\n "type": "array",\n "items": {\n "type": "string"\n }\n },\n "project_on_behalf": {\n "title": "Project On Behalf",\n "description": "[Advanced] The BigQuery project in which queries are executed. Will be passed when creating a job. 
If not passed, falls back to the project associated with the service account.",\n "type": "string"\n },\n "lineage_use_sql_parser": {\n "title": "Lineage Use Sql Parser",\n "description": "Use sql parser to resolve view/table lineage.",\n "default": true,\n "type": "boolean"\n },\n "lineage_parse_view_ddl": {\n "title": "Lineage Parse View Ddl",\n "description": "Sql parse view ddl to get lineage.",\n "default": true,\n "type": "boolean"\n },\n "lineage_sql_parser_use_raw_names": {\n "title": "Lineage Sql Parser Use Raw Names",\n "description": "This parameter ignores the lowercase pattern stipulated in the SQLParser. NOTE: Ignored if lineage_use_sql_parser is False.",\n "default": false,\n "type": "boolean"\n },\n "extract_column_lineage": {\n "title": "Extract Column Lineage",\n "description": "If enabled, generate column level lineage. Requires lineage_use_sql_parser to be enabled. This and `incremental_lineage` cannot both be enabled.",\n "default": false,\n "type": "boolean"\n },\n "extract_lineage_from_catalog": {\n "title": "Extract Lineage From Catalog",\n "description": "This flag enables the data lineage extraction from Data Lineage API exposed by Google Data Catalog. NOTE: This extractor can\'t build views lineage. It\'s recommended to enable the view\'s DDL parsing. 
Read the docs to have more information about: https://cloud.google.com/data-catalog/docs/concepts/about-data-lineage",\n "default": false,\n "type": "boolean"\n },\n "convert_urns_to_lowercase": {\n "title": "Convert Urns To Lowercase",\n "description": "Convert urns to lowercase.",\n "default": false,\n "type": "boolean"\n },\n "enable_legacy_sharded_table_support": {\n "title": "Enable Legacy Sharded Table Support",\n "description": "Use the legacy sharded table urn suffix added.",\n "default": true,\n "type": "boolean"\n },\n "scheme": {\n "title": "Scheme",\n "default": "bigquery",\n "type": "string"\n },\n "log_page_size": {\n "title": "Log Page Size",\n "description": "The number of log item will be queried per page for lineage collection",\n "default": 1000,\n "exclusiveMinimum": 0,\n "type": "integer"\n },\n "credential": {\n "title": "Credential",\n "description": "BigQuery credential informations",\n "allOf": [\n {\n "$ref": "#/definitions/BigQueryCredential"\n }\n ]\n },\n "extra_client_options": {\n "title": "Extra Client Options",\n "description": "Additional options to pass to google.cloud.logging_v2.client.Client.",\n "default": {},\n "type": "object"\n },\n "include_table_lineage": {\n "title": "Include Table Lineage",\n "description": "Option to enable/disable lineage generation. Is enabled by default.",\n "default": true,\n "type": "boolean"\n },\n "max_query_duration": {\n "title": "Max Query Duration",\n "description": "Correction to pad start_time and end_time with. 
For handling the case where the read happens within our time range but the query completion event is delayed and happens after the configured end time.",\n "default": 900.0,\n "type": "number",\n "format": "time-delta"\n },\n "bigquery_audit_metadata_datasets": {\n "title": "Bigquery Audit Metadata Datasets",\n "description": "A list of datasets that contain a table named cloudaudit_googleapis_com_data_access which contain BigQuery audit logs, specifically, those containing BigQueryAuditMetadata. It is recommended that the project of the dataset is also specified, for example, projectA.datasetB.",\n "type": "array",\n "items": {\n "type": "string"\n }\n },\n "use_exported_bigquery_audit_metadata": {\n "title": "Use Exported Bigquery Audit Metadata",\n "description": "When configured, use BigQueryAuditMetadata in bigquery_audit_metadata_datasets to compute lineage information.",\n "default": false,\n "type": "boolean"\n },\n "use_date_sharded_audit_log_tables": {\n "title": "Use Date Sharded Audit Log Tables",\n "description": "Whether to read date sharded tables or time partitioned tables when extracting usage from exported audit logs.",\n "default": false,\n "type": "boolean"\n },\n "upstream_lineage_in_report": {\n "title": "Upstream Lineage In Report",\n "description": "Useful for debugging lineage information. Set to True to see the raw lineage created internally.",\n "default": false,\n "type": "boolean"\n }\n },\n "additionalProperties": false,\n "definitions": {\n "BucketDuration": {\n "title": "BucketDuration",\n "description": "An enumeration.",\n "enum": [\n "DAY",\n "HOUR"\n ],\n "type": "string"\n },\n "DynamicTypedStateProviderConfig": {\n "title": "DynamicTypedStateProviderConfig",\n "type": "object",\n "properties": {\n "type": {\n "title": "Type",\n "description": "The type of the state provider to use. 
For DataHub use `datahub`",\n "type": "string"\n },\n "config": {\n "title": "Config",\n "description": "The configuration required for initializing the state provider. Default: The datahub_api config if set at pipeline level. Otherwise, the default DatahubClientConfig. See the defaults (https://github.com/datahub-project/datahub/blob/master/metadata-ingestion/src/datahub/ingestion/graph/client.py#L19)."\n }\n },\n "required": [\n "type"\n ],\n "additionalProperties": false\n },\n "StatefulStaleMetadataRemovalConfig": {\n "title": "StatefulStaleMetadataRemovalConfig",\n "description": "Base specialized config for Stateful Ingestion with stale metadata removal capability.",\n "type": "object",\n "properties": {\n "enabled": {\n "title": "Enabled",\n "description": "The type of the ingestion state provider registered with datahub.",\n "default": false,\n "type": "boolean"\n },\n "remove_stale_metadata": {\n "title": "Remove Stale Metadata",\n "description": "Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.",\n "default": true,\n "type": "boolean"\n }\n },\n "additionalProperties": false\n },\n "AllowDenyPattern": {\n "title": "AllowDenyPattern",\n "description": "A class to store allow deny regexes",\n "type": "object",\n "properties": {\n "allow": {\n "title": "Allow",\n "description": "List of regex patterns to include in ingestion",\n "default": [\n ".*"\n ],\n "type": "array",\n "items": {\n "type": "string"\n }\n },\n "deny": {\n "title": "Deny",\n "description": "List of regex patterns to exclude from ingestion.",\n "default": [],\n "type": "array",\n "items": {\n "type": "string"\n }\n },\n "ignoreCase": {\n "title": "Ignorecase",\n "description": "Whether to ignore case sensitivity during pattern matching.",\n "default": true,\n "type": "boolean"\n }\n },\n "additionalProperties": false\n },\n "OperationConfig": {\n "title": "OperationConfig",\n "type": "object",\n "properties": {\n 
"lower_freq_profile_enabled": {\n "title": "Lower Freq Profile Enabled",\n "description": "Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.",\n "default": false,\n "type": "boolean"\n },\n "profile_day_of_week": {\n "title": "Profile Day Of Week",\n "description": "Number between 0 to 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take affect.",\n "type": "integer"\n },\n "profile_date_of_month": {\n "title": "Profile Date Of Month",\n "description": "Number between 1 to 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take affect.",\n "type": "integer"\n }\n },\n "additionalProperties": false\n },\n "GEProfilingConfig": {\n "title": "GEProfilingConfig",\n "type": "object",\n "properties": {\n "enabled": {\n "title": "Enabled",\n "description": "Whether profiling should be done.",\n "default": false,\n "type": "boolean"\n },\n "operation_config": {\n "title": "Operation Config",\n "description": "Experimental feature. To specify operation configs.",\n "allOf": [\n {\n "$ref": "#/definitions/OperationConfig"\n }\n ]\n },\n "limit": {\n "title": "Limit",\n "description": "Max number of documents to profile. By default, profiles all documents.",\n "type": "integer"\n },\n "offset": {\n "title": "Offset",\n "description": "Offset in documents to profile. By default, uses no offset.",\n "type": "integer"\n },\n "report_dropped_profiles": {\n "title": "Report Dropped Profiles",\n "description": "Whether to report datasets or dataset columns which were not profiled. Set to `True` for debugging purposes.",\n "default": false,\n "type": "boolean"\n },\n "turn_off_expensive_profiling_metrics": {\n "title": "Turn Off Expensive Profiling Metrics",\n "description": "Whether to turn off expensive profiling or not. 
This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.",\n "default": false,\n "type": "boolean"\n },\n "profile_table_level_only": {\n "title": "Profile Table Level Only",\n "description": "Whether to perform profiling at table-level only, or include column-level profiling as well.",\n "default": false,\n "type": "boolean"\n },\n "include_field_null_count": {\n "title": "Include Field Null Count",\n "description": "Whether to profile for the number of nulls for each column.",\n "default": true,\n "type": "boolean"\n },\n "include_field_distinct_count": {\n "title": "Include Field Distinct Count",\n "description": "Whether to profile for the number of distinct values for each column.",\n "default": true,\n "type": "boolean"\n },\n "include_field_min_value": {\n "title": "Include Field Min Value",\n "description": "Whether to profile for the min value of numeric columns.",\n "default": true,\n "type": "boolean"\n },\n "include_field_max_value": {\n "title": "Include Field Max Value",\n "description": "Whether to profile for the max value of numeric columns.",\n "default": true,\n "type": "boolean"\n },\n "include_field_mean_value": {\n "title": "Include Field Mean Value",\n "description": "Whether to profile for the mean value of numeric columns.",\n "default": true,\n "type": "boolean"\n },\n "include_field_median_value": {\n "title": "Include Field Median Value",\n "description": "Whether to profile for the median value of numeric columns.",\n "default": true,\n "type": "boolean"\n },\n "include_field_stddev_value": {\n "title": "Include Field Stddev Value",\n "description": "Whether to profile for the standard deviation of numeric columns.",\n "default": true,\n "type": "boolean"\n },\n "include_field_quantiles": {\n "title": "Include Field Quantiles",\n "description": "Whether to profile for the quantiles of numeric columns.",\n "default": false,\n "type": 
"boolean"\n },\n "include_field_distinct_value_frequencies": {\n "title": "Include Field Distinct Value Frequencies",\n "description": "Whether to profile for distinct value frequencies.",\n "default": false,\n "type": "boolean"\n },\n "include_field_histogram": {\n "title": "Include Field Histogram",\n "description": "Whether to profile for the histogram for numeric fields.",\n "default": false,\n "type": "boolean"\n },\n "include_field_sample_values": {\n "title": "Include Field Sample Values",\n "description": "Whether to profile for the sample values for all columns.",\n "default": true,\n "type": "boolean"\n },\n "field_sample_values_limit": {\n "title": "Field Sample Values Limit",\n "description": "Upper limit for number of sample values to collect for all columns.",\n "default": 20,\n "type": "integer"\n },\n "max_number_of_fields_to_profile": {\n "title": "Max Number Of Fields To Profile",\n "description": "A positive integer that specifies the maximum number of columns to profile for any table. `None` implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.",\n "exclusiveMinimum": 0,\n "type": "integer"\n },\n "profile_if_updated_since_days": {\n "title": "Profile If Updated Since Days",\n "description": "Profile table only if it has been updated since these many number of days. If set to `null`, no constraint of last modified time for tables to profile. Supported only in `snowflake` and `BigQuery`.",\n "exclusiveMinimum": 0,\n "type": "number"\n },\n "profile_table_size_limit": {\n "title": "Profile Table Size Limit",\n "description": "Profile tables only if their size is less then specified GBs. If set to `null`, no limit on the size of tables to profile. Supported only in `snowflake` and `BigQuery`",\n "default": 5,\n "type": "integer"\n },\n "profile_table_row_limit": {\n "title": "Profile Table Row Limit",\n "description": "Profile tables only if their row count is less then specified count. 
If set to `null`, no limit on the row count of tables to profile. Supported only in `snowflake` and `BigQuery`",\n "default": 5000000,\n "type": "integer"\n },\n "profile_table_row_count_estimate_only": {\n "title": "Profile Table Row Count Estimate Only",\n "description": "Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL. ",\n "default": false,\n "type": "boolean"\n },\n "max_workers": {\n "title": "Max Workers",\n "description": "Number of worker threads to use for profiling. Set to 1 to disable.",\n "default": 10,\n "type": "integer"\n },\n "query_combiner_enabled": {\n "title": "Query Combiner Enabled",\n "description": "*This feature is still experimental and can be disabled if it causes issues.* Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.",\n "default": true,\n "type": "boolean"\n },\n "catch_exceptions": {\n "title": "Catch Exceptions",\n "default": true,\n "type": "boolean"\n },\n "partition_profiling_enabled": {\n "title": "Partition Profiling Enabled",\n "default": true,\n "type": "boolean"\n },\n "partition_datetime": {\n "title": "Partition Datetime",\n "description": "For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.",\n "type": "string",\n "format": "date-time"\n }\n },\n "additionalProperties": false\n },\n "BigQueryUsageConfig": {\n "title": "BigQueryUsageConfig",\n "type": "object",\n "properties": {\n "bucket_duration": {\n "description": "Size of the time window to aggregate usage stats.",\n "default": "DAY",\n "allOf": [\n {\n "$ref": "#/definitions/BucketDuration"\n }\n ]\n },\n "end_time": {\n "title": "End Time",\n "description": "Latest date of lineage/usage to consider. 
Default: Current time in UTC",\n "type": "string",\n "format": "date-time"\n },\n "start_time": {\n "title": "Start Time",\n "description": "Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on `bucket_duration`). You can also specify relative time with respect to end_time such as \'-7 days\' Or \'-7d\'.",\n "type": "string",\n "format": "date-time"\n },\n "top_n_queries": {\n "title": "Top N Queries",\n "description": "Number of top queries to save to each table.",\n "default": 10,\n "exclusiveMinimum": 0,\n "type": "integer"\n },\n "user_email_pattern": {\n "title": "User Email Pattern",\n "description": "regex patterns for user emails to filter in usage.",\n "default": {\n "allow": [\n ".*"\n ],\n "deny": [],\n "ignoreCase": true\n },\n "allOf": [\n {\n "$ref": "#/definitions/AllowDenyPattern"\n }\n ]\n },\n "include_operational_stats": {\n "title": "Include Operational Stats",\n "description": "Whether to display operational stats.",\n "default": true,\n "type": "boolean"\n },\n "include_read_operational_stats": {\n "title": "Include Read Operational Stats",\n "description": "Whether to report read operational stats. Experimental.",\n "default": false,\n "type": "boolean"\n },\n "format_sql_queries": {\n "title": "Format Sql Queries",\n "description": "Whether to format sql queries",\n "default": false,\n "type": "boolean"\n },\n "include_top_n_queries": {\n "title": "Include Top N Queries",\n "description": "Whether to ingest the top_n_queries.",\n "default": true,\n "type": "boolean"\n },\n "max_query_duration": {\n "title": "Max Query Duration",\n "description": "Correction to pad start_time and end_time with. 
For handling the case where the read happens within our time range but the query completion event is delayed and happens after the configured end time.",\n "default": 900.0,\n "type": "number",\n "format": "time-delta"\n },\n "apply_view_usage_to_tables": {\n "title": "Apply View Usage To Tables",\n "description": "Whether to apply view\'s usage to its base tables. If set to False, uses sql parser and applies usage to views / tables mentioned in the query. If set to True, usage is applied to base tables only.",\n "default": false,\n "type": "boolean"\n }\n },\n "additionalProperties": false\n },\n "BigQueryCredential": {\n "title": "BigQueryCredential",\n "type": "object",\n "properties": {\n "project_id": {\n "title": "Project Id",\n "description": "Project id to set the credentials",\n "type": "string"\n },\n "private_key_id": {\n "title": "Private Key Id",\n "description": "Private key id",\n "type": "string"\n },\n "private_key": {\n "title": "Private Key",\n "description": "Private key in a form of \'-----BEGIN PRIVATE KEY-----\\\\nprivate-key\\\\n-----END PRIVATE KEY-----\\\\n\'",\n "type": "string"\n },\n "client_email": {\n "title": "Client Email",\n "description": "Client email",\n "type": "string"\n },\n "client_id": {\n "title": "Client Id",\n "description": "Client Id",\n "type": "string"\n },\n "auth_uri": {\n "title": "Auth Uri",\n "description": "Authentication uri",\n "default": "https://accounts.google.com/o/oauth2/auth",\n "type": "string"\n },\n "token_uri": {\n "title": "Token Uri",\n "description": "Token uri",\n "default": "https://oauth2.googleapis.com/token",\n "type": "string"\n },\n "auth_provider_x509_cert_url": {\n "title": "Auth Provider X509 Cert Url",\n "description": "Auth provider x509 certificate url",\n "default": "https://www.googleapis.com/oauth2/v1/certs",\n "type": "string"\n },\n "type": {\n "title": "Type",\n "description": "Authentication type",\n "default": "service_account",\n "type": "string"\n },\n 
"client_x509_cert_url": {\n "title": "Client X509 Cert Url",\n "description": "If not set it will be default to https://www.googleapis.com/robot/v1/metadata/x509/client_email",\n "type": "string"\n }\n },\n "required": [\n "project_id",\n "private_key_id",\n "private_key",\n "client_email",\n "client_id"\n ],\n "additionalProperties": false\n }\n }\n}\n')))),(0,l.kt)("h3",{id:"code-coordinates"},"Code Coordinates"),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},"Class Name: ",(0,l.kt)("inlineCode",{parentName:"li"},"datahub.ingestion.source.bigquery_v2.bigquery.BigqueryV2Source")),(0,l.kt)("li",{parentName:"ul"},"Browse on ",(0,l.kt)("a",{parentName:"li",href:"https://github.com/datahub-project/datahub/blob/master/metadata-ingestion/src/datahub/ingestion/source/bigquery_v2/bigquery.py"},"GitHub"))),(0,l.kt)("h2",null,"Questions"),(0,l.kt)("p",null,"If you've got any questions on configuring ingestion for BigQuery, feel free to ping us on ",(0,l.kt)("a",{parentName:"p",href:"https://slack.datahubproject.io"},"our Slack"),"."))}u.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.fa73fa01.js b/assets/js/runtime~main.734fff28.js similarity index 98% rename from assets/js/runtime~main.fa73fa01.js rename to assets/js/runtime~main.734fff28.js index 292ee0991c262..8a1c76d221198 100644 --- a/assets/js/runtime~main.fa73fa01.js +++ b/assets/js/runtime~main.734fff28.js @@ -1 +1 @@ -(()=>{"use strict";var e,a,f,c,d,b={},t={};function r(e){var a=t[e];if(void 0!==a)return a.exports;var f=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(f.exports,f,f.exports,r),f.loaded=!0,f.exports}r.m=b,e=[],r.O=(a,f,c,d)=>{if(!f){var b=1/0;for(i=0;i=d)&&Object.keys(r.O).every((e=>r.O[e](f[o])))?f.splice(o--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[f,c,d]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,c){if(1&c&&(e=this(e)),8&c)return 
e;if("object"==typeof e&&e){if(4&c&&e.__esModule)return e;if(16&c&&"function"==typeof e.then)return e}var d=Object.create(null);r.r(d);var b={};a=a||[null,f({}),f([]),f(f)];for(var t=2&c&&e;"object"==typeof t&&!~a.indexOf(t);t=f(t))Object.getOwnPropertyNames(t).forEach((a=>b[a]=()=>e[a]));return b.default=()=>e,r.d(d,b),d},r.d=(e,a)=>{for(var f in a)r.o(a,f)&&!r.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,f)=>(r.f[f](e,a),a)),[])),r.u=e=>"assets/js/"+({53:"935f2afb",76:"e433513e",107:"10f4cd9f",114:"3439e832",135:"2ae2f03c",138:"952fdaa7",157:"dd11e1f5",210:"9fb8e83b",237:"559cce6e",241:"8915fe4a",261:"1cdd3ad8",273:"b45b8f2a",278:"a9ee1b2e",336:"55300bfc",342:"bded5420",368:"74e37280",511:"ce9cc878",543:"239ede39",548:"cbb51090",560:"11c4014b",567:"a6b57299",593:"2714f677",660:"b5c1e141",674:"d6e92d94",677:"e0c68876",692:"94c4aaa1",701:"4d58b9df",769:"4deec257",815:"4c58bc23",835:"f60aa932",858:"764aa33e",866:"582f4e0f",922:"1c5daeca",926:"6802b5bb",944:"fb16e8d8",987:"4f3a601b",1009:"a2001381",1036:"b05a1f44",1131:"2ae52255",1150:"69b79dad",1155:"f1ca4237",1199:"36622342",1366:"a0936084",1415:"1c9e2091",1439:"6684a94d",1619:"1645b7aa",1655:"2e552f37",1667:"7c50e773",1683:"7d2c9853",1810:"8581e84a",1811:"13f0f933",1816:"d49b7a82",1832:"c2702396",1854:"c006a637",1886:"97a71a4c",1959:"6ba4b6b7",1993:"558d392d",2005:"211eb021",2013:"b6d58f6e",2026:"46bc0ee8",2055:"b8cb6aeb",2125:"4673b9e3",2148:"f2df41f1",2206:"494ba276",2225:"a22be15c",2237:"62f996d0",2277:"0c71206d",2307:"85446872",2367:"5c3d3a81",2383:"0eeb3f77",2387:"e381afd8",2393:"81423764",2416:"e148fe41",2504:"b65e549a",2519:"f73df94e",2555:"51d2ccba",2635:"b2bffc66",2651:"9dad6126",2680:"236aade1",2692:"60330b81",2710:"222ef86b",2718:"40baa5b4",2734:"8770979a",2743:"e55a2f25",2768:"da41aefa",2825:"60d66e8d",2867:"96958afb",2923:"7dcfffe1",2954:"c9fe24a5",3001:"363813f6",3042:"418b62bc",3081:"dc48bf03",3121:"c306cfc9",3181:"8ac3eb33",3209:"
7f63ac80",3213:"12b4c537",3217:"02628582",3360:"49f5f15f",3382:"6fd60ff2",3467:"01c4b4c9",3478:"899897e4",3484:"8f1359e8",3495:"c22b5a3c",3508:"3302977a",3522:"5aaa2ddf",3526:"af6aa3b0",3571:"da4f9ec6",3580:"94732196",3589:"f2adeaeb",3613:"510b1a82",3670:"5947ae86",3699:"ab60f020",3705:"232fb92c",3713:"bfdbc90c",3739:"3196103f",3754:"923f5d68",3777:"179e51a2",3811:"8bdb1750",3817:"46925a21",3821:"eeebdb74",3855:"4dbd669a",3893:"d576a5e7",3940:"b30c8566",3995:"c85391c5",4e3:"6b5bcc88",4043:"14fc7887",4089:"93373db4",4110:"db6ac68b",4113:"8a488b1e",4153:"6509ccc3",4195:"c4f5d8e4",4207:"2380a642",4254:"d94a66d9",4311:"b83e28af",4330:"38f0a075",4332:"75ad91bf",4341:"e70edfe9",4342:"8b76922d",4348:"89d719b4",4355:"1fb350ae",4369:"3b80522b",4412:"1f0eff70",4414:"f9bdac24",4426:"ec69336c",4506:"94063305",4528:"db102036",4552:"3a5579a2",4653:"3ef0606f",4680:"a3320e26",4685:"fa1ce1af",4712:"f5887bb4",4760:"f9638421",4764:"8a8bafee",4780:"0b513daf",4791:"87d615cc",4797:"1d69dcd9",4812:"acd6ef9e",4823:"21925e60",4846:"938cf45b",4871:"12b199b6",4894:"be6c8af9",4904:"5beca119",4916:"6986f074",4947:"a0df199b",4976:"c7640dde",4980:"cbcac693",5008:"d53d345c",5020:"3feaa519",5062:"dc885198",5073:"1cf4be31",5106:"d05dbbf6",5133:"8a98b86a",5143:"454007ac",5158:"73fbfd94",5205:"d9f2f253",5216:"29c4cc6b",5257:"8c050434",5294:"a6d40679",5299:"802902e0",5352:"928fb163",5385:"7104efaa",5387:"2a9fc4bc",5471:"6e48c3ae",5487:"28c6036c",5489:"c8f875c6",5507:"4a667104",5530:"3a7072e6",5538:"2d15c56d",5575:"1f0aa512",5598:"231c53fe",5609:"f11c80e8",5614:"d016f05d",5638:"62f77ec1",5733:"ff66a4ca",5758:"aa26c06b",5763:"56695ef1",5816:"af1d3831",5820:"9ce8e978",5832:"71ebc0ad",5897:"75a7a1f5",5898:"3a0ef5a8",5932:"b4afab46",5937:"c772f1ac",5977:"26970788",6011:"177dd695",6024:"aaf4be8e",6048:"8f9f4159",6094:"82f24c6f",6109:"b6c912d1",6116:"d6b8ae16",6126:"b5bfed16",6134:"5610cb74",6166:"d56a6816",6195:"d2e3d9fd",6201:"b4582b6b",6207:"86411a05",6259:"ee75ff2b",6322:"11cc5d12",6323:"ff5df40d",6410:"1
ceba1b9",6438:"80495dd3",6448:"822b9af5",6460:"2de0b602",6474:"1cedc99d",6508:"5c075523",6519:"f12e2eb7",6532:"fd80153f",6539:"5e0025e2",6558:"5c4b6040",6693:"3d3cad96",6695:"e99845c5",6738:"eef46cd0",6763:"4209a02a",6765:"e2e0505d",6775:"64bd86e9",6806:"27b2c5ed",6826:"ae66728e",6851:"ac913de2",6876:"5f16916e",6877:"298519cc",6918:"435f1c10",6975:"08c88421",6998:"f8101417",7002:"f50f8a57",7071:"e534df78",7094:"f11ecfaa",7110:"ed9908e8",7232:"6ba707f7",7260:"3a807525",7278:"1379f890",7285:"fe72252f",7297:"2ea701c6",7366:"1d30dbf3",7385:"28101003",7410:"af27ef25",7414:"dd89ea2c",7441:"b73dd225",7451:"0aad804f",7459:"fcf2a6b2",7494:"7aac2a72",7542:"a1eac508",7560:"f24f8511",7561:"79ac022f",7722:"fd64a384",7761:"64e745b0",7764:"a0c2adf2",7779:"bd40640f",7801:"def0badb",7858:"e5927596",7860:"e76f1787",7868:"2f9b3d0d",7918:"17896441",7920:"1a4e3797",7952:"1484d05f",7959:"68fadf16",7961:"fcd16a9e",7990:"203263d4",8035:"a47b6120",8080:"1043e548",8131:"fe630487",8152:"026cb5cf",8162:"bab9f900",8238:"7f68880b",8268:"8448429a",8269:"add68e33",8301:"27cf1d60",8310:"1dc0f641",8387:"ea403ec7",8437:"4b37dbd2",8456:"0c9f6f72",8475:"a33b3d6f",8551:"a5852e81",8594:"b5622745",8622:"da33929d",8653:"f6be1df8",8667:"6818b056",8704:"eca54536",8730:"4d5361ea",8733:"a5b282d6",8770:"5b70f945",8812:"54b5b2cf",8815:"bec788ea",8878:"19bf3bfc",8882:"f2959043",8922:"746d5cf3",9005:"40cbda52",9021:"01f389df",9039:"1e64ce86",9045:"1d976a14",9065:"c2dd8c1b",9092:"52dee01e",9105:"517fcd13",9162:"460cc3b6",9175:"a776d94a",9232:"9a5b80a5",9238:"1e62fda0",9285:"c8502d9d",9299:"84c9ba83",9358:"8e26439a",9365:"3fd8ad89",9374:"af123054",9389:"882a51ff",9398:"8d75b97d",9427:"998bddad",9442:"6e0b27a3",9463:"3db781c5",9465:"4d07c16c",9468:"4e880e71",9489:"af622fc3",9503:"da2c0eee",9514:"1be78505",9548:"fbc62c73",9608:"240588d5",9610:"4295d560",9637:"156ab36c",9639:"89c889e2",9658:"ccd4e028",9660:"3022e979",9750:"28d2dd17",9773:"4c68208b",9778:"36968183",9782:"6d9c169a",9784:"eff66394",9824:"ebd45411",9828:"c
ca92c7f",9857:"41ce53dc",9895:"d9c5d136",9927:"70be5e1d",9944:"ec330b1a",9990:"7f055fb6"}[e]||e)+"."+{53:"cf1e5df2",76:"848c7f3d",107:"ec5258e0",114:"e589d077",135:"2b36dabe",138:"2679138b",157:"7e3352ef",210:"82b7302e",237:"0764a6b5",241:"eb9f3812",261:"de38abed",273:"8c3e5ab8",278:"32f72046",336:"df6a3eb8",342:"4f8f1e5a",368:"7dc3d4b0",511:"a53bd11c",543:"f0e4e207",548:"501f38dc",560:"0437c69b",567:"b6ba7395",593:"f6823172",660:"c4f521af",674:"f3f6fa67",677:"02880b05",692:"7f2fe157",701:"bd6b464d",769:"a1c54629",815:"519c18a6",835:"1c97513a",858:"9ff799b0",866:"7f07bebc",922:"49a281e0",926:"06384287",944:"e3fbd497",987:"b5396c85",1009:"c62e7001",1036:"14c51555",1131:"2c77fc9c",1150:"12128e4b",1155:"c8c4dbd1",1199:"06fb5e5a",1366:"229bc754",1415:"6d24ebd9",1439:"2b5fd234",1619:"c4a38bed",1655:"00870b9e",1667:"a6406949",1683:"a01f34bf",1810:"ab50d0da",1811:"90e67483",1816:"fb132cc9",1832:"0371ad69",1854:"7f2a3492",1886:"193a9bd0",1959:"a8d95d6b",1993:"ce98bf9c",2005:"5d9ea19d",2013:"67da6bc2",2026:"9948c27e",2055:"71187268",2125:"00f64c1e",2148:"c8a71bdc",2153:"2a8e0431",2206:"c5cd9441",2225:"62bc06cd",2237:"c46896c2",2277:"8846d48e",2307:"f6d39a4d",2367:"1fba97df",2383:"8c33ecdb",2387:"a70ff05c",2393:"6a3889ea",2416:"90b5c0af",2504:"69b8db9b",2519:"3a0b3c24",2555:"b99cc9e7",2635:"a366571f",2651:"648f3bac",2680:"ac3107d7",2692:"d5c3f23c",2710:"0ddcd26e",2718:"4229fdb3",2734:"f101106e",2743:"f11cb783",2768:"6d5c8eef",2825:"ec75f8e9",2867:"2acb1829",2923:"35d3e2ac",2954:"c02eec7a",3001:"b01126cd",3042:"c6cf919c",3081:"184224fa",3121:"289990a7",3181:"03714675",3209:"9615f9b4",3213:"7df039ce",3217:"42362c79",3360:"c3b031ff",3382:"d22ed53d",3467:"2a74120a",3478:"77e95ca2",3484:"89ef8df4",3495:"0ab98f4a",3508:"abe7c534",3522:"37c441dd",3526:"98a64538",3571:"42d3ad23",3580:"1efe118f",3589:"567049c3",3613:"7b83b675",3670:"a11c4f23",3699:"f4804e9c",3705:"ecff8e68",3713:"4b1b7a68",3739:"e85f1e22",3754:"e5730daa",3777:"00db5ff6",3811:"b87be9f7",3817:"0ac0fdf2",3821:"8ebd00b3",
3855:"aa3a20c7",3893:"a233493c",3940:"90b20859",3995:"0a7178ba",4e3:"da3fe362",4043:"db71722a",4089:"386f0b31",4110:"11279433",4113:"425454b2",4153:"a26eabef",4195:"1fca85b5",4207:"33274743",4248:"4db9db10",4254:"e73f509f",4311:"9dd34ae8",4330:"c8bcf0e9",4332:"bd92144f",4341:"b4799f15",4342:"fb11d2d7",4348:"fb32682f",4355:"f1a1b50f",4369:"08db1378",4412:"d7175999",4414:"b8b47cde",4426:"4972b2be",4506:"de54d8b4",4528:"9c154e7d",4552:"878f9ca3",4653:"a78095be",4680:"e903e453",4685:"63b04d65",4712:"b62589f8",4760:"2a09e827",4764:"6f966b4d",4780:"c38ccd3f",4791:"d8d4cf62",4797:"44de0b2b",4812:"17dc9bc9",4823:"7dc7e85f",4846:"a44dd682",4871:"64ed7717",4894:"b9635414",4904:"853def21",4916:"92af0688",4947:"e01bee39",4976:"64a9e6f8",4980:"9d1cdb96",4985:"a6a7d0c1",5008:"b7082753",5020:"c51d62ae",5062:"9fd8d62c",5073:"152f9145",5106:"7d8026ad",5133:"a67843e2",5143:"dd3b0b0b",5158:"fad6430d",5205:"b27f259d",5216:"d6307eda",5257:"1d00adea",5294:"94097d7d",5299:"0137749d",5352:"a5d072f3",5385:"ba41e531",5387:"ccbca60b",5471:"421400ec",5487:"9b2bf2c4",5489:"64fae0a1",5507:"fadb6d80",5530:"577acfeb",5538:"726ebdee",5575:"a6c95813",5598:"a60e4120",5609:"a0b3d3a7",5614:"8a7008fe",5638:"6bb84ce8",5733:"44c9efc5",5758:"9dcd1578",5763:"75da873c",5816:"797fb105",5820:"18fb8c9f",5832:"aeaf8d18",5897:"3dd248a1",5898:"814104d5",5932:"7fc3f565",5937:"2ba87cc7",5977:"4c1a77e0",6011:"2fcf18d9",6024:"431f043a",6048:"0ee42701",6094:"8b84f8e4",6109:"d8de5e11",6116:"0d691bbc",6126:"06cc8965",6134:"7aa71dae",6166:"6b8e3370",6195:"ad072cb0",6201:"71a85021",6207:"4388dd3a",6259:"1bac1ae2",6322:"ffbb3523",6323:"b4082142",6410:"a730e896",6438:"5c6e4ed9",6448:"ad5c6d0b",6460:"38a2d53f",6474:"adef73fd",6508:"ff1d0eca",6519:"938d9725",6532:"ca3d7793",6539:"5c5bd5cf",6558:"f2d55ac0",6693:"efab6214",6695:"cc8f30c6",6738:"f5c809fe",6763:"f9925b8a",6765:"e30bc76b",6775:"16e77756",6780:"d08d9b5a",6806:"567a55da",6826:"4ae79ca3",6851:"eb1832ba",6876:"dbe757f0",6877:"f5e8255f",6918:"5f896a17",6945:"b6af4340",6
975:"3ddb65d9",6998:"448f1868",7002:"935674f9",7071:"779927b4",7094:"8464fdf6",7110:"334ee968",7232:"5bc2fa51",7260:"4519bd52",7278:"8b985cdf",7285:"d60f5768",7297:"48a15a49",7366:"6a4da65d",7385:"56ec0366",7410:"70f5f9c0",7414:"a0dcf947",7441:"1e21e9b0",7451:"32dc413f",7459:"a9fe3527",7494:"d78f5f5b",7542:"c3273573",7560:"d839a14a",7561:"b1973d42",7722:"682d74bb",7761:"7346d8a0",7764:"ccf02db5",7779:"161eb332",7801:"e09df369",7858:"c70e478d",7860:"7e09811d",7868:"e540d53f",7918:"458bbbb3",7920:"ba74bc81",7952:"02c78b43",7959:"72173f70",7961:"bf6e6ea0",7990:"4ee3dd67",8035:"21b380f5",8080:"55aa7524",8131:"4e8f0bdb",8152:"c3522dae",8162:"b7a6880d",8238:"0e329269",8268:"bc13c7e7",8269:"24a36d17",8301:"177b901c",8310:"f6cac05f",8387:"5fc2385e",8437:"adc739ef",8456:"0b49371d",8475:"5beb4116",8551:"7a9b0c65",8594:"e00ea97c",8622:"a3191b0b",8634:"ebfffeaa",8653:"280188ec",8667:"011adf68",8704:"c54de494",8730:"5591b5cb",8733:"802f001c",8770:"020d4f6c",8812:"f183bf12",8815:"2d6e9b3b",8878:"d00ed9a5",8882:"fb3fc13d",8922:"c8414748",8973:"440d6946",9005:"d2935c02",9021:"0eff7c20",9039:"5d840dd3",9045:"c7132ea8",9049:"dc3d1bbe",9065:"aaaa1509",9092:"d21e6bb2",9105:"b47b6bb2",9162:"a56d4aee",9175:"70e1c1d5",9232:"f15d8dbc",9238:"cb6f5f39",9285:"ad45c59a",9299:"89849c97",9319:"ed79db1c",9358:"253b5324",9365:"fbabb1cd",9374:"c726512f",9389:"cdfac751",9398:"0df15040",9427:"565277c4",9442:"c885e053",9463:"2f77dad7",9465:"3cd961a9",9468:"eadd6304",9489:"1f7d8410",9503:"9b92f537",9514:"7151db62",9548:"d215f987",9608:"5aca5539",9610:"02e52a64",9637:"8a8f9b40",9639:"a3c2f126",9658:"e6e9808c",9660:"06ac6c5e",9750:"dd12c110",9773:"8f5dc6be",9778:"979b171d",9782:"98b461df",9784:"1a5d7960",9824:"8982fa9a",9828:"75b5d884",9857:"f8844034",9895:"021e723d",9927:"1d3ffdf2",9944:"6701b52c",9990:"89966c51"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return 
window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),c={},d="docs-website:",r.l=(e,a,f,b)=>{if(c[e])c[e].push(a);else{var t,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var d=c[e];if(delete c[e],t.parentNode&&t.parentNode.removeChild(t),d&&d.forEach((e=>e(f))),a)return a(f)},s=setTimeout(u.bind(null,void 0,{type:"timeout",target:t}),12e4);t.onerror=u.bind(null,t.onerror),t.onload=u.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.nmd=e=>(e.paths=[],e.children||(e.children=[]),e),r.p="/",r.gca=function(e){return e={17896441:"7918",26970788:"5977",28101003:"7385",36622342:"1199",36968183:"9778",81423764:"2393",85446872:"2307",94063305:"4506",94732196:"3580","935f2afb":"53",e433513e:"76","10f4cd9f":"107","3439e832":"114","2ae2f03c":"135","952fdaa7":"138",dd11e1f5:"157","9fb8e83b":"210","559cce6e":"237","8915fe4a":"241","1cdd3ad8":"261",b45b8f2a:"273",a9ee1b2e:"278","55300bfc":"336",bded5420:"342","74e37280":"368",ce9cc878:"511","239ede39":"543",cbb51090:"548","11c4014b":"560",a6b57299:"567","2714f677":"593",b5c1e141:"660",d6e92d94:"674",e0c68876:"677","94c4aaa1":"692","4d58b9df":"701","4deec257":"769","4c58bc23":"815",f60aa932:"835","764aa33e":"858","582f4e0f":"866","1c5daeca":"922","6802b5bb":"926",fb16e8d8:"944","4f3a601b":"987",a2001381:"1009",b05a1f44:"1036","2ae52255":"1131","69b79dad":"1150",f1ca4237:"1155",a0936084:"1366","1c9e2091":"1415","6684a94d":"1439","1645b7aa":"1619","2e552f37":"1655","7c50e773":"1667","7d2c9853":"1683","8581e84a":"1810","13f0f933":"1811",d49b7a82:"1816",c2702396:"1832",c006a637:"1854","97a71a4c":"1886","6ba4b6b7":"1959","558d392d":"1993","211eb021":"2005",b6d58f6e:"2013","46bc0ee8":"2026",b8cb6aeb:"2055","4673b9e3":"2125",f2df41f1:"2148","494ba276":"2206",a22be15c:"2225","62f996d0":"2237",
"0c71206d":"2277","5c3d3a81":"2367","0eeb3f77":"2383",e381afd8:"2387",e148fe41:"2416",b65e549a:"2504",f73df94e:"2519","51d2ccba":"2555",b2bffc66:"2635","9dad6126":"2651","236aade1":"2680","60330b81":"2692","222ef86b":"2710","40baa5b4":"2718","8770979a":"2734",e55a2f25:"2743",da41aefa:"2768","60d66e8d":"2825","96958afb":"2867","7dcfffe1":"2923",c9fe24a5:"2954","363813f6":"3001","418b62bc":"3042",dc48bf03:"3081",c306cfc9:"3121","8ac3eb33":"3181","7f63ac80":"3209","12b4c537":"3213","02628582":"3217","49f5f15f":"3360","6fd60ff2":"3382","01c4b4c9":"3467","899897e4":"3478","8f1359e8":"3484",c22b5a3c:"3495","3302977a":"3508","5aaa2ddf":"3522",af6aa3b0:"3526",da4f9ec6:"3571",f2adeaeb:"3589","510b1a82":"3613","5947ae86":"3670",ab60f020:"3699","232fb92c":"3705",bfdbc90c:"3713","3196103f":"3739","923f5d68":"3754","179e51a2":"3777","8bdb1750":"3811","46925a21":"3817",eeebdb74:"3821","4dbd669a":"3855",d576a5e7:"3893",b30c8566:"3940",c85391c5:"3995","6b5bcc88":"4000","14fc7887":"4043","93373db4":"4089",db6ac68b:"4110","8a488b1e":"4113","6509ccc3":"4153",c4f5d8e4:"4195","2380a642":"4207",d94a66d9:"4254",b83e28af:"4311","38f0a075":"4330","75ad91bf":"4332",e70edfe9:"4341","8b76922d":"4342","89d719b4":"4348","1fb350ae":"4355","3b80522b":"4369","1f0eff70":"4412",f9bdac24:"4414",ec69336c:"4426",db102036:"4528","3a5579a2":"4552","3ef0606f":"4653",a3320e26:"4680",fa1ce1af:"4685",f5887bb4:"4712",f9638421:"4760","8a8bafee":"4764","0b513daf":"4780","87d615cc":"4791","1d69dcd9":"4797",acd6ef9e:"4812","21925e60":"4823","938cf45b":"4846","12b199b6":"4871",be6c8af9:"4894","5beca119":"4904","6986f074":"4916",a0df199b:"4947",c7640dde:"4976",cbcac693:"4980",d53d345c:"5008","3feaa519":"5020",dc885198:"5062","1cf4be31":"5073",d05dbbf6:"5106","8a98b86a":"5133","454007ac":"5143","73fbfd94":"5158",d9f2f253:"5205","29c4cc6b":"5216","8c050434":"5257",a6d40679:"5294","802902e0":"5299","928fb163":"5352","7104efaa":"5385","2a9fc4bc":"5387","6e48c3ae":"5471","28c6036c":"5487",c8f875c6:"5489","4a667104":"5507
","3a7072e6":"5530","2d15c56d":"5538","1f0aa512":"5575","231c53fe":"5598",f11c80e8:"5609",d016f05d:"5614","62f77ec1":"5638",ff66a4ca:"5733",aa26c06b:"5758","56695ef1":"5763",af1d3831:"5816","9ce8e978":"5820","71ebc0ad":"5832","75a7a1f5":"5897","3a0ef5a8":"5898",b4afab46:"5932",c772f1ac:"5937","177dd695":"6011",aaf4be8e:"6024","8f9f4159":"6048","82f24c6f":"6094",b6c912d1:"6109",d6b8ae16:"6116",b5bfed16:"6126","5610cb74":"6134",d56a6816:"6166",d2e3d9fd:"6195",b4582b6b:"6201","86411a05":"6207",ee75ff2b:"6259","11cc5d12":"6322",ff5df40d:"6323","1ceba1b9":"6410","80495dd3":"6438","822b9af5":"6448","2de0b602":"6460","1cedc99d":"6474","5c075523":"6508",f12e2eb7:"6519",fd80153f:"6532","5e0025e2":"6539","5c4b6040":"6558","3d3cad96":"6693",e99845c5:"6695",eef46cd0:"6738","4209a02a":"6763",e2e0505d:"6765","64bd86e9":"6775","27b2c5ed":"6806",ae66728e:"6826",ac913de2:"6851","5f16916e":"6876","298519cc":"6877","435f1c10":"6918","08c88421":"6975",f8101417:"6998",f50f8a57:"7002",e534df78:"7071",f11ecfaa:"7094",ed9908e8:"7110","6ba707f7":"7232","3a807525":"7260","1379f890":"7278",fe72252f:"7285","2ea701c6":"7297","1d30dbf3":"7366",af27ef25:"7410",dd89ea2c:"7414",b73dd225:"7441","0aad804f":"7451",fcf2a6b2:"7459","7aac2a72":"7494",a1eac508:"7542",f24f8511:"7560","79ac022f":"7561",fd64a384:"7722","64e745b0":"7761",a0c2adf2:"7764",bd40640f:"7779",def0badb:"7801",e5927596:"7858",e76f1787:"7860","2f9b3d0d":"7868","1a4e3797":"7920","1484d05f":"7952","68fadf16":"7959",fcd16a9e:"7961","203263d4":"7990",a47b6120:"8035","1043e548":"8080",fe630487:"8131","026cb5cf":"8152",bab9f900:"8162","7f68880b":"8238","8448429a":"8268",add68e33:"8269","27cf1d60":"8301","1dc0f641":"8310",ea403ec7:"8387","4b37dbd2":"8437","0c9f6f72":"8456",a33b3d6f:"8475",a5852e81:"8551",b5622745:"8594",da33929d:"8622",f6be1df8:"8653","6818b056":"8667",eca54536:"8704","4d5361ea":"8730",a5b282d6:"8733","5b70f945":"8770","54b5b2cf":"8812",bec788ea:"8815","19bf3bfc":"8878",f2959043:"8882","746d5cf3":"8922","40cbda52":"9005","01f
389df":"9021","1e64ce86":"9039","1d976a14":"9045",c2dd8c1b:"9065","52dee01e":"9092","517fcd13":"9105","460cc3b6":"9162",a776d94a:"9175","9a5b80a5":"9232","1e62fda0":"9238",c8502d9d:"9285","84c9ba83":"9299","8e26439a":"9358","3fd8ad89":"9365",af123054:"9374","882a51ff":"9389","8d75b97d":"9398","998bddad":"9427","6e0b27a3":"9442","3db781c5":"9463","4d07c16c":"9465","4e880e71":"9468",af622fc3:"9489",da2c0eee:"9503","1be78505":"9514",fbc62c73:"9548","240588d5":"9608","4295d560":"9610","156ab36c":"9637","89c889e2":"9639",ccd4e028:"9658","3022e979":"9660","28d2dd17":"9750","4c68208b":"9773","6d9c169a":"9782",eff66394:"9784",ebd45411:"9824",cca92c7f:"9828","41ce53dc":"9857",d9c5d136:"9895","70be5e1d":"9927",ec330b1a:"9944","7f055fb6":"9990"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,f)=>{var c=r.o(e,a)?e[a]:void 0;if(0!==c)if(c)f.push(c[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((f,d)=>c=e[a]=[f,d]));f.push(c[2]=d);var b=r.p+r.u(a),t=new Error;r.l(b,(f=>{if(r.o(e,a)&&(0!==(c=e[a])&&(e[a]=void 0),c)){var d=f&&("load"===f.type?"missing":f.type),b=f&&f.target&&f.target.src;t.message="Loading chunk "+a+" failed.\n("+d+": "+b+")",t.name="ChunkLoadError",t.type=d,t.request=b,c[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,f)=>{var c,d,b=f[0],t=f[1],o=f[2],n=0;if(b.some((a=>0!==e[a]))){for(c in t)r.o(t,c)&&(r.m[c]=t[c]);if(o)var i=o(r)}for(a&&a(f);n{"use strict";var e,a,f,c,d,b={},t={};function r(e){var a=t[e];if(void 0!==a)return a.exports;var f=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(f.exports,f,f.exports,r),f.loaded=!0,f.exports}r.m=b,e=[],r.O=(a,f,c,d)=>{if(!f){var b=1/0;for(i=0;i=d)&&Object.keys(r.O).every((e=>r.O[e](f[o])))?f.splice(o--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[f,c,d]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,c){if(1&c&&(e=this(e)),8&c)return e;if("object"==typeof 
e&&e){if(4&c&&e.__esModule)return e;if(16&c&&"function"==typeof e.then)return e}var d=Object.create(null);r.r(d);var b={};a=a||[null,f({}),f([]),f(f)];for(var t=2&c&&e;"object"==typeof t&&!~a.indexOf(t);t=f(t))Object.getOwnPropertyNames(t).forEach((a=>b[a]=()=>e[a]));return b.default=()=>e,r.d(d,b),d},r.d=(e,a)=>{for(var f in a)r.o(a,f)&&!r.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,f)=>(r.f[f](e,a),a)),[])),r.u=e=>"assets/js/"+({53:"935f2afb",76:"e433513e",107:"10f4cd9f",114:"3439e832",135:"2ae2f03c",138:"952fdaa7",157:"dd11e1f5",210:"9fb8e83b",237:"559cce6e",241:"8915fe4a",261:"1cdd3ad8",273:"b45b8f2a",278:"a9ee1b2e",336:"55300bfc",342:"bded5420",368:"74e37280",511:"ce9cc878",543:"239ede39",548:"cbb51090",560:"11c4014b",567:"a6b57299",593:"2714f677",660:"b5c1e141",674:"d6e92d94",677:"e0c68876",692:"94c4aaa1",701:"4d58b9df",769:"4deec257",815:"4c58bc23",835:"f60aa932",858:"764aa33e",866:"582f4e0f",922:"1c5daeca",926:"6802b5bb",944:"fb16e8d8",987:"4f3a601b",1009:"a2001381",1036:"b05a1f44",1131:"2ae52255",1150:"69b79dad",1155:"f1ca4237",1199:"36622342",1366:"a0936084",1415:"1c9e2091",1439:"6684a94d",1619:"1645b7aa",1655:"2e552f37",1667:"7c50e773",1683:"7d2c9853",1810:"8581e84a",1811:"13f0f933",1816:"d49b7a82",1832:"c2702396",1854:"c006a637",1886:"97a71a4c",1959:"6ba4b6b7",1993:"558d392d",2005:"211eb021",2013:"b6d58f6e",2026:"46bc0ee8",2055:"b8cb6aeb",2125:"4673b9e3",2148:"f2df41f1",2206:"494ba276",2225:"a22be15c",2237:"62f996d0",2277:"0c71206d",2307:"85446872",2367:"5c3d3a81",2383:"0eeb3f77",2387:"e381afd8",2393:"81423764",2416:"e148fe41",2504:"b65e549a",2519:"f73df94e",2555:"51d2ccba",2635:"b2bffc66",2651:"9dad6126",2680:"236aade1",2692:"60330b81",2710:"222ef86b",2718:"40baa5b4",2734:"8770979a",2743:"e55a2f25",2768:"da41aefa",2825:"60d66e8d",2867:"96958afb",2923:"7dcfffe1",2954:"c9fe24a5",3001:"363813f6",3042:"418b62bc",3081:"dc48bf03",3121:"c306cfc9",3181:"8ac3eb33",3209:"7f63ac80",3213:"12b4c5
37",3217:"02628582",3360:"49f5f15f",3382:"6fd60ff2",3467:"01c4b4c9",3478:"899897e4",3484:"8f1359e8",3495:"c22b5a3c",3508:"3302977a",3522:"5aaa2ddf",3526:"af6aa3b0",3571:"da4f9ec6",3580:"94732196",3589:"f2adeaeb",3613:"510b1a82",3670:"5947ae86",3699:"ab60f020",3705:"232fb92c",3713:"bfdbc90c",3739:"3196103f",3754:"923f5d68",3777:"179e51a2",3811:"8bdb1750",3817:"46925a21",3821:"eeebdb74",3855:"4dbd669a",3893:"d576a5e7",3940:"b30c8566",3995:"c85391c5",4e3:"6b5bcc88",4043:"14fc7887",4089:"93373db4",4110:"db6ac68b",4113:"8a488b1e",4153:"6509ccc3",4195:"c4f5d8e4",4207:"2380a642",4254:"d94a66d9",4311:"b83e28af",4330:"38f0a075",4332:"75ad91bf",4341:"e70edfe9",4342:"8b76922d",4348:"89d719b4",4355:"1fb350ae",4369:"3b80522b",4412:"1f0eff70",4414:"f9bdac24",4426:"ec69336c",4506:"94063305",4528:"db102036",4552:"3a5579a2",4653:"3ef0606f",4680:"a3320e26",4685:"fa1ce1af",4712:"f5887bb4",4760:"f9638421",4764:"8a8bafee",4780:"0b513daf",4791:"87d615cc",4797:"1d69dcd9",4812:"acd6ef9e",4823:"21925e60",4846:"938cf45b",4871:"12b199b6",4894:"be6c8af9",4904:"5beca119",4916:"6986f074",4947:"a0df199b",4976:"c7640dde",4980:"cbcac693",5008:"d53d345c",5020:"3feaa519",5062:"dc885198",5073:"1cf4be31",5106:"d05dbbf6",5133:"8a98b86a",5143:"454007ac",5158:"73fbfd94",5205:"d9f2f253",5216:"29c4cc6b",5257:"8c050434",5294:"a6d40679",5299:"802902e0",5352:"928fb163",5385:"7104efaa",5387:"2a9fc4bc",5471:"6e48c3ae",5487:"28c6036c",5489:"c8f875c6",5507:"4a667104",5530:"3a7072e6",5538:"2d15c56d",5575:"1f0aa512",5598:"231c53fe",5609:"f11c80e8",5614:"d016f05d",5638:"62f77ec1",5733:"ff66a4ca",5758:"aa26c06b",5763:"56695ef1",5816:"af1d3831",5820:"9ce8e978",5832:"71ebc0ad",5897:"75a7a1f5",5898:"3a0ef5a8",5932:"b4afab46",5937:"c772f1ac",5977:"26970788",6011:"177dd695",6024:"aaf4be8e",6048:"8f9f4159",6094:"82f24c6f",6109:"b6c912d1",6116:"d6b8ae16",6126:"b5bfed16",6134:"5610cb74",6166:"d56a6816",6195:"d2e3d9fd",6201:"b4582b6b",6207:"86411a05",6259:"ee75ff2b",6322:"11cc5d12",6323:"ff5df40d",6410:"1ceba1b9",6438:"80495dd
3",6448:"822b9af5",6460:"2de0b602",6474:"1cedc99d",6508:"5c075523",6519:"f12e2eb7",6532:"fd80153f",6539:"5e0025e2",6558:"5c4b6040",6693:"3d3cad96",6695:"e99845c5",6738:"eef46cd0",6763:"4209a02a",6765:"e2e0505d",6775:"64bd86e9",6806:"27b2c5ed",6826:"ae66728e",6851:"ac913de2",6876:"5f16916e",6877:"298519cc",6918:"435f1c10",6975:"08c88421",6998:"f8101417",7002:"f50f8a57",7071:"e534df78",7094:"f11ecfaa",7110:"ed9908e8",7232:"6ba707f7",7260:"3a807525",7278:"1379f890",7285:"fe72252f",7297:"2ea701c6",7366:"1d30dbf3",7385:"28101003",7410:"af27ef25",7414:"dd89ea2c",7441:"b73dd225",7451:"0aad804f",7459:"fcf2a6b2",7494:"7aac2a72",7542:"a1eac508",7560:"f24f8511",7561:"79ac022f",7722:"fd64a384",7761:"64e745b0",7764:"a0c2adf2",7779:"bd40640f",7801:"def0badb",7858:"e5927596",7860:"e76f1787",7868:"2f9b3d0d",7918:"17896441",7920:"1a4e3797",7952:"1484d05f",7959:"68fadf16",7961:"fcd16a9e",7990:"203263d4",8035:"a47b6120",8080:"1043e548",8131:"fe630487",8152:"026cb5cf",8162:"bab9f900",8238:"7f68880b",8268:"8448429a",8269:"add68e33",8301:"27cf1d60",8310:"1dc0f641",8387:"ea403ec7",8437:"4b37dbd2",8456:"0c9f6f72",8475:"a33b3d6f",8551:"a5852e81",8594:"b5622745",8622:"da33929d",8653:"f6be1df8",8667:"6818b056",8704:"eca54536",8730:"4d5361ea",8733:"a5b282d6",8770:"5b70f945",8812:"54b5b2cf",8815:"bec788ea",8878:"19bf3bfc",8882:"f2959043",8922:"746d5cf3",9005:"40cbda52",9021:"01f389df",9039:"1e64ce86",9045:"1d976a14",9065:"c2dd8c1b",9092:"52dee01e",9105:"517fcd13",9162:"460cc3b6",9175:"a776d94a",9232:"9a5b80a5",9238:"1e62fda0",9285:"c8502d9d",9299:"84c9ba83",9358:"8e26439a",9365:"3fd8ad89",9374:"af123054",9389:"882a51ff",9398:"8d75b97d",9427:"998bddad",9442:"6e0b27a3",9463:"3db781c5",9465:"4d07c16c",9468:"4e880e71",9489:"af622fc3",9503:"da2c0eee",9514:"1be78505",9548:"fbc62c73",9608:"240588d5",9610:"4295d560",9637:"156ab36c",9639:"89c889e2",9658:"ccd4e028",9660:"3022e979",9750:"28d2dd17",9773:"4c68208b",9778:"36968183",9782:"6d9c169a",9784:"eff66394",9824:"ebd45411",9828:"cca92c7f",9857:"41ce53d
c",9895:"d9c5d136",9927:"70be5e1d",9944:"ec330b1a",9990:"7f055fb6"}[e]||e)+"."+{53:"cf1e5df2",76:"848c7f3d",107:"ec5258e0",114:"e589d077",135:"9d9e083e",138:"2679138b",157:"7e3352ef",210:"82b7302e",237:"0764a6b5",241:"eb9f3812",261:"de38abed",273:"8c3e5ab8",278:"32f72046",336:"df6a3eb8",342:"4f8f1e5a",368:"7dc3d4b0",511:"a53bd11c",543:"f0e4e207",548:"501f38dc",560:"0437c69b",567:"b6ba7395",593:"f6823172",660:"c4f521af",674:"f3f6fa67",677:"02880b05",692:"7f2fe157",701:"da938499",769:"a1c54629",815:"519c18a6",835:"1c97513a",858:"9ff799b0",866:"7f07bebc",922:"49a281e0",926:"06384287",944:"e3fbd497",987:"b5396c85",1009:"c62e7001",1036:"14c51555",1131:"2c77fc9c",1150:"12128e4b",1155:"c8c4dbd1",1199:"06fb5e5a",1366:"229bc754",1415:"6d24ebd9",1439:"2b5fd234",1619:"c4a38bed",1655:"00870b9e",1667:"a6406949",1683:"a01f34bf",1810:"ab50d0da",1811:"90e67483",1816:"fb132cc9",1832:"0371ad69",1854:"7f2a3492",1886:"193a9bd0",1959:"a8d95d6b",1993:"ce98bf9c",2005:"5d9ea19d",2013:"67da6bc2",2026:"9948c27e",2055:"71187268",2125:"00f64c1e",2148:"c8a71bdc",2153:"2a8e0431",2206:"c5cd9441",2225:"62bc06cd",2237:"c46896c2",2277:"8846d48e",2307:"f6d39a4d",2367:"1fba97df",2383:"8c33ecdb",2387:"a70ff05c",2393:"6a3889ea",2416:"90b5c0af",2504:"69b8db9b",2519:"3a0b3c24",2555:"b99cc9e7",2635:"a366571f",2651:"648f3bac",2680:"ac3107d7",2692:"d5c3f23c",2710:"0ddcd26e",2718:"4229fdb3",2734:"f101106e",2743:"f11cb783",2768:"6d5c8eef",2825:"ec75f8e9",2867:"2acb1829",2923:"35d3e2ac",2954:"c02eec7a",3001:"b01126cd",3042:"c6cf919c",3081:"184224fa",3121:"289990a7",3181:"03714675",3209:"9615f9b4",3213:"7df039ce",3217:"42362c79",3360:"c3b031ff",3382:"d22ed53d",3467:"2a74120a",3478:"77e95ca2",3484:"89ef8df4",3495:"0ab98f4a",3508:"abe7c534",3522:"37c441dd",3526:"98a64538",3571:"42d3ad23",3580:"1efe118f",3589:"567049c3",3613:"7b83b675",3670:"a11c4f23",3699:"f4804e9c",3705:"ecff8e68",3713:"4b1b7a68",3739:"e85f1e22",3754:"e5730daa",3777:"00db5ff6",3811:"b87be9f7",3817:"0ac0fdf2",3821:"8ebd00b3",3855:"aa3a20c7",3893:"
a233493c",3940:"90b20859",3995:"0a7178ba",4e3:"da3fe362",4043:"db71722a",4089:"386f0b31",4110:"11279433",4113:"425454b2",4153:"a26eabef",4195:"1fca85b5",4207:"33274743",4248:"4db9db10",4254:"e73f509f",4311:"9dd34ae8",4330:"c8bcf0e9",4332:"bd92144f",4341:"b4799f15",4342:"fb11d2d7",4348:"fb32682f",4355:"f1a1b50f",4369:"08db1378",4412:"d7175999",4414:"b8b47cde",4426:"4972b2be",4506:"de54d8b4",4528:"9c154e7d",4552:"878f9ca3",4653:"a78095be",4680:"e903e453",4685:"63b04d65",4712:"b62589f8",4760:"2a09e827",4764:"6f966b4d",4780:"c38ccd3f",4791:"d8d4cf62",4797:"44de0b2b",4812:"17dc9bc9",4823:"7dc7e85f",4846:"a44dd682",4871:"64ed7717",4894:"b9635414",4904:"853def21",4916:"92af0688",4947:"e01bee39",4976:"64a9e6f8",4980:"9d1cdb96",4985:"a6a7d0c1",5008:"b7082753",5020:"c51d62ae",5062:"9fd8d62c",5073:"152f9145",5106:"7d8026ad",5133:"a67843e2",5143:"dd3b0b0b",5158:"fad6430d",5205:"b27f259d",5216:"d6307eda",5257:"1d00adea",5294:"94097d7d",5299:"0137749d",5352:"a5d072f3",5385:"ba41e531",5387:"ccbca60b",5471:"421400ec",5487:"9b2bf2c4",5489:"64fae0a1",5507:"fadb6d80",5530:"577acfeb",5538:"726ebdee",5575:"a6c95813",5598:"a60e4120",5609:"a0b3d3a7",5614:"8a7008fe",5638:"6bb84ce8",5733:"44c9efc5",5758:"9dcd1578",5763:"75da873c",5816:"797fb105",5820:"18fb8c9f",5832:"aeaf8d18",5897:"3dd248a1",5898:"814104d5",5932:"7fc3f565",5937:"2ba87cc7",5977:"4c1a77e0",6011:"2fcf18d9",6024:"431f043a",6048:"0ee42701",6094:"8b84f8e4",6109:"d8de5e11",6116:"0d691bbc",6126:"06cc8965",6134:"7aa71dae",6166:"6b8e3370",6195:"ad072cb0",6201:"71a85021",6207:"4388dd3a",6259:"1bac1ae2",6322:"ffbb3523",6323:"b4082142",6410:"a730e896",6438:"5c6e4ed9",6448:"ad5c6d0b",6460:"38a2d53f",6474:"adef73fd",6508:"ff1d0eca",6519:"938d9725",6532:"ca3d7793",6539:"5c5bd5cf",6558:"f2d55ac0",6693:"efab6214",6695:"cc8f30c6",6738:"f5c809fe",6763:"f9925b8a",6765:"e30bc76b",6775:"16e77756",6780:"d08d9b5a",6806:"3a92e29b",6826:"4ae79ca3",6851:"eb1832ba",6876:"dbe757f0",6877:"f5e8255f",6918:"5f896a17",6945:"b6af4340",6975:"3ddb65d9",6998:"4
48f1868",7002:"935674f9",7071:"779927b4",7094:"8464fdf6",7110:"334ee968",7232:"5bc2fa51",7260:"4519bd52",7278:"8b985cdf",7285:"d60f5768",7297:"48a15a49",7366:"6a4da65d",7385:"56ec0366",7410:"70f5f9c0",7414:"a0dcf947",7441:"1e21e9b0",7451:"32dc413f",7459:"a9fe3527",7494:"d78f5f5b",7542:"c3273573",7560:"d839a14a",7561:"b1973d42",7722:"682d74bb",7761:"7346d8a0",7764:"ccf02db5",7779:"161eb332",7801:"e09df369",7858:"c70e478d",7860:"7e09811d",7868:"e540d53f",7918:"458bbbb3",7920:"ba74bc81",7952:"02c78b43",7959:"72173f70",7961:"bf6e6ea0",7990:"4ee3dd67",8035:"21b380f5",8080:"dc92a5b9",8131:"4e8f0bdb",8152:"c3522dae",8162:"b7a6880d",8238:"0e329269",8268:"bc13c7e7",8269:"24a36d17",8301:"177b901c",8310:"f6cac05f",8387:"5fc2385e",8437:"adc739ef",8456:"0b49371d",8475:"5beb4116",8551:"7a9b0c65",8594:"e00ea97c",8622:"a3191b0b",8634:"ebfffeaa",8653:"280188ec",8667:"011adf68",8704:"c54de494",8730:"5591b5cb",8733:"802f001c",8770:"020d4f6c",8812:"f183bf12",8815:"2d6e9b3b",8878:"d00ed9a5",8882:"fb3fc13d",8922:"c8414748",8973:"440d6946",9005:"d2935c02",9021:"0eff7c20",9039:"5d840dd3",9045:"c7132ea8",9049:"dc3d1bbe",9065:"aaaa1509",9092:"d21e6bb2",9105:"b47b6bb2",9162:"a56d4aee",9175:"70e1c1d5",9232:"f15d8dbc",9238:"cb6f5f39",9285:"ad45c59a",9299:"89849c97",9319:"ed79db1c",9358:"253b5324",9365:"fbabb1cd",9374:"c726512f",9389:"cdfac751",9398:"0df15040",9427:"565277c4",9442:"c885e053",9463:"2f77dad7",9465:"3cd961a9",9468:"eadd6304",9489:"1f7d8410",9503:"9b92f537",9514:"7151db62",9548:"d215f987",9608:"5aca5539",9610:"02e52a64",9637:"8a8f9b40",9639:"a3c2f126",9658:"e6e9808c",9660:"06ac6c5e",9750:"dd12c110",9773:"8f5dc6be",9778:"979b171d",9782:"98b461df",9784:"1a5d7960",9824:"8982fa9a",9828:"75b5d884",9857:"f8844034",9895:"021e723d",9927:"1d3ffdf2",9944:"6701b52c",9990:"89966c51"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return 
window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),c={},d="docs-website:",r.l=(e,a,f,b)=>{if(c[e])c[e].push(a);else{var t,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var d=c[e];if(delete c[e],t.parentNode&&t.parentNode.removeChild(t),d&&d.forEach((e=>e(f))),a)return a(f)},s=setTimeout(u.bind(null,void 0,{type:"timeout",target:t}),12e4);t.onerror=u.bind(null,t.onerror),t.onload=u.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.nmd=e=>(e.paths=[],e.children||(e.children=[]),e),r.p="/",r.gca=function(e){return e={17896441:"7918",26970788:"5977",28101003:"7385",36622342:"1199",36968183:"9778",81423764:"2393",85446872:"2307",94063305:"4506",94732196:"3580","935f2afb":"53",e433513e:"76","10f4cd9f":"107","3439e832":"114","2ae2f03c":"135","952fdaa7":"138",dd11e1f5:"157","9fb8e83b":"210","559cce6e":"237","8915fe4a":"241","1cdd3ad8":"261",b45b8f2a:"273",a9ee1b2e:"278","55300bfc":"336",bded5420:"342","74e37280":"368",ce9cc878:"511","239ede39":"543",cbb51090:"548","11c4014b":"560",a6b57299:"567","2714f677":"593",b5c1e141:"660",d6e92d94:"674",e0c68876:"677","94c4aaa1":"692","4d58b9df":"701","4deec257":"769","4c58bc23":"815",f60aa932:"835","764aa33e":"858","582f4e0f":"866","1c5daeca":"922","6802b5bb":"926",fb16e8d8:"944","4f3a601b":"987",a2001381:"1009",b05a1f44:"1036","2ae52255":"1131","69b79dad":"1150",f1ca4237:"1155",a0936084:"1366","1c9e2091":"1415","6684a94d":"1439","1645b7aa":"1619","2e552f37":"1655","7c50e773":"1667","7d2c9853":"1683","8581e84a":"1810","13f0f933":"1811",d49b7a82:"1816",c2702396:"1832",c006a637:"1854","97a71a4c":"1886","6ba4b6b7":"1959","558d392d":"1993","211eb021":"2005",b6d58f6e:"2013","46bc0ee8":"2026",b8cb6aeb:"2055","4673b9e3":"2125",f2df41f1:"2148","494ba276":"2206",a22be15c:"2225","62f996d0":"2237",
"0c71206d":"2277","5c3d3a81":"2367","0eeb3f77":"2383",e381afd8:"2387",e148fe41:"2416",b65e549a:"2504",f73df94e:"2519","51d2ccba":"2555",b2bffc66:"2635","9dad6126":"2651","236aade1":"2680","60330b81":"2692","222ef86b":"2710","40baa5b4":"2718","8770979a":"2734",e55a2f25:"2743",da41aefa:"2768","60d66e8d":"2825","96958afb":"2867","7dcfffe1":"2923",c9fe24a5:"2954","363813f6":"3001","418b62bc":"3042",dc48bf03:"3081",c306cfc9:"3121","8ac3eb33":"3181","7f63ac80":"3209","12b4c537":"3213","02628582":"3217","49f5f15f":"3360","6fd60ff2":"3382","01c4b4c9":"3467","899897e4":"3478","8f1359e8":"3484",c22b5a3c:"3495","3302977a":"3508","5aaa2ddf":"3522",af6aa3b0:"3526",da4f9ec6:"3571",f2adeaeb:"3589","510b1a82":"3613","5947ae86":"3670",ab60f020:"3699","232fb92c":"3705",bfdbc90c:"3713","3196103f":"3739","923f5d68":"3754","179e51a2":"3777","8bdb1750":"3811","46925a21":"3817",eeebdb74:"3821","4dbd669a":"3855",d576a5e7:"3893",b30c8566:"3940",c85391c5:"3995","6b5bcc88":"4000","14fc7887":"4043","93373db4":"4089",db6ac68b:"4110","8a488b1e":"4113","6509ccc3":"4153",c4f5d8e4:"4195","2380a642":"4207",d94a66d9:"4254",b83e28af:"4311","38f0a075":"4330","75ad91bf":"4332",e70edfe9:"4341","8b76922d":"4342","89d719b4":"4348","1fb350ae":"4355","3b80522b":"4369","1f0eff70":"4412",f9bdac24:"4414",ec69336c:"4426",db102036:"4528","3a5579a2":"4552","3ef0606f":"4653",a3320e26:"4680",fa1ce1af:"4685",f5887bb4:"4712",f9638421:"4760","8a8bafee":"4764","0b513daf":"4780","87d615cc":"4791","1d69dcd9":"4797",acd6ef9e:"4812","21925e60":"4823","938cf45b":"4846","12b199b6":"4871",be6c8af9:"4894","5beca119":"4904","6986f074":"4916",a0df199b:"4947",c7640dde:"4976",cbcac693:"4980",d53d345c:"5008","3feaa519":"5020",dc885198:"5062","1cf4be31":"5073",d05dbbf6:"5106","8a98b86a":"5133","454007ac":"5143","73fbfd94":"5158",d9f2f253:"5205","29c4cc6b":"5216","8c050434":"5257",a6d40679:"5294","802902e0":"5299","928fb163":"5352","7104efaa":"5385","2a9fc4bc":"5387","6e48c3ae":"5471","28c6036c":"5487",c8f875c6:"5489","4a667104":"5507
","3a7072e6":"5530","2d15c56d":"5538","1f0aa512":"5575","231c53fe":"5598",f11c80e8:"5609",d016f05d:"5614","62f77ec1":"5638",ff66a4ca:"5733",aa26c06b:"5758","56695ef1":"5763",af1d3831:"5816","9ce8e978":"5820","71ebc0ad":"5832","75a7a1f5":"5897","3a0ef5a8":"5898",b4afab46:"5932",c772f1ac:"5937","177dd695":"6011",aaf4be8e:"6024","8f9f4159":"6048","82f24c6f":"6094",b6c912d1:"6109",d6b8ae16:"6116",b5bfed16:"6126","5610cb74":"6134",d56a6816:"6166",d2e3d9fd:"6195",b4582b6b:"6201","86411a05":"6207",ee75ff2b:"6259","11cc5d12":"6322",ff5df40d:"6323","1ceba1b9":"6410","80495dd3":"6438","822b9af5":"6448","2de0b602":"6460","1cedc99d":"6474","5c075523":"6508",f12e2eb7:"6519",fd80153f:"6532","5e0025e2":"6539","5c4b6040":"6558","3d3cad96":"6693",e99845c5:"6695",eef46cd0:"6738","4209a02a":"6763",e2e0505d:"6765","64bd86e9":"6775","27b2c5ed":"6806",ae66728e:"6826",ac913de2:"6851","5f16916e":"6876","298519cc":"6877","435f1c10":"6918","08c88421":"6975",f8101417:"6998",f50f8a57:"7002",e534df78:"7071",f11ecfaa:"7094",ed9908e8:"7110","6ba707f7":"7232","3a807525":"7260","1379f890":"7278",fe72252f:"7285","2ea701c6":"7297","1d30dbf3":"7366",af27ef25:"7410",dd89ea2c:"7414",b73dd225:"7441","0aad804f":"7451",fcf2a6b2:"7459","7aac2a72":"7494",a1eac508:"7542",f24f8511:"7560","79ac022f":"7561",fd64a384:"7722","64e745b0":"7761",a0c2adf2:"7764",bd40640f:"7779",def0badb:"7801",e5927596:"7858",e76f1787:"7860","2f9b3d0d":"7868","1a4e3797":"7920","1484d05f":"7952","68fadf16":"7959",fcd16a9e:"7961","203263d4":"7990",a47b6120:"8035","1043e548":"8080",fe630487:"8131","026cb5cf":"8152",bab9f900:"8162","7f68880b":"8238","8448429a":"8268",add68e33:"8269","27cf1d60":"8301","1dc0f641":"8310",ea403ec7:"8387","4b37dbd2":"8437","0c9f6f72":"8456",a33b3d6f:"8475",a5852e81:"8551",b5622745:"8594",da33929d:"8622",f6be1df8:"8653","6818b056":"8667",eca54536:"8704","4d5361ea":"8730",a5b282d6:"8733","5b70f945":"8770","54b5b2cf":"8812",bec788ea:"8815","19bf3bfc":"8878",f2959043:"8882","746d5cf3":"8922","40cbda52":"9005","01f
389df":"9021","1e64ce86":"9039","1d976a14":"9045",c2dd8c1b:"9065","52dee01e":"9092","517fcd13":"9105","460cc3b6":"9162",a776d94a:"9175","9a5b80a5":"9232","1e62fda0":"9238",c8502d9d:"9285","84c9ba83":"9299","8e26439a":"9358","3fd8ad89":"9365",af123054:"9374","882a51ff":"9389","8d75b97d":"9398","998bddad":"9427","6e0b27a3":"9442","3db781c5":"9463","4d07c16c":"9465","4e880e71":"9468",af622fc3:"9489",da2c0eee:"9503","1be78505":"9514",fbc62c73:"9548","240588d5":"9608","4295d560":"9610","156ab36c":"9637","89c889e2":"9639",ccd4e028:"9658","3022e979":"9660","28d2dd17":"9750","4c68208b":"9773","6d9c169a":"9782",eff66394:"9784",ebd45411:"9824",cca92c7f:"9828","41ce53dc":"9857",d9c5d136:"9895","70be5e1d":"9927",ec330b1a:"9944","7f055fb6":"9990"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,f)=>{var c=r.o(e,a)?e[a]:void 0;if(0!==c)if(c)f.push(c[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((f,d)=>c=e[a]=[f,d]));f.push(c[2]=d);var b=r.p+r.u(a),t=new Error;r.l(b,(f=>{if(r.o(e,a)&&(0!==(c=e[a])&&(e[a]=void 0),c)){var d=f&&("load"===f.type?"missing":f.type),b=f&&f.target&&f.target.src;t.message="Loading chunk "+a+" failed.\n("+d+": "+b+")",t.name="ChunkLoadError",t.type=d,t.request=b,c[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,f)=>{var c,d,b=f[0],t=f[1],o=f[2],n=0;if(b.some((a=>0!==e[a]))){for(c in t)r.o(t,c)&&(r.m[c]=t[c]);if(o)var i=o(r)}for(a&&a(f);n - +

About DataHub Lineage Impact Analysis

Feature Availability
Self-Hosted DataHub
Managed DataHub

Lineage Impact Analysis is a powerful workflow for understanding the complete set of upstream and downstream dependencies of a Dataset, Dashboard, Chart, and many other DataHub Entities.

This allows Data Practitioners to proactively identify the impact of breaking schema changes or failed data pipelines on downstream dependencies, rapidly discover which upstream dependencies may have caused unexpected data quality issues, and more.

Lineage Impact Analysis is available via the DataHub UI and GraphQL endpoints, supporting manual and automated workflows.

Lineage Impact Analysis Setup, Prerequisites, and Permissions

Lineage Impact Analysis is enabled for any Entity that has associated Lineage relationships with other Entities and does not require any additional configuration.

Any DataHub user with “View Entity Page” permissions is able to view the full set of upstream or downstream Entities and export results to CSV from the DataHub UI.

Using Lineage Impact Analysis

Follow these simple steps to understand the full dependency chain of your data entities.

  1. On a given Entity Page, select the Lineage tab

  2. Easily toggle between Upstream and Downstream dependencies

  3. Choose the Degree of Dependencies you are interested in. The default filter is “1 Degree of Dependency” to minimize processor-intensive queries.

  4. Slice and dice the result list by Entity Type, Platform, Owner, and more to isolate the relevant dependencies

  5. Export the full list of dependencies to CSV

  6. View the filtered set of dependencies via CSV, with details about assigned ownership, domain, tags, terms, and quick links back to those entities within DataHub

Additional Resources

Videos

DataHub 201: Impact Analysis

GraphQL

Looking for an example of how to use searchAcrossLineage to read lineage? Look here

DataHub Blog

FAQ and Troubleshooting

The Lineage Tab is greyed out - why can’t I click on it?

This means you have not yet ingested Lineage metadata for that entity. Please see the Lineage Guide to get started.

Why is my list of exported dependencies incomplete?

We currently limit the list of dependencies to 10,000 records; we suggest applying filters to narrow the result set if you hit that limit.

Need more help? Join the conversation in Slack!

- + \ No newline at end of file diff --git a/docs/act-on-metadata/index.html b/docs/act-on-metadata/index.html index 631de77388a19..f0e9ecba13d6e 100644 --- a/docs/act-on-metadata/index.html +++ b/docs/act-on-metadata/index.html @@ -8,13 +8,13 @@ - +

Act on Metadata Overview

DataHub's metadata infrastructure is stream-oriented, meaning that all changes in metadata are communicated and reflected within the platform within seconds.

This unlocks endless opportunities to automate data governance and data management workflows, such as:

  • Automatically enrich or annotate existing data entities within DataHub, i.e., apply Tags, Terms, Owners, etc.
  • Leverage the Actions Framework to trigger external workflows or send alerts to external systems, i.e., send a message to a team channel when there's a schema change
  • Proactively identify what business-critical data resources will be impacted by a breaking schema change

This section contains resources to help you take real-time action on your rapidly evolving data stack.

- + \ No newline at end of file diff --git a/docs/actions/actions/executor/index.html b/docs/actions/actions/executor/index.html index d675e26a803b7..3d188b8b6fa5d 100644 --- a/docs/actions/actions/executor/index.html +++ b/docs/actions/actions/executor/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ network connectivity to any source systems that are required for ingestion.

For example, if the ingestion recipe is pulling from an internal DBMS, the actions container must be able to resolve & connect to that DBMS system for the ingestion command to run successfully.

Install the Plugin(s)

Run the following commands to install the relevant action plugin(s):

pip install 'acryl-datahub-actions[executor]'

Configure the Action Config

Use the following config(s) to get started with this Action.

name: "pipeline-name"
source:
# source configs
action:
type: "executor"
# Requires DataHub API configurations to report to DataHub
datahub:
server: "http://${DATAHUB_GMS_HOST:-localhost}:${DATAHUB_GMS_PORT:-8080}"
# token: <token> # Must have "Manage Secrets" privilege
View All Configuration Options
| Field | Required | Default | Description | | --- | :-: | :-: | --- | | `executor_id` | ❌ | `default` | An executor ID assigned to the executor. This can be used to manage multiple distinct executors. |

Troubleshooting

Quitting the Actions Framework

Currently, when you quit the Actions framework, any in-flight ingestion processing will continue to execute as a subprocess on your system. This means that there may be "orphaned" processes which are never marked as "Succeeded" or "Failed" in the UI, even though they may have completed.

To address this, simply "Cancel" the ingestion source on the UI once you've restarted the Ingestion Executor action.

- + \ No newline at end of file diff --git a/docs/actions/actions/hello_world/index.html b/docs/actions/actions/hello_world/index.html index 75899e40cd29d..c8458dbeb4901 100644 --- a/docs/actions/actions/hello_world/index.html +++ b/docs/actions/actions/hello_world/index.html @@ -8,13 +8,13 @@ - +

Hello World

Certified

Overview

This Action is an example action which simply prints all Events it receives as JSON.

Capabilities

  • Printing events that are received by the Action to the console.

Supported Events

All event types, including

  • EntityChangeEvent_v1
  • MetadataChangeLog_v1

Action Quickstart

Prerequisites

No prerequisites. This action comes pre-loaded with acryl-datahub-actions.

Install the Plugin(s)

This action comes with the Actions Framework by default:

pip install 'acryl-datahub-actions'

Configure the Action Config

Use the following config(s) to get started with this Action.

name: "pipeline-name"
source:
# source configs
action:
type: "hello_world"
View All Configuration Options
| Field | Required | Default | Description | | --- | :-: | :-: | --- | | `to_upper` | ❌| `False` | Whether to print events in upper case. |

Troubleshooting

N/A

- + \ No newline at end of file diff --git a/docs/actions/actions/slack/index.html b/docs/actions/actions/slack/index.html index 1c31beb2d3bbe..d260ba4707652 100644 --- a/docs/actions/actions/slack/index.html +++ b/docs/actions/actions/slack/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ Slack Action Demo

Supported Events

  • EntityChangeEvent_v1
  • Currently, the MetadataChangeLog_v1 event is not processed by the Action.

Action Quickstart

Prerequisites

Ensure that you have configured the Slack App in your Slack workspace.

Install the DataHub Slack App into your Slack workspace

The following steps should be performed by a Slack Workspace Admin.

  • Navigate to https://api.slack.com/apps/
  • Click Create New App
  • Use “From an app manifest” option
  • Select your workspace
  • Paste this Manifest in YAML. We suggest changing the name and display_name to be DataHub App YOUR_TEAM_NAME but this is not required. This name will show up in your Slack workspace.
display_information:
name: DataHub App
description: An app to integrate DataHub with Slack
background_color: "#000000"
features:
bot_user:
display_name: DataHub App
always_online: false
oauth_config:
scopes:
bot:
- channels:history
- channels:read
- chat:write
- commands
- groups:read
- im:read
- mpim:read
- team:read
- users:read
- users:read.email
settings:
org_deploy_enabled: false
socket_mode_enabled: false
token_rotation_enabled: false
  • Confirm you see the Basic Information Tab

  • Click Install to Workspace
  • It will show you permissions the Slack App is asking for, what they mean and a default channel in which you want to add the slack app
    • Note that the Slack App will only be able to post in channels that the app has been added to. This is made clear by Slack’s Authentication screen also.
  • Select the channel you'd like notifications to go to and click Allow
  • Go to the DataHub App page

Getting Credentials and Configuration

Now that you've created your app and installed it in your workspace, you need a few pieces of information before you can activate your Slack action.

1. The Signing Secret

On your app's Basic Information page, you will see a App Credentials area. Take note of the Signing Secret information, you will need it later.

2. The Bot Token

Navigate to the OAuth & Permissions Tab

Here you'll find a “Bot User OAuth Token” which DataHub will need to communicate with your Slack workspace through the bot.

3. The Slack Channel

Finally, you need to figure out which Slack channel you will send notifications to. Perhaps it should be called #datahub-notifications or maybe, #data-notifications or maybe you already have a channel where important notifications about datasets and pipelines are already being routed to. Once you have decided what channel to send notifications to, make sure to add the app to the channel.

Next, figure out the channel id for this Slack channel. You can find it in the About section for the channel if you scroll to the very bottom of the app.

Alternately, if you are on the browser, you can figure it out from the URL. e.g. for the troubleshoot channel in OSS DataHub Slack

  • Notice TUMKD5EGJ/C029A3M079U in the URL
    • Channel ID = C029A3M079U from above

In the next steps, we'll show you how to configure the Slack Action based on the credentials and configuration values that you have collected.

Installation Instructions (Deployment specific)

Managed DataHub

Head over to the Configuring Notifications section in the Managed DataHub guide to configure Slack notifications for your Managed DataHub instance.

Quickstart

If you are running DataHub using the docker quickstart option, there are no additional software installation steps. The datahub-actions container comes pre-installed with the Slack action.

All you need to do is export a few environment variables to activate and configure the integration. See below for the list of environment variables to export.

Env VariableRequired for IntegrationPurpose
DATAHUB_ACTIONS_SLACK_ENABLEDSet to "true" to enable the Slack action
DATAHUB_ACTIONS_SLACK_SIGNING_SECRETSet to the Slack Signing Secret that you configured in the pre-requisites step above
DATAHUB_ACTIONS_SLACK_BOT_TOKENSet to the Bot User OAuth Token that you configured in the pre-requisites step above
DATAHUB_ACTIONS_SLACK_CHANNELSet to the Slack Channel ID that you want the action to send messages to
DATAHUB_ACTIONS_SLACK_DATAHUB_BASE_URLDefaults to "http://localhost:9002". Set to the location where your DataHub UI is running. On a local quickstart this is usually "http://localhost:9002", so you shouldn't need to modify this
note

You will have to restart the datahub-actions docker container after you have exported these environment variables if this is the first time. The simplest way to do it is via the Docker Desktop UI, or by just issuing a datahub docker quickstart --stop && datahub docker quickstart command to restart the whole instance.

For example:

export DATAHUB_ACTIONS_SLACK_ENABLED=true
export DATAHUB_ACTIONS_SLACK_SIGNING_SECRET=<slack-signing-secret>
....
export DATAHUB_ACTIONS_SLACK_CHANNEL=<slack_channel_id>

datahub docker quickstart --stop && datahub docker quickstart

k8s / helm

Similar to the quickstart scenario, there are no specific software installation steps. The datahub-actions container comes pre-installed with the Slack action. You just need to export a few environment variables and make them available to the datahub-actions container to activate and configure the integration. See below for the list of environment variables to export.

Env VariableRequired for IntegrationPurpose
DATAHUB_ACTIONS_SLACK_ENABLEDSet to "true" to enable the Slack action
DATAHUB_ACTIONS_SLACK_SIGNING_SECRETSet to the Slack Signing Secret that you configured in the pre-requisites step above
DATAHUB_ACTIONS_SLACK_BOT_TOKENSet to the Bot User OAuth Token that you configured in the pre-requisites step above
DATAHUB_ACTIONS_SLACK_CHANNELSet to the Slack Channel ID that you want the action to send messages to
DATAHUB_ACTIONS_DATAHUB_BASE_URLSet to the location where your DataHub UI is running. For example, if your DataHub UI is hosted at "https://datahub.my-company.biz", set this to "https://datahub.my-company.biz"

Bare Metal - CLI or Python-based

If you are using the datahub-actions library directly from Python, or the datahub-actions cli directly, then you need to first install the slack action plugin in your Python virtualenv.

pip install "datahub-actions[slack]"

Then run the action with a configuration file that you have modified to capture your credentials and configuration.

Sample Slack Action Configuration File
name: datahub_slack_action
enabled: true
source:
type: "kafka"
config:
connection:
bootstrap: ${KAFKA_BOOTSTRAP_SERVER:-localhost:9092}
schema_registry_url: ${SCHEMA_REGISTRY_URL:-http://localhost:8081}
topic_routes:
mcl: ${METADATA_CHANGE_LOG_VERSIONED_TOPIC_NAME:-MetadataChangeLog_Versioned_v1}
pe: ${PLATFORM_EVENT_TOPIC_NAME:-PlatformEvent_v1}

## 3a. Optional: Filter to run on events (map)
# filter:
# event_type: <filtered-event-type>
# event:
# # Filter event fields by exact-match
# <filtered-event-fields>

# 3b. Optional: Custom Transformers to run on events (array)
# transform:
# - type: <transformer-type>
# config:
# # Transformer-specific configs (map)

action:
type: slack
config:
# Action-specific configs (map)
base_url: ${DATAHUB_ACTIONS_SLACK_DATAHUB_BASE_URL:-http://localhost:9002}
bot_token: ${DATAHUB_ACTIONS_SLACK_BOT_TOKEN}
signing_secret: ${DATAHUB_ACTIONS_SLACK_SIGNING_SECRET}
default_channel: ${DATAHUB_ACTIONS_SLACK_CHANNEL}
suppress_system_activity: ${DATAHUB_ACTIONS_SLACK_SUPPRESS_SYSTEM_ACTIVITY:-true}

datahub:
server: "http://${DATAHUB_GMS_HOST:-localhost}:${DATAHUB_GMS_PORT:-8080}"

Slack Action Configuration Parameters
FieldRequiredDefaultDescription
base_urlFalseThe base url where your DataHub UI is running, used to generate links back to DataHub in notification messages. Defaults to "http://localhost:9002".
signing_secretSet to the Slack Signing Secret that you configured in the pre-requisites step above
bot_tokenSet to the Bot User OAuth Token that you configured in the pre-requisites step above
default_channelSet to the Slack Channel ID that you want the action to send messages to
suppress_system_activityTrueSet to False if you want to get low level system activity events, e.g. when datasets are ingested, etc. Note: this will currently result in a very spammy Slack notifications experience, so this is not recommended to be changed.

Troubleshooting

If things are configured correctly, you should see logs on the datahub-actions container that indicate success in enabling and running the Slack action.

docker logs datahub-datahub-actions-1

...
[2022-12-04 07:07:53,804] INFO {datahub_actions.plugin.action.slack.slack:96} - Slack notification action configured with bot_token=SecretStr('**********') signing_secret=SecretStr('**********') default_channel='C04CZUSSR5X' base_url='http://localhost:9002' suppress_system_activity=True
[2022-12-04 07:07:54,506] WARNING {datahub_actions.cli.actions:103} - Skipping pipeline datahub_teams_action as it is not enabled
[2022-12-04 07:07:54,506] INFO {datahub_actions.cli.actions:119} - Action Pipeline with name 'ingestion_executor' is now running.
[2022-12-04 07:07:54,507] INFO {datahub_actions.cli.actions:119} - Action Pipeline with name 'datahub_slack_action' is now running.
...

If the Slack action was not enabled, you would see messages indicating that. e.g. the following logs below show that neither the Slack nor the Teams action was enabled.

docker logs datahub-datahub-actions-1

....
No user action configurations found. Not starting user actions.
[2022-12-04 06:45:27,509] INFO {datahub_actions.cli.actions:76} - DataHub Actions version: unavailable (installed editable via git)
[2022-12-04 06:45:27,647] WARNING {datahub_actions.cli.actions:103} - Skipping pipeline datahub_slack_action as it is not enabled
[2022-12-04 06:45:27,649] WARNING {datahub_actions.cli.actions:103} - Skipping pipeline datahub_teams_action as it is not enabled
[2022-12-04 06:45:27,649] INFO {datahub_actions.cli.actions:119} - Action Pipeline with name 'ingestion_executor' is now running.
...

- + \ No newline at end of file diff --git a/docs/actions/actions/teams/index.html b/docs/actions/actions/teams/index.html index 5a2980fc26bf6..06211ecc4b551 100644 --- a/docs/actions/actions/teams/index.html +++ b/docs/actions/actions/teams/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@

Watch the townhall demo to see this in action: Teams Action Demo

Supported Events

  • EntityChangeEvent_v1
  • Currently, the MetadataChangeLog_v1 event is not processed by the Action.

Action Quickstart

Prerequisites

Ensure that you have configured an incoming webhook in your Teams channel.

Follow the guide here to set it up.

Take note of the incoming webhook url as you will need to use that to configure the Team action.

Installation Instructions (Deployment specific)

Quickstart

If you are running DataHub using the docker quickstart option, there are no additional software installation steps. The datahub-actions container comes pre-installed with the Teams action.

All you need to do is export a few environment variables to activate and configure the integration. See below for the list of environment variables to export.

Env VariableRequired for IntegrationPurpose
DATAHUB_ACTIONS_TEAMS_ENABLEDSet to "true" to enable the Teams action
DATAHUB_ACTIONS_TEAMS_WEBHOOK_URLSet to the incoming webhook url that you configured in the pre-requisites step above
DATAHUB_ACTIONS_DATAHUB_BASE_URLDefaults to "http://localhost:9002". Set to the location where your DataHub UI is running. On a local quickstart this is usually "http://localhost:9002", so you shouldn't need to modify this
note

You will have to restart the datahub-actions docker container after you have exported these environment variables if this is the first time. The simplest way to do it is via the Docker Desktop UI, or by just issuing a datahub docker quickstart --stop && datahub docker quickstart command to restart the whole instance.

For example:

export DATAHUB_ACTIONS_TEAMS_ENABLED=true
export DATAHUB_ACTIONS_TEAMS_WEBHOOK_URL=<teams_webhook_url>

datahub docker quickstart --stop && datahub docker quickstart

k8s / helm

Similar to the quickstart scenario, there are no specific software installation steps. The datahub-actions container comes pre-installed with the Teams action. You just need to export a few environment variables and make them available to the datahub-actions container to activate and configure the integration. See below for the list of environment variables to export.

Env VariableRequired for IntegrationPurpose
DATAHUB_ACTIONS_TEAMS_ENABLEDSet to "true" to enable the Teams action
DATAHUB_ACTIONS_TEAMS_WEBHOOK_URLSet to the incoming webhook url that you configured in the pre-requisites step above
DATAHUB_ACTIONS_TEAMS_DATAHUB_BASE_URLSet to the location where your DataHub UI is running. For example, if your DataHub UI is hosted at "https://datahub.my-company.biz", set this to "https://datahub.my-company.biz"

Bare Metal - CLI or Python-based

If you are using the datahub-actions library directly from Python, or the datahub-actions cli directly, then you need to first install the teams action plugin in your Python virtualenv.

pip install "datahub-actions[teams]"

Then run the action with a configuration file that you have modified to capture your credentials and configuration.

Sample Teams Action Configuration File
name: datahub_teams_action
enabled: true
source:
type: "kafka"
config:
connection:
bootstrap: ${KAFKA_BOOTSTRAP_SERVER:-localhost:9092}
schema_registry_url: ${SCHEMA_REGISTRY_URL:-http://localhost:8081}
topic_routes:
mcl: ${METADATA_CHANGE_LOG_VERSIONED_TOPIC_NAME:-MetadataChangeLog_Versioned_v1}
pe: ${PLATFORM_EVENT_TOPIC_NAME:-PlatformEvent_v1}

## 3a. Optional: Filter to run on events (map)
# filter:
# event_type: <filtered-event-type>
# event:
# # Filter event fields by exact-match
# <filtered-event-fields>

# 3b. Optional: Custom Transformers to run on events (array)
# transform:
# - type: <transformer-type>
# config:
# # Transformer-specific configs (map)

action:
type: teams
config:
# Action-specific configs (map)
base_url: ${DATAHUB_ACTIONS_TEAMS_DATAHUB_BASE_URL:-http://localhost:9002}
webhook_url: ${DATAHUB_ACTIONS_TEAMS_WEBHOOK_URL}
suppress_system_activity: ${DATAHUB_ACTIONS_TEAMS_SUPPRESS_SYSTEM_ACTIVITY:-true}

datahub:
server: "http://${DATAHUB_GMS_HOST:-localhost}:${DATAHUB_GMS_PORT:-8080}"
Teams Action Configuration Parameters
FieldRequiredDefaultDescription
base_urlFalseThe base url where your DataHub UI is running, used to generate links back to DataHub in notification messages. Defaults to "http://localhost:9002".
webhook_urlSet to the incoming webhook url that you configured in the pre-requisites step above
suppress_system_activityTrueSet to False if you want to get low level system activity events, e.g. when datasets are ingested, etc. Note: this will currently result in a very spammy Teams notifications experience, so this is not recommended to be changed.

Troubleshooting

If things are configured correctly, you should see logs on the datahub-actions container that indicate success in enabling and running the Teams action.

docker logs datahub-datahub-actions-1

...
[2022-12-04 16:47:44,536] INFO {datahub_actions.cli.actions:76} - DataHub Actions version: unavailable (installed editable via git)
[2022-12-04 16:47:44,565] WARNING {datahub_actions.cli.actions:103} - Skipping pipeline datahub_slack_action as it is not enabled
[2022-12-04 16:47:44,581] INFO {datahub_actions.plugin.action.teams.teams:60} - Teams notification action configured with webhook_url=SecretStr('**********') base_url='http://localhost:9002' suppress_system_activity=True
[2022-12-04 16:47:46,393] INFO {datahub_actions.cli.actions:119} - Action Pipeline with name 'ingestion_executor' is now running.
[2022-12-04 16:47:46,393] INFO {datahub_actions.cli.actions:119} - Action Pipeline with name 'datahub_teams_action' is now running.
...

If the Teams action was not enabled, you would see messages indicating that. e.g. the following logs below show that neither the Teams nor the Slack action was enabled.

docker logs datahub-datahub-actions-1

....
No user action configurations found. Not starting user actions.
[2022-12-04 06:45:27,509] INFO {datahub_actions.cli.actions:76} - DataHub Actions version: unavailable (installed editable via git)
[2022-12-04 06:45:27,647] WARNING {datahub_actions.cli.actions:103} - Skipping pipeline datahub_slack_action as it is not enabled
[2022-12-04 06:45:27,649] WARNING {datahub_actions.cli.actions:103} - Skipping pipeline datahub_teams_action as it is not enabled
[2022-12-04 06:45:27,649] INFO {datahub_actions.cli.actions:119} - Action Pipeline with name 'ingestion_executor' is now running.
...

- + \ No newline at end of file diff --git a/docs/actions/concepts/index.html b/docs/actions/concepts/index.html index d6d55d5a4fd0e..a15605fad740f 100644 --- a/docs/actions/concepts/index.html +++ b/docs/actions/concepts/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ each Pipeline has its very own Event Source, Transforms, and Actions. This makes it easy to maintain state for mission-critical Actions independently.

Importantly, each Action must have a unique name. This serves as a stable identifier across Pipeline run which can be useful in saving the Pipeline's consumer state (ie. resiliency + reliability). For example, the Kafka Event Source (default) uses the pipeline name as the Kafka Consumer Group id. This enables you to easily scale-out your Actions by running multiple processes with the same exact configuration file. Each will simply become different consumers in the same consumer group, sharing traffic of the DataHub Events stream.

Events

Events are data objects representing changes that have occurred on DataHub. Strictly speaking, the only requirement that the Actions framework imposes is that these objects must be

a. Convertible to JSON b. Convertible from JSON

So that in the event of processing failures, events can be written and read from a failed events file.

Event Types

Each Event instance inside the framework corresponds to a single Event Type, which is a common name (e.g. "EntityChangeEvent_v1") which can be used to understand the shape of the Event. This can be thought of as a "topic" or "stream" name. That being said, Events associated with a single type are not expected to change in backwards-breaking ways across versions.

Event Sources

Events are produced to the framework by Event Sources. Event Sources may include their own guarantees, configurations, behaviors, and semantics. They usually produce a fixed set of Event Types.

In addition to sourcing events, Event Sources are also responsible for acking the successful processing of an event by implementing the ack method. This is invoked by the framework once the Event is guaranteed to have reached the configured Action successfully.

Transformers

Transformers are pluggable components which take an Event as input, and produce an Event (or nothing) as output. This can be used to enrich the information of an Event prior to sending it to an Action.

Multiple Transformers can be configured to run in sequence, filtering and transforming an event in multiple steps.

Transformers can also be used to generate a completely new type of Event (i.e. registered at runtime via the Event Registry) which can subsequently serve as input to an Action.

Transformers can be easily customized and plugged in to meet an organization's unique requirements. For more information on developing a Transformer, check out Developing a Transformer

Action

Actions are pluggable components which take an Event as input and perform some business logic. Examples may be sending a Slack notification, logging to a file, or creating a Jira ticket, etc.

Each Pipeline can be configured to have a single Action which runs after the filtering and transformations have occurred.

Actions can be easily customized and plugged in to meet an organization's unique requirements. For more information on developing an Action, check out Developing an Action

- + \ No newline at end of file diff --git a/docs/actions/events/entity-change-event/index.html b/docs/actions/events/entity-change-event/index.html index 03b886f537dc8..f27742c8fb7a9 100644 --- a/docs/actions/events/entity-change-event/index.html +++ b/docs/actions/events/entity-change-event/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ Header

Sample Event

{
"entityUrn": "urn:li:dataset:abc",
"entityType": "dataset",
"category": "GLOSSARY_TERM",
"operation": "ADD",
"modifier": "urn:li:glossaryTerm:ExampleNode.ExampleTerm",
"parameters": {
"termUrn": "urn:li:glossaryTerm:ExampleNode.ExampleTerm"
},
"auditStamp": {
"actor": "urn:li:corpuser:jdoe",
"time": 1649953100653
}
}

Remove Glossary Term Event

This event is emitted when a Glossary Term has been removed from an entity on DataHub.

Sample Event

{
"entityUrn": "urn:li:dataset:abc",
"entityType": "dataset",
"category": "GLOSSARY_TERM",
"operation": "REMOVE",
"modifier": "urn:li:glossaryTerm:ExampleNode.ExampleTerm",
"parameters": {
"termUrn": "urn:li:glossaryTerm:ExampleNode.ExampleTerm"
},
"auditStamp": {
"actor": "urn:li:corpuser:jdoe",
"time": 1649953100653
}
}

Add Domain Event

This event is emitted when a Domain has been added to an entity on DataHub.

Sample Event

{
"entityUrn": "urn:li:dataset:abc",
"entityType": "dataset",
"category": "DOMAIN",
"operation": "ADD",
"modifier": "urn:li:domain:ExampleDomain",
"parameters": {
"domainUrn": "urn:li:domain:ExampleDomain"
},
"auditStamp": {
"actor": "urn:li:corpuser:jdoe",
"time": 1649953100653
}
}

Remove Domain Event

This event is emitted when a Domain has been removed from an entity on DataHub.

Sample Event

{
"entityUrn": "urn:li:dataset:abc",
"entityType": "dataset",
"category": "DOMAIN",
"operation": "REMOVE",
"modifier": "urn:li:domain:ExampleDomain",
"parameters": {
"domainUrn": "urn:li:domain:ExampleDomain"
},
"auditStamp": {
"actor": "urn:li:corpuser:jdoe",
"time": 1649953100653
}
}

Add Owner Event

This event is emitted when a new owner has been assigned to an entity on DataHub.

Sample Event

{
"entityUrn": "urn:li:dataset:abc",
"entityType": "dataset",
"category": "OWNER",
"operation": "ADD",
"modifier": "urn:li:corpuser:jdoe",
"parameters": {
"ownerUrn": "urn:li:corpuser:jdoe",
"ownerType": "BUSINESS_OWNER"
},
"auditStamp": {
"actor": "urn:li:corpuser:jdoe",
"time": 1649953100653
}
}

Remove Owner Event

This event is emitted when an existing owner has been removed from an entity on DataHub.

Sample Event

{
"entityUrn": "urn:li:dataset:abc",
"entityType": "dataset",
"category": "OWNER",
"operation": "REMOVE",
"modifier": "urn:li:corpuser:jdoe",
"parameters": {
"ownerUrn": "urn:li:corpuser:jdoe",
"ownerType": "BUSINESS_OWNER"
},
"auditStamp": {
"actor": "urn:li:corpuser:jdoe",
"time": 1649953100653
}
}

Modify Deprecation Event

This event is emitted when the deprecation status of an entity has been modified on DataHub.

Sample Event

{
"entityUrn": "urn:li:dataset:abc",
"entityType": "dataset",
"category": "DEPRECATION",
"operation": "MODIFY",
"modifier": "DEPRECATED",
"parameters": {
"status": "DEPRECATED"
},
"auditStamp": {
"actor": "urn:li:corpuser:jdoe",
"time": 1649953100653
}
}

Add Dataset Schema Field Event

This event is emitted when a new field has been added to a Dataset Schema.

Sample Event

{
"entityUrn": "urn:li:dataset:abc",
"entityType": "dataset",
"category": "TECHNICAL_SCHEMA",
"operation": "ADD",
"modifier": "urn:li:schemaField:(urn:li:dataset:abc,newFieldName)",
"parameters": {
"fieldUrn": "urn:li:schemaField:(urn:li:dataset:abc,newFieldName)",
"fieldPath": "newFieldName",
"nullable": false
},
"auditStamp": {
"actor": "urn:li:corpuser:jdoe",
"time": 1649953100653
}
}

Remove Dataset Schema Field Event

This event is emitted when an existing field has been removed from a Dataset Schema.

Sample Event

{
"entityUrn": "urn:li:dataset:abc",
"entityType": "dataset",
"category": "TECHNICAL_SCHEMA",
"operation": "REMOVE",
"modifier": "urn:li:schemaField:(urn:li:dataset:abc,newFieldName)",
"parameters": {
"fieldUrn": "urn:li:schemaField:(urn:li:dataset:abc,newFieldName)",
"fieldPath": "newFieldName",
"nullable": false
},
"auditStamp": {
"actor": "urn:li:corpuser:jdoe",
"time": 1649953100653
}
}

Entity Create Event

This event is emitted when a new entity has been created on DataHub.

Sample Event

{
"entityUrn": "urn:li:dataset:abc",
"entityType": "dataset",
"category": "LIFECYCLE",
"operation": "CREATE",
"auditStamp": {
"actor": "urn:li:corpuser:jdoe",
"time": 1649953100653
}
}

Entity Soft-Delete Event

This event is emitted when a new entity has been soft-deleted on DataHub.

Sample Event

{
"entityUrn": "urn:li:dataset:abc",
"entityType": "dataset",
"category": "LIFECYCLE",
"operation": "SOFT_DELETE",
"auditStamp": {
"actor": "urn:li:corpuser:jdoe",
"time": 1649953100653
}
}

Entity Hard-Delete Event

This event is emitted when a new entity has been hard-deleted on DataHub.

Sample Event

{
"entityUrn": "urn:li:dataset:abc",
"entityType": "dataset",
"category": "LIFECYCLE",
"operation": "HARD_DELETE",
"auditStamp": {
"actor": "urn:li:corpuser:jdoe",
"time": 1649953100653
}
}
- + \ No newline at end of file diff --git a/docs/actions/events/metadata-change-log-event/index.html b/docs/actions/events/metadata-change-log-event/index.html index d40d9521ce0e6..d02d6699f85cf 100644 --- a/docs/actions/events/metadata-change-log-event/index.html +++ b/docs/actions/events/metadata-change-log-event/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

Metadata Change Log Event V1

Event Type

MetadataChangeLog_v1

Overview

This event is emitted when any aspect on DataHub Metadata Graph is changed. This includes creates, updates, and removals of both "versioned" aspects and "time-series" aspects.

Disclaimer: This event is quite powerful, but also quite low-level. Because it exposes the underlying metadata model directly, it is subject to more frequent structural and semantic changes than the higher level Entity Change Event. We recommend using that event instead to achieve your use case when possible.

Event Structure

The fields include

NameTypeDescriptionOptional
entityUrnStringThe unique identifier for the Entity being changed. For example, a Dataset's urn.False
entityTypeStringThe type of the entity being changed. Supported values include dataset, chart, dashboard, dataFlow (Pipeline), dataJob (Task), domain, tag, glossaryTerm, corpGroup, & corpUser.False
entityKeyAspectObjectThe key struct of the entity that was changed. Only present if the Metadata Change Proposal contained the raw key struct.True
changeTypeStringThe change type. UPSERT or DELETE are currently supported.False
aspectNameStringThe entity aspect which was changed.False
aspectObjectThe new aspect value. Null if the aspect was deleted.True
aspect.contentTypeStringThe serialization type of the aspect itself. The only supported value is application/json.False
aspect.valueStringThe serialized aspect. This is a JSON-serialized string representing the aspect document originally defined in PDL. See https://github.com/datahub-project/datahub/tree/master/metadata-models/src/main/pegasus/com/linkedin for more.False
previousAspectValueObjectThe previous aspect value. Null if the aspect did not exist previously.True
previousAspectValue.contentTypeStringThe serialization type of the aspect itself. The only supported value is application/jsonFalse
previousAspectValue.valueStringThe serialized aspect. This is a JSON-serialized string representing the aspect document originally defined in PDL. See https://github.com/datahub-project/datahub/tree/master/metadata-models/src/main/pegasus/com/linkedin for more.False
systemMetadataObjectThe new system metadata. This includes the ingestion run-id, model registry and more. For the full structure, see https://github.com/datahub-project/datahub/blob/master/metadata-models/src/main/pegasus/com/linkedin/mxe/SystemMetadata.pdlTrue
previousSystemMetadataObjectThe previous system metadata. This includes the ingestion run-id, model registry and more. For the full structure, see https://github.com/datahub-project/datahub/blob/master/metadata-models/src/main/pegasus/com/linkedin/mxe/SystemMetadata.pdlTrue
createdObjectAudit stamp about who triggered the Metadata Change and when.False
created.timeNumberThe timestamp in milliseconds when the aspect change occurred.False
created.actorStringThe URN of the actor (e.g. corpuser) that triggered the change.

Sample Events

Tag Change Event

{
"entityType": "container",
"entityUrn": "urn:li:container:DATABASE",
"entityKeyAspect": null,
"changeType": "UPSERT",
"aspectName": "globalTags",
"aspect": {
"value": "{\"tags\":[{\"tag\":\"urn:li:tag:pii\"}]}",
"contentType": "application/json"
},
"systemMetadata": {
"lastObserved": 1651516475595,
"runId": "no-run-id-provided",
"registryName": "unknownRegistry",
"registryVersion": "0.0.0.0-dev",
"properties": null
},
"previousAspectValue": null,
"previousSystemMetadata": null,
"created": {
"time": 1651516475594,
"actor": "urn:li:corpuser:datahub",
"impersonator": null
}
}

Glossary Term Change Event

{
"entityType": "dataset",
"entityUrn": "urn:li:dataset:(urn:li:dataPlatform:hdfs,SampleHdfsDataset,PROD)",
"entityKeyAspect": null,
"changeType": "UPSERT",
"aspectName": "glossaryTerms",
"aspect": {
"value": "{\"auditStamp\":{\"actor\":\"urn:li:corpuser:datahub\",\"time\":1651516599479},\"terms\":[{\"urn\":\"urn:li:glossaryTerm:CustomerAccount\"}]}",
"contentType": "application/json"
},
"systemMetadata": {
"lastObserved": 1651516599486,
"runId": "no-run-id-provided",
"registryName": "unknownRegistry",
"registryVersion": "0.0.0.0-dev",
"properties": null
},
"previousAspectValue": null,
"previousSystemMetadata": null,
"created": {
"time": 1651516599480,
"actor": "urn:li:corpuser:datahub",
"impersonator": null
}
}

Owner Change Event

{
"auditHeader": null,
"entityType": "dataset",
"entityUrn": "urn:li:dataset:(urn:li:dataPlatform:hdfs,SampleHdfsDataset,PROD)",
"entityKeyAspect": null,
"changeType": "UPSERT",
"aspectName": "ownership",
"aspect": {
"value": "{\"owners\":[{\"type\":\"DATAOWNER\",\"owner\":\"urn:li:corpuser:datahub\"}],\"lastModified\":{\"actor\":\"urn:li:corpuser:datahub\",\"time\":1651516640488}}",
"contentType": "application/json"
},
"systemMetadata": {
"lastObserved": 1651516640493,
"runId": "no-run-id-provided",
"registryName": "unknownRegistry",
"registryVersion": "0.0.0.0-dev",
"properties": null
},
"previousAspectValue": {
"value": "{\"owners\":[{\"owner\":\"urn:li:corpuser:jdoe\",\"type\":\"DATAOWNER\"},{\"owner\":\"urn:li:corpuser:datahub\",\"type\":\"DATAOWNER\"}],\"lastModified\":{\"actor\":\"urn:li:corpuser:jdoe\",\"time\":1581407189000}}",
"contentType": "application/json"
},
"previousSystemMetadata": {
"lastObserved": 1651516415088,
"runId": "file-2022_05_02-11_33_35",
"registryName": null,
"registryVersion": null,
"properties": null
},
"created": {
"time": 1651516640490,
"actor": "urn:li:corpuser:datahub",
"impersonator": null
}
}

FAQ

Where can I find all the aspects and their schemas?

Great Question! All MetadataChangeLog events are based on the Metadata Model which is comprised of Entities, Aspects, and Relationships which make up an enterprise Metadata Graph. We recommend checking out the following resources to learn more about this:

You can also find a comprehensive list of Entities + Aspects of the Metadata Model under the Metadata Modeling > Entities section of the official DataHub docs.

- + \ No newline at end of file diff --git a/docs/actions/guides/developing-a-transformer/index.html b/docs/actions/guides/developing-a-transformer/index.html index 28f3ac4b50192..6480313261e01 100644 --- a/docs/actions/guides/developing-a-transformer/index.html +++ b/docs/actions/guides/developing-a-transformer/index.html @@ -8,7 +8,7 @@ - + @@ -25,7 +25,7 @@ datahub-actions repository.

Once you've added your new Transformer there, make sure that you make it discoverable by updating the entry_points section of the setup.py file. This allows you to assign a globally unique name for your Transformer, so that people can use it without defining the full module path.

Prerequisites:

Prerequisites to consideration for inclusion in the core Transformer library include

  • Testing Define unit tests for your Transformer
  • Deduplication Confirm that no existing Transformer serves the same purpose, or can be easily extended to serve the same purpose
- + \ No newline at end of file diff --git a/docs/actions/guides/developing-an-action/index.html b/docs/actions/guides/developing-an-action/index.html index f6d82ddf736ef..de968c077e67f 100644 --- a/docs/actions/guides/developing-an-action/index.html +++ b/docs/actions/guides/developing-an-action/index.html @@ -8,7 +8,7 @@ - + @@ -25,7 +25,7 @@ datahub-actions repository.

Once you've added your new Action there, make sure that you make it discoverable by updating the entry_points section of the setup.py file. This allows you to assign a globally unique name for your Action, so that people can use it without defining the full module path.

Prerequisites:

Prerequisites to consideration for inclusion in the core Actions library include

  • Testing Define unit tests for your Action
  • Deduplication Confirm that no existing Action serves the same purpose, or can be easily extended to serve the same purpose
- + \ No newline at end of file diff --git a/docs/actions/index.html b/docs/actions/index.html index c6481ce32227d..e2fe096d7b72b 100644 --- a/docs/actions/index.html +++ b/docs/actions/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ via a Kafka Consumer.

Supported Actions

By default, DataHub supports a set of standard actions plugins. These can be found inside the folder src/datahub-actions/plugins.

Some pre-included Actions include

Development

Build and Test

Notice that we support all actions commands using a separate datahub-actions CLI entry point. Feel free to use this during development.

# Build datahub-actions module
./gradlew datahub-actions:build

# Drop into virtual env
cd datahub-actions && source venv/bin/activate

# Start hello world action
datahub-actions actions -c ../examples/hello_world.yaml

# Start ingestion executor action
datahub-actions actions -c ../examples/executor.yaml

# Start multiple actions
datahub-actions actions -c ../examples/executor.yaml -c ../examples/hello_world.yaml

Developing a Transformer

To develop a new Transformer, check out the Developing a Transformer guide.

Developing an Action

To develop a new Action, check out the Developing an Action guide.

Contributing

Contributing guidelines follow those of the main DataHub project. We are accepting contributions for Actions, Transformers, and general framework improvements (tests, error handling, etc).

Resources

Check out the original announcement of the DataHub Actions Framework at the DataHub April 2022 Town Hall.

License

Apache 2.0

- + \ No newline at end of file diff --git a/docs/actions/quickstart/index.html b/docs/actions/quickstart/index.html index aa489529f0eea..c2da8c82db4e6 100644 --- a/docs/actions/quickstart/index.html +++ b/docs/actions/quickstart/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ will prevent events that do not match the filter from being forwarded to the action.

# hello_world.yaml
name: "hello_world"
source:
type: "kafka"
config:
connection:
bootstrap: ${KAFKA_BOOTSTRAP_SERVER:-localhost:9092}
schema_registry_url: ${SCHEMA_REGISTRY_URL:-http://localhost:8081}
filter:
event_type: "EntityChangeEvent_v1"
action:
type: "hello_world"

Filtering for events of type EntityChangeEvent_v1 only

Advanced Filtering

Beyond simply filtering by event type, we can also filter events by matching against the values of their fields. To do so, use the event block. Each field provided will be compared against the real event's value. An event that matches all of the fields will be forwarded to the action.

# hello_world.yaml
name: "hello_world"
source:
type: "kafka"
config:
connection:
bootstrap: ${KAFKA_BOOTSTRAP_SERVER:-localhost:9092}
schema_registry_url: ${SCHEMA_REGISTRY_URL:-http://localhost:8081}
filter:
event_type: "EntityChangeEvent_v1"
event:
category: "TAG"
operation: "ADD"
modifier: "urn:li:tag:pii"
action:
type: "hello_world"

This filter only matches events representing "PII" tag additions to an entity.

Furthermore, we can achieve "OR" semantics on a particular field by providing an array of values.

# hello_world.yaml
name: "hello_world"
source:
type: "kafka"
config:
connection:
bootstrap: ${KAFKA_BOOTSTRAP_SERVER:-localhost:9092}
schema_registry_url: ${SCHEMA_REGISTRY_URL:-http://localhost:8081}
filter:
event_type: "EntityChangeEvent_v1"
event:
category: "TAG"
operation: [ "ADD", "REMOVE" ]
modifier: "urn:li:tag:pii"
action:
type: "hello_world"

This filter only matches events representing "PII" tag additions to OR removals from an entity. How fancy!

- + \ No newline at end of file diff --git a/docs/actions/sources/kafka-event-source/index.html b/docs/actions/sources/kafka-event-source/index.html index cadc62d440c6d..e9147a9237289 100644 --- a/docs/actions/sources/kafka-event-source/index.html +++ b/docs/actions/sources/kafka-event-source/index.html @@ -8,7 +8,7 @@ - + @@ -27,7 +27,7 @@ fail to be processed will simply be logged to a failed_events.log file for further investigation (dead letter queue). The Kafka Event Source will continue to make progress against the underlying topics and continue to commit offsets even in the case of failed messages.

If you've configured your Action pipeline failure_mode to be THROW, then events which fail to be processed result in an Action Pipeline error. This in turn terminates the pipeline before committing offsets back to Kafka. Thus the message will not be marked as "processed" by the Action consumer.

Supported Events

The Kafka Event Source produces

Configure the Event Source

Use the following config(s) to get started with the Kafka Event Source.

name: "pipeline-name"
source:
type: "kafka"
config:
# Connection-related configuration
connection:
bootstrap: ${KAFKA_BOOTSTRAP_SERVER:-localhost:9092}
schema_registry_url: ${SCHEMA_REGISTRY_URL:-http://localhost:8081}
# Dictionary of freeform consumer configs propagated to underlying Kafka Consumer
consumer_config:
#security.protocol: ${KAFKA_PROPERTIES_SECURITY_PROTOCOL:-PLAINTEXT}
#ssl.keystore.location: ${KAFKA_PROPERTIES_SSL_KEYSTORE_LOCATION:-/mnt/certs/keystore}
#ssl.truststore.location: ${KAFKA_PROPERTIES_SSL_TRUSTSTORE_LOCATION:-/mnt/certs/truststore}
#ssl.keystore.password: ${KAFKA_PROPERTIES_SSL_KEYSTORE_PASSWORD:-keystore_password}
#ssl.key.password: ${KAFKA_PROPERTIES_SSL_KEY_PASSWORD:-keystore_password}
#ssl.truststore.password: ${KAFKA_PROPERTIES_SSL_TRUSTSTORE_PASSWORD:-truststore_password}
# Topic Routing - which topics to read from.
topic_routes:
mcl: ${METADATA_CHANGE_LOG_VERSIONED_TOPIC_NAME:-MetadataChangeLog_Versioned_v1} # Topic name for MetadataChangeLog_v1 events.
pe: ${PLATFORM_EVENT_TOPIC_NAME:-PlatformEvent_v1} # Topic name for PlatformEvent_v1 events.
action:
# action configs
View All Configuration Options
| Field | Required | Default | Description | | --- | :-: | :-: | --- | | `connection.bootstrap` | ✅ | N/A | The Kafka bootstrap URI, e.g. `localhost:9092`. | | `connection.schema_registry_url` | ✅ | N/A | The URL for the Kafka schema registry, e.g. `http://localhost:8081` | | `connection.consumer_config` | ❌ | | A set of key-value pairs that represents arbitrary Kafka Consumer configs | | `topic_routes.mcl` | ❌ | `MetadataChangeLog_v1` | The name of the topic containing MetadataChangeLog events | | `topic_routes.pe` | ❌ | `PlatformEvent_v1` | The name of the topic containing PlatformEvent events |

FAQ

  1. Is there a way to always start processing from the end of the topics on Actions start?

Currently, the only way is to change the name of the Action in its configuration file. In the future, we are hoping to add first-class support for configuring the action to be "stateless", ie only process messages that are received while the Action is running.

  2. Is there a way to asynchronously commit offsets back to Kafka?

Currently, all consumer offset commits are made synchronously for each message received. For now we've optimized for correctness over performance. If this commit policy does not accommodate your organization's needs, certainly reach out on Slack.

- + \ No newline at end of file diff --git a/docs/advanced/aspect-versioning/index.html b/docs/advanced/aspect-versioning/index.html index a5252c0a6c13d..ca108795dd844 100644 --- a/docs/advanced/aspect-versioning/index.html +++ b/docs/advanced/aspect-versioning/index.html @@ -8,13 +8,13 @@ - +

Aspect Versioning

As each version of metadata aspect is immutable, any update to an existing aspect results in the creation of a new version. Typically one would expect the version number increases sequentially with the largest version number being the latest version, i.e. v1 (oldest), v2 (second oldest), ..., vN (latest). However, this approach results in major challenges in both rest.li modeling & transaction isolation and therefore requires a rethinking.

Rest.li Modeling

As it's common to create dedicated rest.li sub-resources for a specific aspect, e.g. /datasets/{datasetKey}/ownership, the concept of versions become an interesting modeling question. Should the sub-resource be a Simple or a Collection type?

If Simple, the GET method is expected to return the latest version, and the only way to retrieve non-latest versions is through a custom ACTION method, which is going against the REST principle. As a result, a Simple sub-resource doesn't seem to be a good fit.

If Collection, the version number naturally becomes the key so it's easy to retrieve specific version number using the typical GET method. It's also easy to list all versions using the standard GET_ALL method or get a set of versions via BATCH_GET. However, Collection resources don't support a simple way to get the latest/largest key directly. To achieve that, one must do one of the following

  • a GET_ALL (assuming descending key order) with a page size of 1
  • a FINDER with special parameters and a page size of 1
  • a custom ACTION method again

None of these options seems like a natural way to ask for the latest version of an aspect, which is one of the most common use cases.

Transaction Isolation

Transaction isolation is a complex topic so make sure to familiarize yourself with the basics first.

To support concurrent update of a metadata aspect, the following pseudo DB operations must be run in a single transaction,

1. Retrieve the current max version (Vmax)
2. Write the new value as (Vmax + 1)

Operation 1 above can easily suffer from Phantom Reads. This subsequently leads to Operation 2 computing the incorrect version and thus overwrites an existing version instead of creating a new one.

One way to solve this is by enforcing Serializable isolation level in DB at the cost of performance. In reality, very few DB even supports this level of isolation, especially for distributed document stores. It's more common to support Repeatable Reads or Read Committed isolation levels—sadly neither would help in this case.

Another possible solution is to transactionally keep track of Vmax directly in a separate table to avoid the need to compute that through a select (thus prevent Phantom Reads). However, cross-table/document/entity transaction is not a feature supported by all distributed document stores, which precludes this as a generalized solution.

Solution: Version 0

The solution to both challenges turns out to be surprisingly simple. Instead of using a "floating" version number to represent the latest version, one can use a "fixed/sentinel" version number instead. In this case we choose Version 0 as we want all non-latest versions to still keep increasing sequentially. In other words, it'd be v0 (latest), v1 (oldest), v2 (second oldest), etc. Alternatively, you can also simply view all the non-zero versions as an audit trail.

Let's examine how Version 0 can solve the aforementioned challenges.

Rest.li Modeling

With Version 0, getting the latest version becomes calling the GET method of a Collection aspect-specific sub-resource with a deterministic key, e.g. /datasets/{datasetkey}/ownership/0, which is a lot more natural than using GET_ALL or FINDER.

Transaction Isolation

The pseudo DB operations change to the following transaction block with version 0,

1. Retrieve v0 of the aspect
2. Retrieve the current max version (Vmax)
3. Write the old value back as (Vmax + 1)
4. Write the new value back as v0

While Operation 2 still suffers from potential Phantom Reads and thus corrupting existing version in Operation 3, Repeatable Reads isolation level will ensure that the transaction fails due to Lost Update detected in Operation 4. Note that this happens to also be the default isolation level for InnoDB in MySQL.

- + \ No newline at end of file diff --git a/docs/advanced/backfilling/index.html b/docs/advanced/backfilling/index.html index f1957f1925a4c..9258bbd18bf29 100644 --- a/docs/advanced/backfilling/index.html +++ b/docs/advanced/backfilling/index.html @@ -8,13 +8,13 @@ - +
- + \ No newline at end of file diff --git a/docs/advanced/browse-paths-upgrade/index.html b/docs/advanced/browse-paths-upgrade/index.html index f5a606a73584d..8314b5d3efa2f 100644 --- a/docs/advanced/browse-paths-upgrade/index.html +++ b/docs/advanced/browse-paths-upgrade/index.html @@ -8,7 +8,7 @@ - + @@ -34,7 +34,7 @@ join #release-0_8_0 channel and reach out to us if you find trouble with the upgrade or have feedback on the process. We will work closely to make sure you can continue to operate DataHub smoothly.

- + \ No newline at end of file diff --git a/docs/advanced/db-retention/index.html b/docs/advanced/db-retention/index.html index b3f8ddd723668..8a7ffb271d604 100644 --- a/docs/advanced/db-retention/index.html +++ b/docs/advanced/db-retention/index.html @@ -8,7 +8,7 @@ - + @@ -23,7 +23,7 @@ creating the datahub-gms container/k8s pod.

On GMS start up, retention policies are initialized with:

  1. First, the default provided version-based retention to keep 20 latest aspects for all entity-aspect pairs.
  2. Second, we read YAML files from the /etc/datahub/plugins/retention directory and overlay them on the default set of policies we provide.

For docker, we set docker-compose to mount ${HOME}/.datahub directory to /etc/datahub directory within the containers, so you can customize the initial set of retention policies by creating a ${HOME}/.datahub/plugins/retention/retention.yaml file.

We will support a standardized way to do this in Kubernetes setup in the near future.

The format for the YAML file is as follows:

- entity: "*" # denotes that policy will be applied to all entities
aspect: "*" # denotes that policy will be applied to all aspects
config:
retention:
version:
maxVersions: 20
- entity: "dataset"
aspect: "datasetProperties"
config:
retention:
version:
maxVersions: 20
time:
maxAgeInSeconds: 2592000 # 30 days

Note, it searches for the policies corresponding to the entity, aspect pair in the following order:

  1. entity, aspect
  2. *, aspect
  3. entity, *
  4. *, *

By restarting datahub-gms after creating the plugin yaml file, the new set of retention policies will be applied.

- + \ No newline at end of file diff --git a/docs/advanced/derived-aspects/index.html b/docs/advanced/derived-aspects/index.html index 0033e664633ff..1c5a3531943d8 100644 --- a/docs/advanced/derived-aspects/index.html +++ b/docs/advanced/derived-aspects/index.html @@ -8,13 +8,13 @@ - +
- + \ No newline at end of file diff --git a/docs/advanced/entity-hierarchy/index.html b/docs/advanced/entity-hierarchy/index.html index 38be1296a0335..91cf7c8757d03 100644 --- a/docs/advanced/entity-hierarchy/index.html +++ b/docs/advanced/entity-hierarchy/index.html @@ -8,13 +8,13 @@ - +
- + \ No newline at end of file diff --git a/docs/advanced/field-path-spec-v2/index.html b/docs/advanced/field-path-spec-v2/index.html index d28114d184291..7b32a3c346916 100644 --- a/docs/advanced/field-path-spec-v2/index.html +++ b/docs/advanced/field-path-spec-v2/index.html @@ -8,7 +8,7 @@ - + @@ -34,7 +34,7 @@ as a union member, set the nullable member of SchemaField to True.

Examples

Primitive types

avro_schema = """
{
"type": "string"
}
"""
unique_v2_field_paths = [
"[version=2.0].[type=string]"
]

Records

Simple Record

avro_schema = """
{
"type": "record",
"name": "some.event.E",
"namespace": "some.event.N",
"doc": "this is the event record E"
"fields": [
{
"name": "a",
"type": "string",
"doc": "this is string field a of E"
},
{
"name": "b",
"type": "string",
"doc": "this is string field b of E"
}
]
}
"""

unique_v2_field_paths = [
"[version=2.0].[type=E].[type=string].a",
"[version=2.0].[type=E].[type=string].b",
]

Nested Record

avro_schema = """
{
"type": "record",
"name": "SimpleNested",
"namespace": "com.linkedin",
"fields": [{
"name": "nestedRcd",
"type": {
"type": "record",
"name": "InnerRcd",
"fields": [{
"name": "aStringField",
"type": "string"
} ]
}
}]
}
"""

unique_v2_field_paths = [
"[version=2.0].[key=True].[type=SimpleNested].[type=InnerRcd].nestedRcd",
"[version=2.0].[key=True].[type=SimpleNested].[type=InnerRcd].nestedRcd.[type=string].aStringField",
]

Recursive Record

avro_schema = """
{
"type": "record",
"name": "Recursive",
"namespace": "com.linkedin",
"fields": [{
"name": "r",
"type": {
"type": "record",
"name": "R",
"fields": [
{ "name" : "anIntegerField", "type" : "int" },
{ "name": "aRecursiveField", "type": "com.linkedin.R"}
]
}
}]
}
"""

unique_v2_field_paths = [
"[version=2.0].[type=Recursive].[type=R].r",
"[version=2.0].[type=Recursive].[type=R].r.[type=int].anIntegerField",
"[version=2.0].[type=Recursive].[type=R].r.[type=R].aRecursiveField"
]
avro_schema ="""
{
"type": "record",
"name": "TreeNode",
"fields": [
{
"name": "value",
"type": "long"
},
{
"name": "children",
"type": { "type": "array", "items": "TreeNode" }
}
]
}
"""
unique_v2_field_paths = [
"[version=2.0].[type=TreeNode].[type=long].value",
"[version=2.0].[type=TreeNode].[type=array].[type=TreeNode].children",
]

Unions

avro_schema = """
{
"type": "record",
"name": "ABUnion",
"namespace": "com.linkedin",
"fields": [{
"name": "a",
"type": [{
"type": "record",
"name": "A",
"fields": [{ "name": "f", "type": "string" } ]
}, {
"type": "record",
"name": "B",
"fields": [{ "name": "f", "type": "string" } ]
}
]
}]
}
"""
unique_v2_field_paths: List[str] = [
"[version=2.0].[key=True].[type=ABUnion].[type=union].a",
"[version=2.0].[key=True].[type=ABUnion].[type=union].[type=A].a",
"[version=2.0].[key=True].[type=ABUnion].[type=union].[type=A].a.[type=string].f",
"[version=2.0].[key=True].[type=ABUnion].[type=union].[type=B].a",
"[version=2.0].[key=True].[type=ABUnion].[type=union].[type=B].a.[type=string].f",
]

Arrays

avro_schema = """
{
"type": "record",
"name": "NestedArray",
"namespace": "com.linkedin",
"fields": [{
"name": "ar",
"type": {
"type": "array",
"items": {
"type": "array",
"items": [
"null",
{
"type": "record",
"name": "Foo",
"fields": [ {
"name": "a",
"type": "long"
} ]
}
]
}
}
}]
}
"""
unique_v2_field_paths: List[str] = [
"[version=2.0].[type=NestedArray].[type=array].[type=array].[type=Foo].ar",
"[version=2.0].[type=NestedArray].[type=array].[type=array].[type=Foo].ar.[type=long].a",
]

Maps

avro_schema = """
{
"type": "record",
"name": "R",
"namespace": "some.namespace",
"fields": [
{
"name": "a_map_of_longs_field",
"type": {
"type": "map",
"values": "long"
}
}
]
}
"""
unique_v2_field_paths = [
"[version=2.0].[type=R].[type=map].[type=long].a_map_of_longs_field",
]


Mixed Complex Type Examples

# Combines arrays, unions and records.
avro_schema = """
{
"type": "record",
"name": "ABFooUnion",
"namespace": "com.linkedin",
"fields": [{
"name": "a",
"type": [ {
"type": "record",
"name": "A",
"fields": [{ "name": "f", "type": "string" } ]
}, {
"type": "record",
"name": "B",
"fields": [{ "name": "f", "type": "string" } ]
}, {
"type": "array",
"items": {
"type": "array",
"items": [
"null",
{
"type": "record",
"name": "Foo",
"fields": [{ "name": "f", "type": "long" }]
}
]
}
}]
}]
}
"""

unique_v2_field_paths: List[str] = [
"[version=2.0].[type=ABFooUnion].[type=union].a",
"[version=2.0].[type=ABFooUnion].[type=union].[type=A].a",
"[version=2.0].[type=ABFooUnion].[type=union].[type=A].a.[type=string].f",
"[version=2.0].[type=ABFooUnion].[type=union].[type=B].a",
"[version=2.0].[type=ABFooUnion].[type=union].[type=B].a.[type=string].f",
"[version=2.0].[type=ABFooUnion].[type=union].[type=array].[type=array].[type=Foo].a",
"[version=2.0].[type=ABFooUnion].[type=union].[type=array].[type=array].[type=Foo].a.[type=long].f",
]

For more examples, see the unit-tests for AvroToMceSchemaConverter.

Backward-compatibility

While this format is not directly compatible with the v1 format, the v1 equivalent can easily be constructed from the v2 encoding by stripping away all the v2 tokens enclosed in the square-brackets [<new_in_v2>].

- + \ No newline at end of file diff --git a/docs/advanced/high-cardinality/index.html b/docs/advanced/high-cardinality/index.html index 14da2af7c7094..a300bf0a78ffa 100644 --- a/docs/advanced/high-cardinality/index.html +++ b/docs/advanced/high-cardinality/index.html @@ -8,13 +8,13 @@ - +

High Cardinality Relationships

As explained in What is a Relationship, the raw metadata for forming relationships is captured directly inside of a Metadata Aspect. The most natural way to model this is using an array, e.g. a group membership aspect contains an array of user URNs. However, this poses some challenges when the cardinality of the relationship is expected to be large (say, greater than 10,000). The aspect becomes large in size, which leads to slow update and retrieval. It may even exceed the underlying limit of the document store, which is often in the range of a few MBs. Furthermore, sending large messages (> 1MB) over Kafka requires special tuning and is generally discouraged.

Depending on the type of relationships, there are different strategies for dealing with high cardinality.

1:N Relationships

When N is large, simply store the relationship as a reverse pointer on the N side, instead of an N-element array on the 1 side. In other words, instead of doing this

record MemberList {
members: array[UserUrn]
}

do this

record Membership {
group: GroupUrn
}

One drawback with this approach is that batch updating the member list becomes multiple DB operations and non-atomic. If the list is provided by an external metadata provider via MCEs, this also means that multiple MCEs will be required to update the list, instead of having one giant array in a single MCE.

M:N Relationships

When one side of the relation (M or N) has low cardinality, you can apply the same trick in [1:N Relationship] by creating the array on the side with low-cardinality. For example, assuming a user can only be part of a small number of groups but each group can have a large number of users, the following model will be more efficient than the reverse.

record Membership {
groups: array[GroupUrn]
}

When both M and N are of high cardinality (e.g. millions of users, each belonging to millions of groups), the only way to store such relationships efficiently is by creating a new "Mapping Entity" with a single aspect like this

record UserGroupMap {
user: UserUrn
group: GroupUrn
}

This means that the relationship now can only be created & updated at a single source-destination pair granularity.

- + \ No newline at end of file diff --git a/docs/advanced/mcp-mcl/index.html b/docs/advanced/mcp-mcl/index.html index e9a5b4b4da539..91be941d57ae8 100644 --- a/docs/advanced/mcp-mcl/index.html +++ b/docs/advanced/mcp-mcl/index.html @@ -8,13 +8,13 @@ - +

MetadataChangeProposal & MetadataChangeLog Events

Overview & Vision

As of release v0.8.7, two new important event streams have been introduced: MetadataChangeProposal & MetadataChangeLog. These topics serve as more generic (and more appropriately named) versions of the classic MetadataChangeEvent and MetadataAuditEvent events, used for a) proposing and b) logging changes to the DataHub Metadata Graph.

With these events, we move towards a more generic world, in which Metadata models are not strongly-typed parts of the event schemas themselves. This provides flexibility, allowing for the core models comprising the Metadata Graph to be added and changed dynamically, without requiring structural updates to Kafka or REST API schemas used for ingesting and serving Metadata.

Moreover, we've focused in on the "aspect" as the atomic unit of write in DataHub. MetadataChangeProposal & MetadataChangeLog will carry only a single aspect in their payload, as opposed to the list of aspects carried by today's MCE & MAE. This more accurately reflects the atomicity contract of the metadata model, hopefully lessening confusion about transactional guarantees for multi-aspect writes in addition to making it simpler to tune into the metadata changes a consumer cares about.

Making these events more generic does not come for free; we give up some in the form of Restli and Kafka-native schema validation and defer this responsibility to DataHub itself, who is the sole enforcer of the graph model contracts. Additionally, we add an extra step to unbundling the actual metadata by requiring a double-deserialization: that of the event / response body itself and another of the nested Metadata aspect.

To mitigate these downsides, we are committed to providing cross-language client libraries capable of doing the hard work for you. We intend to publish these as strongly-typed artifacts generated from the "default" model set DataHub ships with. This stands in addition to an initiative to introduce an OpenAPI layer in DataHub's backend (gms) which would provide a strongly typed model.

Ultimately, we intend to realize a state in which the Entities and Aspect schemas can be altered without requiring generated code and without maintaining a single mega-model schema (looking at you, Snapshot.pdl). The intention is that changes to the metadata model become even easier than they are today.

Modeling

A Metadata Change Proposal is defined (in PDL) as follows

record MetadataChangeProposal {

/**
* Kafka audit header. See go/kafkaauditheader for more info.
*/
auditHeader: optional KafkaAuditHeader

/**
* Type of the entity being written to
*/
entityType: string

/**
* Urn of the entity being written
**/
entityUrn: optional Urn,

/**
* Key aspect of the entity being written
*/
entityKeyAspect: optional GenericAspect

/**
* Type of change being proposed
*/
changeType: ChangeType

/**
* Aspect of the entity being written to
* Not filling this out implies that the writer wants to affect the entire entity
* Note: This is only valid for CREATE and DELETE operations.
**/
aspectName: optional string

aspect: optional GenericAspect

/**
* A string->string map of custom properties that one might want to attach to an event
**/
systemMetadata: optional SystemMetadata

}

Each proposal consists of the following:

  1. entityType

    Refers to the type of the entity e.g. dataset, chart

  2. entityUrn

    Urn of the entity being updated. Note, exactly one of entityUrn or entityKeyAspect must be filled out to correctly identify an entity.

  3. entityKeyAspect

    Key aspect of the entity. Instead of having a string URN, we will support identifying entities by their key aspect structs. Note, this is not supported as of now.

  4. changeType

    Type of change you are proposing: one of

    • UPSERT: Insert if not exists, update otherwise
    • CREATE: Insert if not exists, fail otherwise
    • UPDATE: Update if exists, fail otherwise
    • DELETE: Delete
    • PATCH: Patch the aspect instead of doing a full replace

    Only UPSERT is supported as of now.

  5. aspectName

    Name of the aspect. Must match the name in the "@Aspect" annotation.

  6. aspect

    To support strongly typed aspects, without having to keep track of a union of all existing aspects, we introduced a new object called GenericAspect.

    record GenericAspect {
    value: bytes
    contentType: string
    }

    It contains the type of serialization and the serialized value. Note, currently we only support "application/json" as contentType but will be adding more forms of serialization in the future. Validation of the serialized object happens in GMS against the schema matching the aspectName.

  7. systemMetadata

    Extra metadata about the proposal like run_id or updated timestamp.

GMS processes the proposal and produces the Metadata Change Log, which looks like this.

record MetadataChangeLog includes MetadataChangeProposal {

previousAspectValue: optional GenericAspect

previousSystemMetadata: optional SystemMetadata

}

It includes all fields in the proposal, but also has the previous version of the aspect value and system metadata. This allows the MCL processor to know the previous value before deciding to update all indices.

Topics

Following the change in our event models, we introduced 4 new topics. The old topics will get deprecated as we fully migrate to this model.

  1. MetadataChangeProposal_v1, FailedMetadataChangeProposal_v1

    Analogous to the MCE topic, proposals that get produced into the MetadataChangeProposal_v1 topic, will get ingested to GMS asynchronously, and any failed ingestion will produce a failed MCP in the FailedMetadataChangeProposal_v1 topic.

  2. MetadataChangeLog_Versioned_v1

    Analogous to the MAE topic, MCLs for versioned aspects will get produced into this topic. Since versioned aspects have a source of truth that can be separately backed up, the retention of this topic is short (by default 7 days). Note both this and the next topic are consumed by the same MCL processor.

  3. MetadataChangeLog_Timeseries_v1

    Analogous to the MAE topics, MCLs for timeseries aspects will get produced into this topic. Since timeseries aspects do not have a source of truth, but rather gets ingested straight to elasticsearch, we set the retention of this topic to be longer (90 days). You can backup timeseries aspect by replaying this topic.

Configuration

With MetadataChangeProposal and MetadataChangeLog, we will introduce a new mechanism for configuring the association between Metadata Entities & Aspects. Specifically, the Snapshot.pdl model will no longer encode this information by way of Rest.li union. Instead, a more explicit yaml file will provide these links. This file will be leveraged at runtime to construct the in-memory Entity Registry which contains the global Metadata schema along with some additional metadata.

An example of the configuration file that will be used for MCP & MCL, which defines a "dataset" entity that is associated with two aspects: "datasetKey" and "datasetProfile".

# entity-registry.yml

entities:
- name: dataset
keyAspect: datasetKey
aspects:
- datasetProfile
- + \ No newline at end of file diff --git a/docs/advanced/monitoring/index.html b/docs/advanced/monitoring/index.html index 042a3adfb754e..04c541907c56c 100644 --- a/docs/advanced/monitoring/index.html +++ b/docs/advanced/monitoring/index.html @@ -8,7 +8,7 @@ - + @@ -51,7 +51,7 @@ For instance,

docker-compose \
-f quickstart/docker-compose.quickstart.yml \
-f monitoring/docker-compose.monitoring.yml \
pull && \
docker-compose -p datahub \
-f quickstart/docker-compose.quickstart.yml \
-f monitoring/docker-compose.monitoring.yml \
up

We set up quickstart.sh, dev.sh, and dev-without-neo4j.sh to add the above docker-compose when MONITORING=true. For instance MONITORING=true ./docker/quickstart.sh will add the correct env variables to start collecting traces and metrics, and also deploy Jaeger, Prometheus, and Grafana. We will soon support this as a flag during quickstart.

- + \ No newline at end of file diff --git a/docs/advanced/no-code-modeling/index.html b/docs/advanced/no-code-modeling/index.html index 4473a74d2c15d..f686499b648b8 100644 --- a/docs/advanced/no-code-modeling/index.html +++ b/docs/advanced/no-code-modeling/index.html @@ -8,7 +8,7 @@ - + @@ -25,7 +25,7 @@ a per-entity basis, including

  • Rest.li Resources
  • Index Builders
  • Graph Builders
  • Local, Search, Browse, Graph DAOs
  • Clients
  • Browse Path Logic

along with simplifying the number of raw data models that need to be defined, including

  • Rest.li Resource Models
  • Search Document Models
  • Relationship Models
  • Urns + their java classes

From an architectural PoV, we will move from a before that looks something like this:

no-code-before

to an after that looks like this

no-code-after

That is, a move away from patterns of strong-typing-everywhere to a more generic + flexible world.

How will we do it?

We will accomplish this by building the following:

  1. Set of custom annotations to permit declarative entity, search, graph configurations
    • @Entity & @Aspect
    • @Searchable
    • @Relationship
  2. Entity Registry: In-memory structures for representing, storing & serving metadata associated with a particular Entity, including search and relationship configurations.
  3. Generic Entity, Search, Graph Service classes: Replaces traditional strongly-typed DAOs with flexible, pluggable APIs that can be used for CRUD, search, and graph across all entities.
  4. Generic Rest.li Resources:
    • 1 permitting reading, writing, searching, autocompleting, and browsing arbitrary entities
    • 1 permitting reading of arbitrary entity-entity relationship edges
  5. Generic Search Index Builder: Given a MAE and a specification of the Search Configuration for an entity, updates the search index.
  6. Generic Graph Index Builder: Given a MAE and a specification of the Relationship Configuration for an entity, updates the graph index.
  7. Generic Index + Mappings Builder: Dynamically generates index mappings and creates indices on the fly.
  8. Introduction of special aspects to address other imperative code requirements
    • BrowsePaths Aspect: Include an aspect to permit customization of the indexed browse paths.
    • Key aspects: Include "virtual" aspects for representing the fields that uniquely identify an Entity for easy reading by clients of DataHub.

Final Developer Experience: Defining an Entity

We will outline what the experience of adding a new Entity should look like. We will imagine we want to define a "Service" entity representing online microservices.

Step 1. Add aspects

ServiceKey.pdl

namespace com.linkedin.metadata.key

/**
* Key for a Service
*/
@Aspect = {
"name": "serviceKey"
}
record ServiceKey {
/**
* Name of the service
*/
@Searchable = {
"fieldType": "WORD_GRAM",
"enableAutocomplete": true
}
name: string
}

ServiceInfo.pdl

namespace com.linkedin.service

import com.linkedin.common.Urn

/**
* Properties associated with a Service
*/
@Aspect = {
"name": "serviceInfo"
}
record ServiceInfo {

/**
* Description of the service
*/
@Searchable = {}
description: string

/**
* The owners of the service
*/
@Relationship = {
"name": "OwnedBy",
"entityTypes": ["corpUser"]
}
owner: Urn
}

Step 2. Add aspect union.

ServiceAspect.pdl

namespace com.linkedin.metadata.aspect

import com.linkedin.metadata.key.ServiceKey
import com.linkedin.service.ServiceInfo
import com.linkedin.common.BrowsePaths

/**
* Service Info
*/
typeref ServiceAspect = union[
ServiceKey,
ServiceInfo,
BrowsePaths
]

Step 3. Add Snapshot model.

ServiceSnapshot.pdl

namespace com.linkedin.metadata.snapshot

import com.linkedin.common.Urn
import com.linkedin.metadata.aspect.ServiceAspect

@Entity = {
"name": "service",
"keyAspect": "serviceKey"
}
record ServiceSnapshot {

/**
* Urn for the service
*/
urn: Urn

/**
* The list of service aspects
*/
aspects: array[ServiceAspect]
}

Step 4. Update Snapshot union.

Snapshot.pdl

namespace com.linkedin.metadata.snapshot

/**
* A union of all supported metadata snapshot types.
*/
typeref Snapshot = union[
...
ServiceSnapshot
]

Interacting with New Entity

  1. Write Entity
curl 'http://localhost:8080/entities?action=ingest' -X POST -H 'X-RestLi-Protocol-Version:2.0.0' --data '{
"entity":{
"value":{
"com.linkedin.metadata.snapshot.ServiceSnapshot":{
"urn": "urn:li:service:mydemoservice",
"aspects":[
{
"com.linkedin.service.ServiceInfo":{
"description":"My demo service",
"owner": "urn:li:corpuser:user1"
}
},
{
"com.linkedin.common.BrowsePaths":{
"paths":[
"/my/custom/browse/path1",
"/my/custom/browse/path2"
]
}
}
]
}
}
}
}'
  2. Read Entity
curl 'http://localhost:8080/entities/urn%3Ali%3Aservice%3Amydemoservice' -H 'X-RestLi-Protocol-Version:2.0.0'
  3. Search Entity
curl --location --request POST 'http://localhost:8080/entities?action=search' \
--header 'X-RestLi-Protocol-Version: 2.0.0' \
--header 'Content-Type: application/json' \
--data-raw '{
"input": "My demo",
"entity": "service",
"start": 0,
"count": 10
}'
  4. Autocomplete
curl --location --request POST 'http://localhost:8080/entities?action=autocomplete' \
--header 'X-RestLi-Protocol-Version: 2.0.0' \
--header 'Content-Type: application/json' \
--data-raw '{
"query": "mydem",
"entity": "service",
"limit": 10
}'
  5. Browse
curl --location --request POST 'http://localhost:8080/entities?action=browse' \
--header 'X-RestLi-Protocol-Version: 2.0.0' \
--header 'Content-Type: application/json' \
--data-raw '{
"path": "/my/custom/browse",
"entity": "service",
"start": 0,
"limit": 10
}'
  6. Relationships
curl --location --request GET 'http://localhost:8080/relationships?direction=INCOMING&urn=urn%3Ali%3Acorpuser%3Auser1&types=OwnedBy' \
--header 'X-RestLi-Protocol-Version: 2.0.0'
- + \ No newline at end of file diff --git a/docs/advanced/no-code-upgrade/index.html b/docs/advanced/no-code-upgrade/index.html index cf014e1eaeec7..9462a9cb7c8e5 100644 --- a/docs/advanced/no-code-upgrade/index.html +++ b/docs/advanced/no-code-upgrade/index.html @@ -8,7 +8,7 @@ - + @@ -43,7 +43,7 @@ join #release-0_8_0 channel and reach out to us if you find trouble with the upgrade or have feedback on the process. We will work closely to make sure you can continue to operate DataHub smoothly.

- + \ No newline at end of file diff --git a/docs/advanced/partial-update/index.html b/docs/advanced/partial-update/index.html index aa25d6417fb3f..ceeba9f1a869f 100644 --- a/docs/advanced/partial-update/index.html +++ b/docs/advanced/partial-update/index.html @@ -8,13 +8,13 @@ - +
- + \ No newline at end of file diff --git a/docs/advanced/patch/index.html b/docs/advanced/patch/index.html index 3a475033fcd01..69581b72f2f3f 100644 --- a/docs/advanced/patch/index.html +++ b/docs/advanced/patch/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

But First, Semantics: Upsert versus Patch

Why Would You Use Patch

By default, most of the SDK tutorials and APIs involve applying full upserts at the aspect level. This means that typically, when you want to change one field within an aspect without modifying others, you need to do a read-modify-write to not overwrite existing fields. To support these scenarios, DataHub supports PATCH based operations so that targeted changes to single fields or values within arrays of fields are possible without impacting other existing metadata.

note

Currently, PATCH support is only available for a selected set of aspects, so before pinning your hopes on using PATCH as a way to make modifications to aspect values, confirm whether your aspect supports PATCH semantics. The complete list of Aspects that are supported are maintained here. In the near future, we do have plans to automatically support PATCH semantics for aspects by default.

How To Use Patch

Examples for using Patch are sprinkled throughout the API guides. Here's how to find the appropriate classes for the language for your choice.

The Python Patch builders are entity-oriented and located in the metadata-ingestion module, under the datahub.specific package.

Here are a few illustrative examples using the Python Patch builders:

Add Properties to Dataset

# Inlined from /metadata-ingestion/examples/library/dataset_add_properties.py
import logging
from typing import Union

from datahub.configuration.kafka import KafkaProducerConnectionConfig
from datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig
from datahub.emitter.mce_builder import make_dataset_urn
from datahub.emitter.rest_emitter import DataHubRestEmitter
from datahub.specific.dataset import DatasetPatchBuilder

log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


# Get an emitter, either REST or Kafka, this example shows you both
def get_emitter() -> Union[DataHubRestEmitter, DatahubKafkaEmitter]:
    """Return a metadata emitter; flip USE_REST_EMITTER to switch transports."""
    USE_REST_EMITTER = True
    if USE_REST_EMITTER:
        gms_endpoint = "http://localhost:8080"
        return DataHubRestEmitter(gms_server=gms_endpoint)
    else:
        kafka_server = "localhost:9092"
        schema_registry_url = "http://localhost:8081"
        return DatahubKafkaEmitter(
            config=KafkaEmitterConfig(
                connection=KafkaProducerConnectionConfig(
                    bootstrap=kafka_server, schema_registry_url=schema_registry_url
                )
            )
        )


dataset_urn = make_dataset_urn(platform="hive", name="fct_users_created", env="PROD")

# Each builder call produces a targeted PATCH MCP; other dataset properties
# already stored on the server are left untouched.
with get_emitter() as emitter:
    for patch_mcp in (
        DatasetPatchBuilder(dataset_urn)
        .add_custom_property("cluster_name", "datahubproject.acryl.io")
        .add_custom_property("retention_time", "2 years")
        .build()
    ):
        emitter.emit(patch_mcp)


log.info(f"Added cluster_name, retention_time properties to dataset {dataset_urn}")

- + \ No newline at end of file diff --git a/docs/advanced/pdl-best-practices/index.html b/docs/advanced/pdl-best-practices/index.html index fce4db158c71d..88a37ec288482 100644 --- a/docs/advanced/pdl-best-practices/index.html +++ b/docs/advanced/pdl-best-practices/index.html @@ -8,13 +8,13 @@ - +
- + \ No newline at end of file diff --git a/docs/api/datahub-apis/index.html b/docs/api/datahub-apis/index.html index a232540d5200b..0575a0100e230 100644 --- a/docs/api/datahub-apis/index.html +++ b/docs/api/datahub-apis/index.html @@ -8,14 +8,14 @@ - +

Which DataHub API is for me?

DataHub supplies several APIs to manipulate metadata on the platform. These are our most-to-least recommended approaches:

  • Our most recommended tools for extending and customizing the behavior of your DataHub instance are our SDKs in Python and Java.
  • If you'd like to customize the DataHub client or roll your own, the GraphQL API is what powers our frontend. We figure if it's good enough for us, it's good enough for everyone! If GraphQL doesn't cover everything in your use case, drop into our Slack and let us know how we can improve it!
  • If you are less familiar with graphql and would rather use OpenAPI, we offer OpenAPI endpoints that allow you to produce metadata events and query metadata.
  • Finally, if you're a brave soul and know exactly what you are doing... are you sure you don't just want to use the SDK directly? If you insist, the Rest.li API is a much more powerful, low level API intended only for advanced users.

Python and Java SDK

We offer SDKs for both Python and Java that provide full functionality when it comes to CRUD operations and any complex functionality you may want to build into DataHub.

Get started with the Python SDKGet started with the Java SDK

GraphQL API

The GraphQL API serves as the primary public API for the platform. It can be used to fetch and update metadata programmatically in the language of your choice. Intended as a higher-level API that simplifies the most common operations.

Get started with the GraphQL API

OpenAPI

For developers who prefer OpenAPI to GraphQL for programmatic operations. Provides lower-level API access to the entire DataHub metadata model for writes, reads and queries.

Get started with OpenAPI

Rest.li API

caution

The Rest.li API is intended only for advanced users. If you're just getting started with DataHub, we recommend the GraphQL API

The Rest.li API represents the underlying persistence layer, and exposes the raw PDL models used in storage. Under the hood, it powers the GraphQL API. Aside from that, it is also used for system-specific ingestion of metadata, being used by the Metadata Ingestion Framework for pushing metadata into DataHub directly. For all intents and purposes, the Rest.li API is considered system-internal, meaning DataHub components are the only ones to consume this API directly.

Get started with our Rest.li API

DataHub API Comparison

DataHub supports several APIs, each with its own unique usage and format. Here's an overview of what each API can do.

Last Updated : Apr 8 2023

FeatureGraphQLPython SDKOpenAPI
Create a dataset🚫[Guide]
Delete a dataset (Soft delete)[Guide][Guide]
Delete a dataset (Hard delete)🚫[Guide]
Search a dataset
Create a tag[Guide][Guide]
Read a tag[Guide][Guide]
Add tags to a dataset[Guide][Guide]
Add tags to a column of a dataset[Guide][Guide]
Remove tags from a dataset[Guide][Guide]
Create glossary terms[Guide][Guide]
Read terms from a dataset[Guide][Guide]
Add terms to a column of a dataset[Guide][Guide]
Add terms to a dataset[Guide][Guide]
Create domains[Guide][Guide]
Read domains[Guide][Guide]
Add domains to a dataset[Guide][Guide]
Remove domains from a dataset[Guide][Guide]
Create users and groups[Guide][Guide]
Read owners of a dataset[Guide][Guide]
Add owner to a dataset[Guide][Guide]
Remove owner from a dataset[Guide][Guide]
Add lineage[Guide][Guide]
Add column level(Fine Grained) lineage🚫
Add documentation(description) to a column of a dataset[Guide][Guide]
Add documentation(description) to a dataset[Guide][Guide]
Add / Remove / Replace custom properties on a dataset🚫 [Guide][Guide]
- + \ No newline at end of file diff --git a/docs/api/graphql/getting-started/index.html b/docs/api/graphql/getting-started/index.html index ce34a21266a26..86420580a225c 100644 --- a/docs/api/graphql/getting-started/index.html +++ b/docs/api/graphql/getting-started/index.html @@ -8,7 +8,7 @@ - + @@ -25,7 +25,7 @@ To verify that no error has returned after making a GraphQL request, make sure you check both the data and errors fields that are returned.

To catch a GraphQL error, simply check the errors field inside the GraphQL response. It will contain a message, a path, and a set of extensions which contain a standard error code.

{
"errors": [
{
"message": "Failed to change ownership for resource urn:li:dataFlow:(airflow,dag_abc,PROD). Expected a corp user urn.",
"locations": [
{
"line": 1,
"column": 22
}
],
"path": ["addOwners"],
"extensions": {
"code": 400,
"type": "BAD_REQUEST",
"classification": "DataFetchingException"
}
}
]
}

With the following error codes officially supported:

CodeTypeDescription
400BAD_REQUESTThe query or mutation was malformed.
403UNAUTHORIZEDThe current actor is not authorized to perform the requested action.
404NOT_FOUNDThe resource is not found.
500SERVER_ERRORAn internal error has occurred. Check your server logs or contact your DataHub administrator.

Visit our Slack channel to ask questions, tell us what we can do better, & make requests for what you'd like to see in the future. Or just stop by to say 'Hi'.

- + \ No newline at end of file diff --git a/docs/api/graphql/graphql-endpoint-development/index.html b/docs/api/graphql/graphql-endpoint-development/index.html index ab83e5175606a..62560b8c83bb7 100644 --- a/docs/api/graphql/graphql-endpoint-development/index.html +++ b/docs/api/graphql/graphql-endpoint-development/index.html @@ -8,13 +8,13 @@ - +

Creating a New GraphQL Endpoint in GMS

This guide will walk you through how to add a new GraphQL endpoint in GMS.

listOwnershipTypes example: The listOwnershipTypes endpoint will be used as an example. This endpoint was added in this commit which can be used as reference.

GraphQL API changes

Adding an endpoint definition

GraphQL endpoint definitions for GMS are located in the datahub-graphql-core/src/main/resources/ directory. New endpoints can be added to the relevant file, e.g. entity.graphql for entity management endpoints, search.graphql for search-related endpoints, etc. Or, for totally new features, new files can be added to this directory.

listOwnershipTypes example: The endpoint was added in the entity.graphql file since ownership types are being added as an entity.

Query or Mutation?

Read-only functionality can go in the Query section, while mutations go in the Mutation section. The definition for new functionality can go in the appropriate section depending on the use case.

listOwnershipTypes example: The endpoint was added in the type Query section because it is read-only functionality. In the same commit, createOwnershipType, updateOwnershipType, and deleteOwnershipType were added in the type Mutation section as these are operations that perform writes.

Input and Output Types

If the new endpoint requires more than a few inputs or outputs, a struct can be created in the same file to collect these fields.

listOwnershipTypes example: Since this functionality takes and returns quite a few parameters, input ListOwnershipTypesInput and type ListOwnershipTypesResult were added to represent the input and output structs. In the same PR, no input and output structs were added for deleteOwnershipType since the inputs and output are primitive types.

Building your changes

After adding the new endpoint, and new structs if necessary, building the project will generate the Java classes for the new code that can be used in making the server changes. Build the datahub project to make the new symbols available.

listOwnershipTypes example: The build step will make the new types ListOwnershipTypesInput and ListOwnershipTypesResult available in a Java IDE.

Java Server changes

We turn now to developing the server-side functionality for the new endpoint.

Adding a resolver

GraphQL queries are handled by Resolver classes located in the datahub-graphql-core/src/main/java/com/linkedin/datahub/graphql/resolvers/ directory. Resolvers are classes that implement the DataFetcher<T> interface where T is CompletableFuture<ClassForResultOfTheEndpoint>. This interface provides a get method that takes in a DataFetchingEnvironment and returns a CompletableFuture of the endpoint return type. The resolver can contain any services needed to resolve the endpoint, and use them to compute the result.

listOwnershipTypes example: The ListOwnershipTypesResolver class implements DataFetcher<CompletableFuture<ListOwnershipTypesResult>> since this is the return type of the endpoint. It contains an EntityClient instance variable to handle the ownership type fetching.

Often the structure of the Resolver classes is to call a service to receive a response, then use a method to transform the result from the service into the GraphQL type returned.

listOwnershipTypes example: The ListOwnershipTypesResolver calls the search method in its EntityClient to get the ownership types, then calls the defined mapUnresolvedOwnershipTypes function to transform the response into a ListOwnershipTypesResult.

Tip: Resolver classes can be tested with unit tests!

listOwnershipTypes example: The reference commit adds the ListOwnershipTypeResolverTest class.

Adding the resolver to the GMS server

The main GMS server is located in GmsGraphQLEngine.java. To hook up the resolver to handle the endpoint, find the relevant section based on if the new endpoint is a Query or a Mutation and add the resolver as the dataFetcher for the name of the endpoint.

listOwnershipTypes example: The following line of code is added in GmsGraphQLEngine: .dataFetcher("listOwnershipTypes", new ListOwnershipTypesResolver(this.entityClient)). This uses the ListOwnershipTypes resolver to handle queries for listOwnershipTypes endpoint.

Testing your change

In addition to unit tests for your resolver mentioned above, GraphQL functionality in datahub can be tested using the built-in GraphiQL endpoint. The endpoint is located at localhost:8080/api/graphiql on Quickstart and at the equivalent URL for a production instance. This provides fast debug-ability for querying GraphQL. See How to Set Up GraphQL for more information

- + \ No newline at end of file diff --git a/docs/api/graphql/how-to-set-up-graphql/index.html b/docs/api/graphql/how-to-set-up-graphql/index.html index 35cf70a9ba398..d13d0b08ef496 100644 --- a/docs/api/graphql/how-to-set-up-graphql/index.html +++ b/docs/api/graphql/how-to-set-up-graphql/index.html @@ -8,7 +8,7 @@ - + @@ -25,7 +25,7 @@ providing an Authorization header containing a Bearer token. The header should take the following format:

Authorization: Bearer <access-token>

Authorization for actions exposed by the GraphQL endpoint will be performed based on the actor making the request. For Personal Access Tokens, the token will carry the user's privileges. Please refer to Access Token Management for more information.

What's Next?

Now that you are ready with GraphQL, how about browsing through some use cases? Please refer to Getting Started With GraphQL for more information.

- + \ No newline at end of file diff --git a/docs/api/graphql/overview/index.html b/docs/api/graphql/overview/index.html index 796e0b3990527..b88ae368ad49e 100644 --- a/docs/api/graphql/overview/index.html +++ b/docs/api/graphql/overview/index.html @@ -8,7 +8,7 @@ - + @@ -23,7 +23,7 @@ validated against this schema. You can use these docs to understand data that is available for retrieval and operations that may be performed using the API.

Visit our Slack channel to ask questions, tell us what we can do better, & make requests for what you'd like to see in the future. Or just stop by to say 'Hi'.

- + \ No newline at end of file diff --git a/docs/api/graphql/token-management/index.html b/docs/api/graphql/token-management/index.html index 1f837f4924dbc..cf9da622a1008 100644 --- a/docs/api/graphql/token-management/index.html +++ b/docs/api/graphql/token-management/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ which will allow you to identify said access token later on.

For example, to generate an access token for the datahub corp user, you can issue the following graphql Query:

As GraphQL

mutation {
createAccessToken(
input: {
type: PERSONAL
actorUrn: "urn:li:corpuser:datahub"
duration: ONE_HOUR
name: "my personal token"
}
) {
accessToken
metadata {
id
name
description
}
}
}

As CURL

curl --location --request POST 'http://localhost:8080/api/graphql' \
--header 'X-DataHub-Actor: urn:li:corpuser:datahub' \
--header 'Content-Type: application/json' \
--data-raw '{ "query":"mutation { createAccessToken(input: { type: PERSONAL, actorUrn: \"urn:li:corpuser:datahub\", duration: ONE_HOUR, name: \"my personal token\" } ) { accessToken metadata { id name description} } }", "variables":{}}'

Listing Access Tokens

Listing tokens is a powerful endpoint that allows you to list the tokens owned by a particular user (i.e. YOU). To list all tokens that you own, you must specify a filter with: {field: "ownerUrn", value: "<your user urn>"} configuration.

As GraphQL

{
listAccessTokens(
input: {
start: 0
count: 100
filters: [{ field: "ownerUrn", value: "urn:li:corpuser:datahub" }]
}
) {
start
count
total
tokens {
urn
id
actorUrn
}
}
}

As CURL

curl --location --request POST 'http://localhost:8080/api/graphql' \
--header 'X-DataHub-Actor: urn:li:corpuser:datahub' \
--header 'Content-Type: application/json' \
--data-raw '{ "query":"{ listAccessTokens(input: {start: 0, count: 100, filters: [{field: \"ownerUrn\", value: \"urn:li:corpuser:datahub\"}]}) { start count total tokens {urn id actorUrn} } }", "variables":{}}'

Admin users can also list tokens owned by other users of the platform. To list tokens belonging to other users, you must have the Manage All Access Tokens Platform privilege.

As GraphQL

{
listAccessTokens(input: { start: 0, count: 100, filters: [] }) {
start
count
total
tokens {
urn
id
actorUrn
}
}
}

As CURL

curl --location --request POST 'http://localhost:8080/api/graphql' \
--header 'X-DataHub-Actor: urn:li:corpuser:datahub' \
--header 'Content-Type: application/json' \
--data-raw '{ "query":"{ listAccessTokens(input: {start: 0, count: 100, filters: []}) { start count total tokens {urn id actorUrn} } }", "variables":{}}'

Other filters besides ownerUrn=<some value> are possible. You can filter by property in the DataHubAccessTokenInfo aspect which you can find in the Entities documentation.

Revoking Access Tokens

To revoke an existing access token, you can use the revokeAccessToken mutation.

As GraphQL

mutation {
revokeAccessToken(tokenId: "HnMJylxuowJ1FKN74BbGogLvXCS4w+fsd3MZdI35+8A=")
}
curl --location --request POST 'http://localhost:8080/api/graphql' \
--header 'X-DataHub-Actor: urn:li:corpuser:datahub' \
--header 'Content-Type: application/json' \
--data-raw '{"query":"mutation {revokeAccessToken(tokenId: \"HnMJylxuowJ1FKN74BbGogLvXCS4w+fsd3MZdI35+8A=\")}","variables":{}}'

This endpoint will return a boolean detailing whether the operation was successful. In case of failure, an error message will appear explaining what went wrong.

Visit our Slack channel to ask questions, tell us what we can do better, & make requests for what you'd like to see in the future. Or just stop by to say 'Hi'.

- + \ No newline at end of file diff --git a/docs/api/openapi/openapi-usage-guide/index.html b/docs/api/openapi/openapi-usage-guide/index.html index 5c82518cda578..2edb9dff86a70 100644 --- a/docs/api/openapi/openapi-usage-guide/index.html +++ b/docs/api/openapi/openapi-usage-guide/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ The raw forms can be fed into codegen systems to generate client side code in the language of your choice that support the OpenAPI format. We have noticed varying degrees of maturity with different languages in these codegen systems so some may require customizations to be fully compatible.

The OpenAPI UI includes explorable schemas for request and response objects that are fully documented. The models used in the OpenAPI UI are all autogenerated at build time from the PDL models to JSON Schema compatible Java Models.

Understanding the OpenAPI endpoints

While the full OpenAPI spec is always available at GMS_SERVER_HOST:GMS_PORT/openapi/swagger-ui/index.html, here's a quick overview of the main OpenAPI endpoints and their purpose.

Entities (/entities)

The entities endpoints are intended for reads and writes to the metadata graph. The entire DataHub metadata model is available for you to write to (as entity, aspect pairs) or to read an individual entity's metadata from. See examples below.

Relationships (/relationships)

The relationships endpoints are intended for you to query the graph, to navigate relationships from one entity to others. See examples below.

Timeline (/timeline)

The timeline endpoints are intended for querying the versioned history of a given entity over time. For example, you can query a dataset for all schema changes that have happened to it over time, or all documentation changes that have happened to it. See this guide for more details.

Platform (/platform)

Even lower-level APIs that allow you to write metadata events into the DataHub platform using a standard format.

Example Requests

Entities (/entities) endpoint

POST
curl --location --request POST 'localhost:8080/openapi/entities/v1/' \
--header 'Content-Type: application/json' \
--header 'Accept: application/json' \
--header 'Authorization: Bearer eyJhbGciOiJIUzI1NiJ9.eyJhY3RvclR5cGUiOiJVU0VSIiwiYWN0b3JJZCI6ImRhdGFodWIiLCJ0eXBlIjoiUEVSU09OQUwiLCJ2ZXJzaW9uIjoiMSIsImV4cCI6MTY1MDY2MDY1NSwianRpIjoiM2E4ZDY3ZTItOTM5Yi00NTY3LWE0MjYtZDdlMDA1ZGU3NjJjIiwic3ViIjoiZGF0YWh1YiIsImlzcyI6ImRhdGFodWItbWV0YWRhdGEtc2VydmljZSJ9.pp_vW2u1tiiTT7U0nDF2EQdcayOMB8jatiOA8Je4JJA' \
--data-raw '[
{
"aspect": {
"__type": "SchemaMetadata",
"schemaName": "SampleHdfsSchema",
"platform": "urn:li:dataPlatform:platform",
"platformSchema": {
"__type": "MySqlDDL",
"tableSchema": "schema"
},
"version": 0,
"created": {
"time": 1621882982738,
"actor": "urn:li:corpuser:etl",
"impersonator": "urn:li:corpuser:jdoe"
},
"lastModified": {
"time": 1621882982738,
"actor": "urn:li:corpuser:etl",
"impersonator": "urn:li:corpuser:jdoe"
},
"hash": "",
"fields": [
{
"fieldPath": "county_fips_codefg",
"jsonPath": "null",
"nullable": true,
"description": "null",
"type": {
"type": {
"__type": "StringType"
}
},
"nativeDataType": "String()",
"recursive": false
},
{
"fieldPath": "county_name",
"jsonPath": "null",
"nullable": true,
"description": "null",
"type": {
"type": {
"__type": "StringType"
}
},
"nativeDataType": "String()",
"recursive": false
}
]
},
"entityType": "dataset",
"entityUrn": "urn:li:dataset:(urn:li:dataPlatform:platform,testSchemaIngest,PROD)"
}
]'
GET
curl --location --request GET 'localhost:8080/openapi/entities/v1/latest?urns=urn:li:dataset:(urn:li:dataPlatform:platform,testSchemaIngest,PROD)&aspectNames=schemaMetadata' \
--header 'Accept: application/json' \
--header 'Authorization: Bearer eyJhbGciOiJIUzI1NiJ9.eyJhY3RvclR5cGUiOiJVU0VSIiwiYWN0b3JJZCI6ImRhdGFodWIiLCJ0eXBlIjoiUEVSU09OQUwiLCJ2ZXJzaW9uIjoiMSIsImV4cCI6MTY1MDY2MDY1NSwianRpIjoiM2E4ZDY3ZTItOTM5Yi00NTY3LWE0MjYtZDdlMDA1ZGU3NjJjIiwic3ViIjoiZGF0YWh1YiIsImlzcyI6ImRhdGFodWItbWV0YWRhdGEtc2VydmljZSJ9.pp_vW2u1tiiTT7U0nDF2EQdcayOMB8jatiOA8Je4JJA'
DELETE
curl --location --request DELETE 'localhost:8080/openapi/entities/v1/?urns=urn:li:dataset:(urn:li:dataPlatform:platform,testSchemaIngest,PROD)&soft=true' \
--header 'Accept: application/json' \
--header 'Authorization: Bearer eyJhbGciOiJIUzI1NiJ9.eyJhY3RvclR5cGUiOiJVU0VSIiwiYWN0b3JJZCI6ImRhdGFodWIiLCJ0eXBlIjoiUEVSU09OQUwiLCJ2ZXJzaW9uIjoiMSIsImV4cCI6MTY1MDY2MDY1NSwianRpIjoiM2E4ZDY3ZTItOTM5Yi00NTY3LWE0MjYtZDdlMDA1ZGU3NjJjIiwic3ViIjoiZGF0YWh1YiIsImlzcyI6ImRhdGFodWItbWV0YWRhdGEtc2VydmljZSJ9.pp_vW2u1tiiTT7U0nDF2EQdcayOMB8jatiOA8Je4JJA'

Postman Collection

Collection includes a POST, GET, and DELETE for a single entity with a SchemaMetadata aspect

{
"info": {
"_postman_id": "87b7401c-a5dc-47e4-90b4-90fe876d6c28",
"name": "DataHub OpenAPI",
"description": "A description",
"schema": "https://schema.getpostman.com/json/collection/v2.1.0/collection.json"
},
"item": [
{
"name": "entities/v1",
"item": [
{
"name": "post Entities 1",
"request": {
"method": "POST",
"header": [
{
"key": "Content-Type",
"value": "application/json"
},
{
"key": "Accept",
"value": "application/json"
}
],
"body": {
"mode": "raw",
"raw": "[\n {\n \"aspect\": {\n \"__type\": \"SchemaMetadata\",\n \"schemaName\": \"SampleHdfsSchema\",\n \"platform\": \"urn:li:dataPlatform:platform\",\n \"platformSchema\": {\n \"__type\": \"MySqlDDL\",\n \"tableSchema\": \"schema\"\n },\n \"version\": 0,\n \"created\": {\n \"time\": 1621882982738,\n \"actor\": \"urn:li:corpuser:etl\",\n \"impersonator\": \"urn:li:corpuser:jdoe\"\n },\n \"lastModified\": {\n \"time\": 1621882982738,\n \"actor\": \"urn:li:corpuser:etl\",\n \"impersonator\": \"urn:li:corpuser:jdoe\"\n },\n \"hash\": \"\",\n \"fields\": [\n {\n \"fieldPath\": \"county_fips_codefg\",\n \"jsonPath\": \"null\",\n \"nullable\": true,\n \"description\": \"null\",\n \"type\": {\n \"type\": {\n \"__type\": \"StringType\"\n }\n },\n \"nativeDataType\": \"String()\",\n \"recursive\": false\n },\n {\n \"fieldPath\": \"county_name\",\n \"jsonPath\": \"null\",\n \"nullable\": true,\n \"description\": \"null\",\n \"type\": {\n \"type\": {\n \"__type\": \"StringType\"\n }\n },\n \"nativeDataType\": \"String()\",\n \"recursive\": false\n }\n ]\n },\n \"aspectName\": \"schemaMetadata\",\n \"entityType\": \"dataset\",\n \"entityUrn\": \"urn:li:dataset:(urn:li:dataPlatform:platform,testSchemaIngest,PROD)\"\n }\n]",
"options": {
"raw": {
"language": "json"
}
}
},
"url": {
"raw": "{{baseUrl}}/openapi/entities/v1/",
"host": [
"{{baseUrl}}"
],
"path": [
"openapi",
"entities",
"v1",
""
]
}
},
"response": [
{
"name": "OK",
"originalRequest": {
"method": "POST",
"header": [],
"body": {
"mode": "raw",
"raw": "[\n {\n \"aspect\": {\n \"value\": \"<Error: Too many levels of nesting to fake this schema>\"\n },\n \"aspectName\": \"aliquip ipsum tempor\",\n \"entityType\": \"ut est\",\n \"entityUrn\": \"enim in nulla\",\n \"entityKeyAspect\": {\n \"value\": \"<Error: Too many levels of nesting to fake this schema>\"\n }\n },\n {\n \"aspect\": {\n \"value\": \"<Error: Too many levels of nesting to fake this schema>\"\n },\n \"aspectName\": \"ipsum id\",\n \"entityType\": \"deser\",\n \"entityUrn\": \"aliqua sit\",\n \"entityKeyAspect\": {\n \"value\": \"<Error: Too many levels of nesting to fake this schema>\"\n }\n }\n]",
"options": {
"raw": {
"language": "json"
}
}
},
"url": {
"raw": "{{baseUrl}}/entities/v1/",
"host": [
"{{baseUrl}}"
],
"path": [
"entities",
"v1",
""
]
}
},
"status": "OK",
"code": 200,
"_postman_previewlanguage": "json",
"header": [
{
"key": "Content-Type",
"value": "application/json"
}
],
"cookie": [],
"body": "[\n \"c\",\n \"labore dolor exercitation in\"\n]"
}
]
},
{
"name": "delete Entities",
"request": {
"method": "DELETE",
"header": [
{
"key": "Accept",
"value": "application/json"
}
],
"url": {
"raw": "{{baseUrl}}/openapi/entities/v1/?urns=urn:li:dataset:(urn:li:dataPlatform:platform,testSchemaIngest,PROD)&soft=true",
"host": [
"{{baseUrl}}"
],
"path": [
"openapi",
"entities",
"v1",
""
],
"query": [
{
"key": "urns",
"value": "urn:li:dataset:(urn:li:dataPlatform:platform,testSchemaIngest,PROD)",
"description": "(Required) A list of raw urn strings, only supports a single entity type per request."
},
{
"key": "urns",
"value": "labore dolor exercitation in",
"description": "(Required) A list of raw urn strings, only supports a single entity type per request.",
"disabled": true
},
{
"key": "soft",
"value": "true",
"description": "Determines whether the delete will be soft or hard, defaults to true for soft delete"
}
]
}
},
"response": [
{
"name": "OK",
"originalRequest": {
"method": "DELETE",
"header": [],
"url": {
"raw": "{{baseUrl}}/entities/v1/?urns=urn:li:dataset:(urn:li:dataPlatform:platform,testSchemaIngest,PROD)&soft=true",
"host": [
"{{baseUrl}}"
],
"path": [
"entities",
"v1",
""
],
"query": [
{
"key": "urns",
"value": "urn:li:dataset:(urn:li:dataPlatform:platform,testSchemaIngest,PROD)"
},
{
"key": "urns",
"value": "officia occaecat elit dolor",
"disabled": true
},
{
"key": "soft",
"value": "true"
}
]
}
},
"status": "OK",
"code": 200,
"_postman_previewlanguage": "json",
"header": [
{
"key": "Content-Type",
"value": "application/json"
}
],
"cookie": [],
"body": "[\n {\n \"rowsRolledBack\": [\n {\n \"urn\": \"urn:li:dataset:(urn:li:dataPlatform:platform,testSchemaIngest,PROD)\"\n }\n ],\n \"rowsDeletedFromEntityDeletion\": 1\n }\n]"
}
]
},
{
"name": "get Entities",
"protocolProfileBehavior": {
"disableUrlEncoding": false
},
"request": {
"method": "GET",
"header": [
{
"key": "Accept",
"value": "application/json"
}
],
"url": {
"raw": "{{baseUrl}}/openapi/entities/v1/latest?urns=urn:li:dataset:(urn:li:dataPlatform:platform,testSchemaIngest,PROD)&aspectNames=schemaMetadata",
"host": [
"{{baseUrl}}"
],
"path": [
"openapi",
"entities",
"v1",
"latest"
],
"query": [
{
"key": "urns",
"value": "urn:li:dataset:(urn:li:dataPlatform:platform,testSchemaIngest,PROD)",
"description": "(Required) A list of raw urn strings, only supports a single entity type per request."
},
{
"key": "urns",
"value": "labore dolor exercitation in",
"description": "(Required) A list of raw urn strings, only supports a single entity type per request.",
"disabled": true
},
{
"key": "aspectNames",
"value": "schemaMetadata",
"description": "The list of aspect names to retrieve"
},
{
"key": "aspectNames",
"value": "labore dolor exercitation in",
"description": "The list of aspect names to retrieve",
"disabled": true
}
]
}
},
"response": [
{
"name": "OK",
"originalRequest": {
"method": "GET",
"header": [],
"url": {
"raw": "{{baseUrl}}/entities/v1/latest?urns=urn:li:dataset:(urn:li:dataPlatform:platform,testSchemaIngest,PROD)&aspectNames=schemaMetadata",
"host": [
"{{baseUrl}}"
],
"path": [
"entities",
"v1",
"latest"
],
"query": [
{
"key": "urns",
"value": "non exercitation occaecat",
"disabled": true
},
{
"key": "urns",
"value": "urn:li:dataset:(urn:li:dataPlatform:platform,testSchemaIngest,PROD)"
},
{
"key": "aspectNames",
"value": "non exercitation occaecat",
"disabled": true
},
{
"key": "aspectNames",
"value": "schemaMetadata"
}
]
}
},
"status": "OK",
"code": 200,
"_postman_previewlanguage": "json",
"header": [
{
"key": "Content-Type",
"value": "application/json"
}
],
"cookie": [],
"body": "{\n \"responses\": {\n \"urn:li:dataset:(urn:li:dataPlatform:hive,SampleHiveDataset,PROD)\": {\n \"entityName\": \"dataset\",\n \"urn\": \"urn:li:dataset:(urn:li:dataPlatform:hive,SampleHiveDataset,PROD)\",\n \"aspects\": {\n \"datasetKey\": {\n \"name\": \"datasetKey\",\n \"type\": \"VERSIONED\",\n \"version\": 0,\n \"value\": {\n \"__type\": \"DatasetKey\",\n \"platform\": \"urn:li:dataPlatform:hive\",\n \"name\": \"SampleHiveDataset\",\n \"origin\": \"PROD\"\n },\n \"created\": {\n \"time\": 1650657843351,\n \"actor\": \"urn:li:corpuser:__datahub_system\"\n }\n },\n \"schemaMetadata\": {\n \"name\": \"schemaMetadata\",\n \"type\": \"VERSIONED\",\n \"version\": 0,\n \"value\": {\n \"__type\": \"SchemaMetadata\",\n \"schemaName\": \"SampleHiveSchema\",\n \"platform\": \"urn:li:dataPlatform:hive\",\n \"version\": 0,\n \"created\": {\n \"time\": 1581407189000,\n \"actor\": \"urn:li:corpuser:jdoe\"\n },\n \"lastModified\": {\n \"time\": 1581407189000,\n \"actor\": \"urn:li:corpuser:jdoe\"\n },\n \"hash\": \"\",\n \"platformSchema\": {\n \"__type\": \"KafkaSchema\",\n \"documentSchema\": \"{\\\"type\\\":\\\"record\\\",\\\"name\\\":\\\"SampleHiveSchema\\\",\\\"namespace\\\":\\\"com.linkedin.dataset\\\",\\\"doc\\\":\\\"Sample Hive dataset\\\",\\\"fields\\\":[{\\\"name\\\":\\\"field_foo\\\",\\\"type\\\":[\\\"string\\\"]},{\\\"name\\\":\\\"field_bar\\\",\\\"type\\\":[\\\"boolean\\\"]}]}\"\n },\n \"fields\": [\n {\n \"fieldPath\": \"field_foo\",\n \"nullable\": false,\n \"description\": \"Foo field description\",\n \"type\": {\n \"type\": {\n \"__type\": \"BooleanType\"\n }\n },\n \"nativeDataType\": \"varchar(100)\",\n \"recursive\": false,\n \"isPartOfKey\": true\n },\n {\n \"fieldPath\": \"field_bar\",\n \"nullable\": false,\n \"description\": \"Bar field description\",\n \"type\": {\n \"type\": {\n \"__type\": \"BooleanType\"\n }\n },\n \"nativeDataType\": \"boolean\",\n \"recursive\": false,\n \"isPartOfKey\": false\n }\n ]\n },\n \"created\": {\n \"time\": 
1650610810000,\n \"actor\": \"urn:li:corpuser:UNKNOWN\"\n }\n }\n }\n }\n }\n}"
}
]
}
],
"auth": {
"type": "bearer",
"bearer": [
{
"key": "token",
"value": "{{token}}",
"type": "string"
}
]
},
"event": [
{
"listen": "prerequest",
"script": {
"type": "text/javascript",
"exec": [
""
]
}
},
{
"listen": "test",
"script": {
"type": "text/javascript",
"exec": [
""
]
}
}
]
}
],
"event": [
{
"listen": "prerequest",
"script": {
"type": "text/javascript",
"exec": [
""
]
}
},
{
"listen": "test",
"script": {
"type": "text/javascript",
"exec": [
""
]
}
}
],
"variable": [
{
"key": "baseUrl",
"value": "localhost:8080",
"type": "string"
},
{
"key": "token",
"value": "eyJhbGciOiJIUzI1NiJ9.eyJhY3RvclR5cGUiOiJVU0VSIiwiYWN0b3JJZCI6ImRhdGFodWIiLCJ0eXBlIjoiUEVSU09OQUwiLCJ2ZXJzaW9uIjoiMSIsImV4cCI6MTY1MDY2MDY1NSwianRpIjoiM2E4ZDY3ZTItOTM5Yi00NTY3LWE0MjYtZDdlMDA1ZGU3NjJjIiwic3ViIjoiZGF0YWh1YiIsImlzcyI6ImRhdGFodWItbWV0YWRhdGEtc2VydmljZSJ9.pp_vW2u1tiiTT7U0nDF2EQdcayOMB8jatiOA8Je4JJA",
"type": "default"
}
]
}

Relationships (/relationships) endpoint

GET

Sample Request

curl -X 'GET' \
'http://localhost:8080/openapi/relationships/v1/?urn=urn%3Ali%3Acorpuser%3Adatahub&relationshipTypes=IsPartOf&direction=INCOMING&start=0&count=200' \
-H 'accept: application/json'

Sample Response

{
"start": 0,
"count": 2,
"total": 2,
"entities": [
{
"relationshipType": "IsPartOf",
"urn": "urn:li:corpGroup:bfoo"
},
{
"relationshipType": "IsPartOf",
"urn": "urn:li:corpGroup:jdoe"
}
]
}

Programmatic Usage

Programmatic usage of the models can be done through the Java Rest Emitter which includes the generated models. A minimal Java project for emitting to the OpenAPI endpoints would need the following dependencies (gradle format):

dependencies {
implementation 'io.acryl:datahub-client:<DATAHUB_CLIENT_VERSION>'
implementation 'org.apache.httpcomponents:httpclient:<APACHE_HTTP_CLIENT_VERSION>'
implementation 'org.apache.httpcomponents:httpasyncclient:<APACHE_ASYNC_CLIENT_VERSION>'
}

Writing metadata events to the /platform endpoints

The following code emits metadata events through OpenAPI by constructing a list of UpsertAspectRequests. Behind the scenes, this is using the /platform/entities/v1 endpoint to send metadata to GMS.

import io.datahubproject.openapi.generated.DatasetProperties;
import datahub.client.rest.RestEmitter;
import datahub.event.UpsertAspectRequest;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;


public class Main {
public static void main(String[] args) throws IOException, ExecutionException, InterruptedException {
RestEmitter emitter = RestEmitter.createWithDefaults();

List<UpsertAspectRequest> requests = new ArrayList<>();
UpsertAspectRequest upsertAspectRequest = UpsertAspectRequest.builder()
.entityType("dataset")
.entityUrn("urn:li:dataset:(urn:li:dataPlatform:bigquery,my-project.my-other-dataset.user-table,PROD)")
.aspect(new DatasetProperties().description("This is the canonical User profile dataset"))
.build();
UpsertAspectRequest upsertAspectRequest2 = UpsertAspectRequest.builder()
.entityType("dataset")
.entityUrn("urn:li:dataset:(urn:li:dataPlatform:bigquery,my-project.another-dataset.user-table,PROD)")
.aspect(new DatasetProperties().description("This is the canonical User profile dataset 2"))
.build();
requests.add(upsertAspectRequest);
requests.add(upsertAspectRequest2);
System.out.println(emitter.emit(requests, null).get());
System.exit(0);
}
}
- + \ No newline at end of file diff --git a/docs/api/restli/evaluate-tests/index.html b/docs/api/restli/evaluate-tests/index.html index 389a19cd5310a..210c3a4c2e1c2 100644 --- a/docs/api/restli/evaluate-tests/index.html +++ b/docs/api/restli/evaluate-tests/index.html @@ -8,14 +8,14 @@ - +

Evaluate Tests Endpoint

You can do a HTTP POST request to /gms/test?action=evaluate endpoint with the urn as part of JSON Payload to run metadata tests for the particular URN.

curl --location --request POST 'https://DOMAIN.acryl.io/gms/test?action=evaluate' \
--header 'Authorization: Bearer TOKEN' \
--header 'Content-Type: application/json' \
--data-raw '{
"urn": "YOUR_URN"
}'

The supported parameters are

  • urn - Required URN string
  • shouldPush - Optional Boolean - whether or not to push the results to persist them
  • testUrns - Optional List of string - If you wish to get specific test URNs evaluated
- + \ No newline at end of file diff --git a/docs/api/restli/get-elastic-task-status/index.html b/docs/api/restli/get-elastic-task-status/index.html index 6760bd69bd7fd..3e44ed95667af 100644 --- a/docs/api/restli/get-elastic-task-status/index.html +++ b/docs/api/restli/get-elastic-task-status/index.html @@ -8,13 +8,13 @@ - +

Get ElasticSearch Task Status Endpoint

You can do a HTTP POST request to /gms/operations?action=getEsTaskStatus endpoint to see the status of the input task running in ElasticSearch. For example, the task ID given by the truncateTimeseriesAspect endpoint. The task ID can be passed in as a string with node name and task ID separated by a colon (as is output by the previous API), or the node name and task ID parameters separately.

curl --location --request POST 'https://demo.datahubproject.io/api/gms/operations?action=getEsTaskStatus' \
--header 'Authorization: Bearer TOKEN' \
--header 'Content-Type: application/json' \
--data-raw '{
"task": "aB1cdEf2GHIJKLMnoPQr3S:123456"
}'

curl --location --request POST http://localhost:8080/operations\?action\=getEsTaskStatus \
--header 'Authorization: Bearer TOKEN' \
--header 'Content-Type: application/json' \
--data-raw '{
"nodeId": "aB1cdEf2GHIJKLMnoPQr3S",
"taskId": 12345
}'

The output will be a string representing a JSON object with the task status.

{
"value": "{\"error\":\"Could not get task status for XIAMx5WySACgg9XxBgaKmw:12587\"}"
}
{
"completed": true,
"taskId": "qhxGdzytQS-pQek8CwBCZg:54654",
"runTimeNanos": 1179458,
"status": {
"total": 0,
"updated": 0,
"created": 0,
"deleted": 0,
"batches": 0,
"version_conflicts": 0,
"noops": 0,
"retries": {
"bulk": 0,
"search": 0
},
"throttled_millis": 0,
"requests_per_second": -1.0,
"throttled_until_millis": 0
}
}
- + \ No newline at end of file diff --git a/docs/api/restli/get-index-sizes/index.html b/docs/api/restli/get-index-sizes/index.html index 81f08e3505b75..f88466e9d2747 100644 --- a/docs/api/restli/get-index-sizes/index.html +++ b/docs/api/restli/get-index-sizes/index.html @@ -8,13 +8,13 @@ - +

Get Index Sizes Endpoint

You can do a HTTP POST request to /gms/operations?action=getIndexSizes endpoint with no parameters to see the size of indices in ElasticSearch. For now, only timeseries indices are supported, as they can grow indefinitely, and the truncateTimeseriesAspect endpoint is provided to clean up old entries. This endpoint can be used in conjunction with the cleanup endpoint to see which indices are the largest before truncation.

curl --location --request POST 'https://demo.datahubproject.io/api/gms/operations?action=getIndexSizes' \
--header 'Authorization: Bearer TOKEN'

The endpoint takes no parameters, and the output will be a string representing a JSON object containing the following information about each index:

      {
"aspectName": "datasetusagestatistics",
"sizeMb": 0.208,
"indexName": "dataset_datasetusagestatisticsaspect_v1",
"entityName": "dataset"
}
- + \ No newline at end of file diff --git a/docs/api/restli/restli-overview/index.html b/docs/api/restli/restli-overview/index.html index 35047ddc8e535..4eebe50ade282 100644 --- a/docs/api/restli/restli-overview/index.html +++ b/docs/api/restli/restli-overview/index.html @@ -8,7 +8,7 @@ - + @@ -33,7 +33,7 @@ the dataset-specific aspects are located under metadata-models/src/main/pegasus/com/linkedin/metadata/dataset.

3. How do I find the valid set of Relationship names?

All relationships are defined on foreign-key fields inside Aspect PDLs. They are reflected by fields bearing the @Relationship annotation. Inside this annotation is a "name" field that defines the standardized name of the Relationship to be used when querying.

By convention, all entity PDLs live under metadata-models/src/main/pegasus/com/linkedin/metadata/common or metadata-models/src/main/pegasus/com/linkedin/metadata/<entity-name>. For example, the dataset-specific aspects are located under metadata-models/src/main/pegasus/com/linkedin/metadata/dataset.

- + \ No newline at end of file diff --git a/docs/api/restli/restore-indices/index.html b/docs/api/restli/restore-indices/index.html index 760f975975cce..357518f48f74e 100644 --- a/docs/api/restli/restore-indices/index.html +++ b/docs/api/restli/restore-indices/index.html @@ -8,13 +8,13 @@ - +

Restore Indices Endpoint

You can do a HTTP POST request to /gms/operations?action=restoreIndices endpoint with the urn as part of JSON Payload to restore indices for the particular URN, or with the urnLike regex to restore for batchSize URNs matching the pattern starting from start.

curl --location --request POST 'https://demo.datahubproject.io/api/gms/operations?action=restoreIndices' \
--header 'Authorization: Bearer TOKEN' \
--header 'Content-Type: application/json' \
--data-raw '{
"urn": "YOUR_URN"
}'

curl --location --request POST 'https://demo.datahubproject.io/api/gms/operations?action=restoreIndices' \
--header 'Authorization: Bearer TOKEN' \
--header 'Content-Type: application/json' \
--data-raw '{
"urnLike": "urn:dataPlatform:%"
}'

The supported parameters are

  • urn - Optional URN string
  • aspect - Optional Aspect string
  • urnLike - Optional string regex to match URNs
  • start - Optional integer to decide from which row number of the SQL store to start restoring. Default: 0
  • batchSize - Optional integer to decide how many rows to restore. Default: 10
- + \ No newline at end of file diff --git a/docs/api/restli/truncate-time-series-aspect/index.html b/docs/api/restli/truncate-time-series-aspect/index.html index 4cae0ecabceb2..1492a0acc7436 100644 --- a/docs/api/restli/truncate-time-series-aspect/index.html +++ b/docs/api/restli/truncate-time-series-aspect/index.html @@ -8,13 +8,13 @@ - +

Truncate Timeseries Index Endpoint

You can do a HTTP POST request to /gms/operations?action=truncateTimeseriesAspect endpoint to manage the size of a time series index by removing entries older than a certain timestamp, thereby truncating the table to only the entries needed, to save space. The getIndexSizes endpoint can be used to identify the largest indices. The output includes the index parameters needed for this function.

curl --location --request POST 'https://demo.datahubproject.io/api/gms/operations?action=truncateTimeseriesAspect' \
--header 'Authorization: Bearer TOKEN' \
--header 'Content-Type: application/json' \
--data-raw '{
"entityType": "YOUR_ENTITY_TYPE",
"aspect": "YOUR_ASPECT_NAME",
"endTimeMillis": 1000000000000
}'

curl --location --request POST 'https://demo.datahubproject.io/api/gms/operations?action=truncateTimeseriesAspect' \
--header 'Authorization: Bearer TOKEN' \
--header 'Content-Type: application/json' \
--data-raw '{
"entityType": "YOUR_ENTITY_TYPE",
"aspect": "YOUR_ASPECT_NAME",
"endTimeMillis": 1000000000000,
"dryRun": false,
"batchSize": 100,
"timeoutSeconds": 3600
}'

The supported parameters are

  • entityType - Required type of the entity to truncate the index of, for example, dataset.
  • aspect - Required name of the aspect to truncate the index of, for example, datasetusagestatistics. A call to getIndexSizes shows the entityType and aspect parameters for each index along with its size.
  • endTimeMillis - Required timestamp to truncate the index to. Entities with timestamps older than this time will be deleted.
  • dryRun - Optional boolean to enable/disable dry run functionality. Default: true. In a dry run, the following information will be printed:
{"value":"Delete 0 out of 201 rows (0.00%). Reindexing the aspect without the deleted records. This was a dry run. Run with dryRun = false to execute."}
  • batchSize - Optional integer to control the batch size for the deletion. Default: 10000
  • timeoutSeconds - Optional integer to set a timeout for the delete operation. Default: No timeout set

The output to the call will be information about how many rows would be deleted and how to proceed for a dry run:

{"value":"Delete 0 out of 201 rows (0.00%). Reindexing the aspect without the deleted records. This was a dry run. Run with dryRun = false to execute."}

For a non-dry-run, the output will be the Task ID of the asynchronous delete operation. This task ID can be used to monitor the status of the operation.

- + \ No newline at end of file diff --git a/docs/api/tutorials/custom-properties/index.html b/docs/api/tutorials/custom-properties/index.html index 884d3b5900575..05c7b40da21d7 100644 --- a/docs/api/tutorials/custom-properties/index.html +++ b/docs/api/tutorials/custom-properties/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ Please check out API feature comparison table for more information,

# Inlined from /metadata-ingestion/examples/library/dataset_add_properties.py
import logging
from typing import Union

from datahub.configuration.kafka import KafkaProducerConnectionConfig
from datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig
from datahub.emitter.mce_builder import make_dataset_urn
from datahub.emitter.rest_emitter import DataHubRestEmitter
from datahub.specific.dataset import DatasetPatchBuilder

log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


# Get an emitter, either REST or Kafka, this example shows you both
def get_emitter() -> Union[DataHubRestEmitter, DatahubKafkaEmitter]:
USE_REST_EMITTER = True
if USE_REST_EMITTER:
gms_endpoint = "http://localhost:8080"
return DataHubRestEmitter(gms_server=gms_endpoint)
else:
kafka_server = "localhost:9092"
schema_registry_url = "http://localhost:8081"
return DatahubKafkaEmitter(
config=KafkaEmitterConfig(
connection=KafkaProducerConnectionConfig(
bootstrap=kafka_server, schema_registry_url=schema_registry_url
)
)
)


dataset_urn = make_dataset_urn(platform="hive", name="fct_users_created", env="PROD")

with get_emitter() as emitter:
for patch_mcp in (
DatasetPatchBuilder(dataset_urn)
.add_custom_property("cluster_name", "datahubproject.acryl.io")
.add_custom_property("retention_time", "2 years")
.build()
):
emitter.emit(patch_mcp)


log.info(f"Added cluster_name, retention_time properties to dataset {dataset_urn}")

Expected Outcome of Adding Custom Properties

You can now see the two new properties are added to fct_users_deleted and the previous property encoding is unchanged.

dataset-properties-added

We can also verify this operation by programmatically checking the datasetProperties aspect after running this code using the datahub cli.

datahub get --urn "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)" --aspect datasetProperties
{
"datasetProperties": {
"customProperties": {
"encoding": "utf-8",
"cluster_name": "datahubproject.acryl.io",
"retention_time": "2 years"
},
"description": "table containing all the users deleted on a single day",
"tags": []
}
}

Add and Remove Custom Properties programmatically

The following code shows you how can add and remove custom properties in the same call. In the following code, we add custom property cluster_name and remove property retention_time from a dataset named fct_users_deleted without affecting existing properties.

# Inlined from /metadata-ingestion/examples/library/dataset_add_remove_properties.py
import logging
from typing import Union

from datahub.configuration.kafka import KafkaProducerConnectionConfig
from datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig
from datahub.emitter.mce_builder import make_dataset_urn
from datahub.emitter.rest_emitter import DataHubRestEmitter
from datahub.specific.dataset import DatasetPatchBuilder

log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


# Get an emitter, either REST or Kafka, this example shows you both
def get_emitter() -> Union[DataHubRestEmitter, DatahubKafkaEmitter]:
USE_REST_EMITTER = True
if USE_REST_EMITTER:
gms_endpoint = "http://localhost:8080"
return DataHubRestEmitter(gms_server=gms_endpoint)
else:
kafka_server = "localhost:9092"
schema_registry_url = "http://localhost:8081"
return DatahubKafkaEmitter(
config=KafkaEmitterConfig(
connection=KafkaProducerConnectionConfig(
bootstrap=kafka_server, schema_registry_url=schema_registry_url
)
)
)


dataset_urn = make_dataset_urn(platform="hive", name="fct_users_created", env="PROD")

with get_emitter() as emitter:
for patch_mcp in (
DatasetPatchBuilder(dataset_urn)
.add_custom_property("cluster_name", "datahubproject.acryl.io")
.remove_custom_property("retention_time")
.build()
):
emitter.emit(patch_mcp)


log.info(
f"Added cluster_name property, removed retention_time property from dataset {dataset_urn}"
)

Expected Outcome of Add and Remove Operations on Custom Properties

You can now see the cluster_name property is added to fct_users_deleted and the retention_time property is removed.

dataset-properties-added-removed

We can also verify this operation programmatically by checking the datasetProperties aspect using the datahub cli.

datahub get --urn "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)" --aspect datasetProperties
{
"datasetProperties": {
"customProperties": {
"encoding": "utf-8",
"cluster_name": "datahubproject.acryl.io"
},
"description": "table containing all the users deleted on a single day",
"tags": []
}
}

Replace Custom Properties programmatically

The following code replaces the current custom properties with a new properties map that includes only the properties cluster_name and retention_time. After running this code, the previous encoding property will be removed.

# Inlined from /metadata-ingestion/examples/library/dataset_replace_properties.py
import logging
from typing import Union

from datahub.configuration.kafka import KafkaProducerConnectionConfig
from datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig
from datahub.emitter.mce_builder import make_dataset_urn
from datahub.emitter.rest_emitter import DataHubRestEmitter
from datahub.specific.dataset import DatasetPatchBuilder

log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)


# Get an emitter, either REST or Kafka, this example shows you both
def get_emitter() -> Union[DataHubRestEmitter, DatahubKafkaEmitter]:
USE_REST_EMITTER = True
if USE_REST_EMITTER:
gms_endpoint = "http://localhost:8080"
return DataHubRestEmitter(gms_server=gms_endpoint)
else:
kafka_server = "localhost:9092"
schema_registry_url = "http://localhost:8081"
return DatahubKafkaEmitter(
config=KafkaEmitterConfig(
connection=KafkaProducerConnectionConfig(
bootstrap=kafka_server, schema_registry_url=schema_registry_url
)
)
)


dataset_urn = make_dataset_urn(platform="hive", name="fct_users_created", env="PROD")

property_map_to_set = {
"cluster_name": "datahubproject.acryl.io",
"retention_time": "2 years",
}

with get_emitter() as emitter:
for patch_mcp in (
DatasetPatchBuilder(dataset_urn)
.set_custom_properties(property_map_to_set)
.build()
):
emitter.emit(patch_mcp)


log.info(
f"Replaced custom properties on dataset {dataset_urn} as {property_map_to_set}"
)

Expected Outcome of Replacing Custom Properties

You can now see the cluster_name and retention_time properties are added to fct_users_deleted but the previous encoding property is no longer present.

dataset-properties-replaced

We can also verify this operation programmatically by checking the datasetProperties aspect using the datahub cli.

datahub get --urn "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)" --aspect datasetProperties
{
"datasetProperties": {
"customProperties": {
"cluster_name": "datahubproject.acryl.io",
"retention_time": "2 years"
},
"description": "table containing all the users deleted on a single day",
"tags": []
}
}
- + \ No newline at end of file diff --git a/docs/api/tutorials/datasets/index.html b/docs/api/tutorials/datasets/index.html index e127a6a8bd6ed..09dfed154c468 100644 --- a/docs/api/tutorials/datasets/index.html +++ b/docs/api/tutorials/datasets/index.html @@ -8,7 +8,7 @@ - + @@ -21,7 +21,7 @@ Soft delete sets the Status aspect of the entity to Removed, which hides the entity and all its aspects from being returned by the UI. Hard delete physically deletes all rows for all aspects of the entity.

For more information about soft delete and hard delete, please refer to Removing Metadata from DataHub.

# Inlined from /metadata-ingestion/examples/library/delete_dataset.py
import logging

from datahub.emitter.mce_builder import make_dataset_urn
from datahub.ingestion.graph.client import DatahubClientConfig, DataHubGraph

log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

graph = DataHubGraph(
config=DatahubClientConfig(
server="http://localhost:8080",
)
)

dataset_urn = make_dataset_urn(name="fct_users_created", platform="hive")

# Soft-delete the dataset.
graph.delete_entity(urn=dataset_urn, hard=False)

log.info(f"Deleted dataset {dataset_urn}")

Expected Outcomes of Deleting Dataset

The dataset fct_users_created has now been deleted, so if you search for a hive dataset named fct_users_created, you will no longer be able to see it.

dataset-deleted

- + \ No newline at end of file diff --git a/docs/api/tutorials/deprecation/index.html b/docs/api/tutorials/deprecation/index.html index e6a0323b07115..377fea44e83f8 100644 --- a/docs/api/tutorials/deprecation/index.html +++ b/docs/api/tutorials/deprecation/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ For detailed steps, please refer to Datahub Quickstart Guide.

note

Before updating deprecation, you need to ensure the targeted dataset is already present in your datahub. If you attempt to manipulate entities that do not exist, your operation will fail. In this guide, we will be using data from a sample ingestion.

Read Deprecation

query {
dataset(urn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)") {
deprecation {
deprecated
decommissionTime
}
}
}

If you see the following response, the operation was successful:

{
"data": {
"dataset": {
"deprecation": {
"deprecated": false,
"decommissionTime": null
}
}
},
"extensions": {}
}

Update Deprecation

mutation updateDeprecation {
updateDeprecation(input: { urn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)", deprecated: true })
}

Also note that you can update the deprecation status of multiple entities or subresources using batchUpdateDeprecation.

mutation batchUpdateDeprecation {
batchUpdateDeprecation(
input: {
deprecated: true,
resources: [
{ resourceUrn:"urn:li:dataset:(urn:li:dataPlatform:hdfs,SampleHdfsDataset,PROD)"} ,
{ resourceUrn:"urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)"} ,]
}
)
}

If you see the following response, the operation was successful:

{
"data": {
"updateDeprecation": true
},
"extensions": {}
}

Expected Outcomes of Updating Deprecation

You can now see the dataset fct_users_created has been marked as Deprecated.

tag-removed

- + \ No newline at end of file diff --git a/docs/api/tutorials/descriptions/index.html b/docs/api/tutorials/descriptions/index.html index 51e387175ce20..aadcb1c7643aa 100644 --- a/docs/api/tutorials/descriptions/index.html +++ b/docs/api/tutorials/descriptions/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ If you attempt to manipulate entities that do not exist, your operation will fail. In this guide, we will be using data from sample ingestion.

In this example, we will add a description to user_name column of a dataset fct_users_deleted.

Read Description on Dataset

query {
dataset(urn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)") {
properties {
description
}
}
}

If you see the following response, the operation was successful:

{
"data": {
"dataset": {
"properties": {
"description": "table containing all the users deleted on a single day"
}
}
},
"extensions": {}
}

Read Description on Columns

query {
dataset(urn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)") {
schemaMetadata {
fields {
fieldPath
description
}
}
}
}

If you see the following response, the operation was successful:

{
"data": {
"dataset": {
"schemaMetadata": {
"fields": [
{
"fieldPath": "user_name",
"description": "Name of the user who was deleted"
},
...
{
"fieldPath": "deletion_reason",
"description": "Why the user chose to deactivate"
}
]
}
}
},
"extensions": {}
}

Add Description on Dataset

curl --location --request POST 'http://localhost:8080/api/graphql' \
--header 'Authorization: Bearer <my-access-token>' \
--header 'Content-Type: application/json' \
--data-raw '{
"query": "mutation updateDataset { updateDataset( urn:\"urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)\", input: { editableProperties: { description: \"## The Real Estate Sales Dataset\nThis is a really important Dataset that contains all the relevant information about sales that have happened organized by address.\n\" } institutionalMemory: { elements: { author: \"urn:li:corpuser:jdoe\", url: \"https://wikipedia.com/real_estate\", description: \"This is the definition of what real estate means\" } } } ) { urn } }",
"variables": {}
}'

Expected Response:

{
"data": {
"updateDataset": {
"urn": "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)"
}
},
"extensions": {}
}

Expected Outcomes of Adding Description on Dataset

You can now see the description is added to fct_users_deleted.

dataset-description-added

Add Description on Column

mutation updateDescription {
updateDescription(
input: {
description: "Name of the user who was deleted. This description is updated via GraphQL.",
resourceUrn:"urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)",
subResource: "user_name",
subResourceType:DATASET_FIELD
}
)
}

Note that you can use general markdown in description. For example, you can do the following.

mutation updateDescription {
updateDescription(
input: {
description: """
### User Name
The `user_name` column is a primary key column that contains the name of the user who was deleted.
""",
resourceUrn:"urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)",
subResource: "user_name",
subResourceType:DATASET_FIELD
}
)
}

updateDescription currently only supports Dataset Schema Fields and Containers. For more information about the updateDescription mutation, please refer to updateDescription.

If you see the following response, the operation was successful:

{
"data": {
"updateDescription": true
},
"extensions": {}
}

Expected Outcomes of Adding Description on Column

You can now see column description is added to user_name column of fct_users_deleted.

column-description-added

- + \ No newline at end of file diff --git a/docs/api/tutorials/domains/index.html b/docs/api/tutorials/domains/index.html index 2fdaf98f3ffdd..983940474887e 100644 --- a/docs/api/tutorials/domains/index.html +++ b/docs/api/tutorials/domains/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

Domains

Why Would You Use Domains?

Domains are curated, top-level folders or categories where related assets can be explicitly grouped. Management of Domains can be centralized, or distributed out to Domain owners. Currently, an asset can belong to only one Domain at a time. For more information about domains, refer to About DataHub Domains.

Goal Of This Guide

This guide will show you how to

  • Create a domain.
  • Read domains attached to a dataset.
  • Add a dataset to a domain.
  • Remove the domain from a dataset.

Prerequisites

For this tutorial, you need to deploy DataHub Quickstart and ingest sample data. For detailed steps, please refer to Datahub Quickstart Guide.

Create Domain

mutation createDomain {
createDomain(input: { name: "Marketing", description: "Entities related to the marketing department" })
}

If you see the following response, the operation was successful:

{
"data": {
"createDomain": "<domain_urn>"
},
"extensions": {}
}

Expected Outcomes of Creating Domain

You can now see Marketing domain has been created under Govern > Domains.

domain-created

Read Domains

query {
dataset(urn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)") {
domain {
associatedUrn
domain {
urn
properties {
name
}
}
}
}
}

If you see the following response, the operation was successful:

{
"data": {
"dataset": {
"domain": {
"associatedUrn": "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)",
"domain": {
"urn": "urn:li:domain:71b3bf7b-2e3f-4686-bfe1-93172c8c4e10",
"properties": {
"name": "Marketing"
}
}
}
}
},
"extensions": {}
}

Add Domains

mutation setDomain {
setDomain(domainUrn: "urn:li:domain:marketing", entityUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)")
}

If you see the following response, the operation was successful:

{
"data": {
"setDomain": true
},
"extensions": {}
}

Expected Outcomes of Adding Domain

You can now see Marketing domain has been added to the dataset.

domain-added

Remove Domains

mutation unsetDomain {
unsetDomain(
entityUrn:"urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)"
)
}

Expected Response:

{
"data": {
"unsetDomain": true
},
"extensions": {}
}

Expected Outcomes of Removing Domain

You can now see a domain Marketing has been removed from the fct_users_created dataset.

domain-removed

- + \ No newline at end of file diff --git a/docs/api/tutorials/lineage/index.html b/docs/api/tutorials/lineage/index.html index 7e732af1c6a39..9b718cc4f7886 100644 --- a/docs/api/tutorials/lineage/index.html +++ b/docs/api/tutorials/lineage/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ For detailed steps, please refer to Datahub Quickstart Guide.

note

Before adding lineage, you need to ensure the targeted dataset is already present in your datahub. If you attempt to manipulate entities that do not exist, your operation will fail. In this guide, we will be using data from sample ingestion.

Add Lineage

mutation updateLineage {
updateLineage(
input: {
edgesToAdd: [
{
downstreamUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,logging_events,PROD)"
upstreamUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)"
}
]
edgesToRemove: []
}
)
}

Note that you can create a list of edges. For example, if you want to assign multiple upstream entities to a downstream entity, you can do the following.

mutation updateLineage {
updateLineage(
input: {
edgesToAdd: [
{
downstreamUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,logging_events,PROD)"
upstreamUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)"
}
{
downstreamUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,logging_events,PROD)"
upstreamUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)"
}
]
edgesToRemove: []
}
)
}

For more information about the updateLineage mutation, please refer to updateLineage.

If you see the following response, the operation was successful:

{
"data": {
"updateLineage": true
},
"extensions": {}
}

Expected Outcomes of Adding Lineage

You can now see the lineage between fct_users_deleted and logging_events.

lineage-added

Add Column-level Lineage

# Inlined from /metadata-ingestion/examples/library/lineage_emitter_dataset_finegrained_sample.py
import datahub.emitter.mce_builder as builder
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.metadata.com.linkedin.pegasus2avro.dataset import (
DatasetLineageType,
FineGrainedLineage,
FineGrainedLineageDownstreamType,
FineGrainedLineageUpstreamType,
Upstream,
UpstreamLineage,
)


def datasetUrn(tbl):
return builder.make_dataset_urn("hive", tbl)


def fldUrn(tbl, fld):
return builder.make_schema_field_urn(datasetUrn(tbl), fld)


fineGrainedLineages = [
FineGrainedLineage(
upstreamType=FineGrainedLineageUpstreamType.FIELD_SET,
upstreams=[
fldUrn("fct_users_deleted", "browser_id"),
fldUrn("fct_users_created", "user_id"),
],
downstreamType=FineGrainedLineageDownstreamType.FIELD,
downstreams=[fldUrn("logging_events", "browser")],
),
]


# this is just to check if any conflicts with existing Upstream, particularly the DownstreamOf relationship
upstream = Upstream(
dataset=datasetUrn("fct_users_deleted"), type=DatasetLineageType.TRANSFORMED
)

fieldLineages = UpstreamLineage(
upstreams=[upstream], fineGrainedLineages=fineGrainedLineages
)

lineageMcp = MetadataChangeProposalWrapper(
entityUrn=datasetUrn("logging_events"),
aspect=fieldLineages,
)

# Create an emitter to the GMS REST API.
emitter = DatahubRestEmitter("http://localhost:8080")

# Emit metadata!
emitter.emit_mcp(lineageMcp)

Expected Outcome of Adding Column Level Lineage

You can now see the column-level lineage between datasets. Note that you have to enable Show Columns to be able to see the column-level lineage.

column-level-lineage-added

Read Lineage

query searchAcrossLineage {
searchAcrossLineage(
input: {
query: "*"
urn: "urn:li:dataset:(urn:li:dataPlatform:dbt,long_tail_companions.adoption.human_profiles,PROD)"
start: 0
count: 10
direction: DOWNSTREAM
orFilters: [
{
and: [
{
condition: EQUAL
negated: false
field: "degree"
values: ["1", "2", "3+"]
}
]
}
]
}
) {
searchResults {
degree
entity {
urn
type
}
}
}
}

This example shows using lineage degrees as a filter, but additional search filters can be included here as well.

This will perform a multi-hop lineage search on the urn specified. For more information about the searchAcrossLineage query, please refer to searchAcrossLineage.

- + \ No newline at end of file diff --git a/docs/api/tutorials/ml/index.html b/docs/api/tutorials/ml/index.html index 6faff19767d62..d399973b87f7f 100644 --- a/docs/api/tutorials/ml/index.html +++ b/docs/api/tutorials/ml/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ Thus, it is essential for these systems to be discoverable to facilitate easy access and utilization by other members of the organization.

For more information on ML entities, please refer to the following docs:

Goal Of This Guide

This guide will show you how to

  • Create ML entities: MlFeature, MlFeatureTable, MlModel, MlModelGroup
  • Read ML entities: MlFeature, MlFeatureTable, MlModel, MlModelGroup
  • Attach MlFeatureTable or MlModel to MlFeature

Prerequisites

For this tutorial, you need to deploy DataHub Quickstart and ingest sample data. For detailed steps, please refer to Datahub Quickstart Guide.

Create ML Entities

Create MlFeature

# Inlined from /metadata-ingestion/examples/library/create_mlfeature.py
import datahub.emitter.mce_builder as builder
import datahub.metadata.schema_classes as models
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter

# Create an emitter to DataHub over REST
emitter = DatahubRestEmitter(gms_server="http://localhost:8080", extra_headers={})

dataset_urn = builder.make_dataset_urn(
name="fct_users_deleted", platform="hive", env="PROD"
)
feature_urn = builder.make_ml_feature_urn(
feature_table_name="my-feature-table",
feature_name="my-feature",
)

# Create feature
metadata_change_proposal = MetadataChangeProposalWrapper(
entityType="mlFeature",
changeType=models.ChangeTypeClass.UPSERT,
entityUrn=feature_urn,
aspectName="mlFeatureProperties",
aspect=models.MLFeaturePropertiesClass(
description="my feature", sources=[dataset_urn], dataType="TEXT"
),
)

# Emit metadata!
emitter.emit(metadata_change_proposal)

Note that when creating a feature, you can access a list of data sources using sources.

Create MlFeatureTable

# Inlined from /metadata-ingestion/examples/library/create_mlfeature_table.py
import datahub.emitter.mce_builder as builder
import datahub.metadata.schema_classes as models
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter

# Create an emitter to DataHub over REST
emitter = DatahubRestEmitter(gms_server="http://localhost:8080", extra_headers={})

feature_table_urn = builder.make_ml_feature_table_urn(
feature_table_name="my-feature-table", platform="feast"
)
feature_urns = [
builder.make_ml_feature_urn(
feature_name="my-feature", feature_table_name="my-feature-table"
),
builder.make_ml_feature_urn(
feature_name="my-feature2", feature_table_name="my-feature-table"
),
]
feature_table_properties = models.MLFeatureTablePropertiesClass(
description="Test description", mlFeatures=feature_urns
)

# MCP creation
metadata_change_proposal = MetadataChangeProposalWrapper(
entityType="mlFeatureTable",
changeType=models.ChangeTypeClass.UPSERT,
entityUrn=feature_table_urn,
aspect=feature_table_properties,
)

# Emit metadata!
emitter.emit(metadata_change_proposal)

Note that when creating a feature table, you can access a list of features using mlFeatures.

Create MlModel

Please note that an MlModel represents the outcome of a single training run for a model, not the collective results of all model runs.

# Inlined from /metadata-ingestion/examples/library/create_mlmodel.py
import datahub.emitter.mce_builder as builder
import datahub.metadata.schema_classes as models
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter

# Create an emitter to DataHub over REST
emitter = DatahubRestEmitter(gms_server="http://localhost:8080", extra_headers={})
model_urn = builder.make_ml_model_urn(
model_name="my-test-model", platform="science", env="PROD"
)
model_group_urns = [
builder.make_ml_model_group_urn(
group_name="my-model-group", platform="science", env="PROD"
)
]
feature_urns = [
builder.make_ml_feature_urn(
feature_name="my-feature", feature_table_name="my-feature-table"
),
builder.make_ml_feature_urn(
feature_name="my-feature2", feature_table_name="my-feature-table"
),
]

metadata_change_proposal = MetadataChangeProposalWrapper(
entityType="mlModel",
changeType=models.ChangeTypeClass.UPSERT,
entityUrn=model_urn,
aspectName="mlModelProperties",
aspect=models.MLModelPropertiesClass(
description="my feature",
groups=model_group_urns,
mlFeatures=feature_urns,
trainingMetrics=[
models.MLMetricClass(
name="accuracy", description="accuracy of the model", value="1.0"
)
],
hyperParams=[
models.MLHyperParamClass(
name="hyper_1", description="hyper_1", value="0.102"
)
],
),
)

# Emit metadata!
emitter.emit(metadata_change_proposal)

Note that when creating a model, you can access a list of features using mlFeatures. Additionally, you can access the relationship to model groups with groups.

Create MlModelGroup

Please note that an MlModelGroup serves as a container for all the runs of a single ML model.

# Inlined from /metadata-ingestion/examples/library/create_mlmodel_group.py
import datahub.emitter.mce_builder as builder
import datahub.metadata.schema_classes as models
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter

# Create an emitter to DataHub over REST
emitter = DatahubRestEmitter(gms_server="http://localhost:8080", extra_headers={})
model_group_urn = builder.make_ml_model_group_urn(
group_name="my-model-group", platform="science", env="PROD"
)


metadata_change_proposal = MetadataChangeProposalWrapper(
entityType="mlModelGroup",
changeType=models.ChangeTypeClass.UPSERT,
entityUrn=model_group_urn,
aspectName="mlModelGroupProperties",
aspect=models.MLModelGroupPropertiesClass(
description="my model group",
),
)


# Emit metadata!
emitter.emit(metadata_change_proposal)

Expected Outcome of creating entities

You can search the entities in DataHub UI.

feature-table-created

model-group-created

Read ML Entities

Read MLFeature

query {
mlFeature(urn: "urn:li:mlFeature:(test_feature_table_all_feature_dtypes,test_BOOL_LIST_feature)"){
name
featureNamespace
description
properties {
description
dataType
version {
versionTag
}
}
}
}

Expected response:

{
"data": {
"mlFeature": {
"name": "test_BOOL_LIST_feature",
"featureNamespace": "test_feature_table_all_feature_dtypes",
"description": null,
"properties": {
"description": null,
"dataType": "SEQUENCE",
"version": null
}
}
},
"extensions": {}
}

Read MLFeatureTable

query {
mlFeatureTable(urn: "urn:li:mlFeatureTable:(urn:li:dataPlatform:feast,test_feature_table_all_feature_dtypes)"){
name
description
platform {
name
}
properties {
description
mlFeatures {
name
}
}
}
}

Expected Response:

{
"data": {
"mlFeatureTable": {
"name": "test_feature_table_all_feature_dtypes",
"description": null,
"platform": {
"name": "feast"
},
"properties": {
"description": null,
"mlFeatures": [
{
"name": "test_BOOL_LIST_feature"
},
...
{
"name": "test_STRING_feature"
}
]
}
}
},
"extensions": {}
}

Read MLModel

query {
mlModel(urn: "urn:li:mlModel:(urn:li:dataPlatform:science,scienceModel,PROD)"){
name
description
properties {
description
version
type
mlFeatures
groups {
urn
name
}
}
}
}

Expected Response:

{
"data": {
"mlModel": {
"name": "scienceModel",
"description": "A sample model for predicting some outcome.",
"properties": {
"description": "A sample model for predicting some outcome.",
"version": null,
"type": "Naive Bayes classifier",
"mlFeatures": null,
"groups": []
}
}
},
"extensions": {}
}

Read MLModelGroup

query {
mlModelGroup(urn: "urn:li:mlModelGroup:(urn:li:dataPlatform:science,my-model-group,PROD)"){
name
description
platform {
name
}
properties {
description
}
}
}

Expected Response: (Note that this entity does not exist in the sample ingestion and you might want to create this entity first.)

{
"data": {
"mlModelGroup": {
"name": "my-model-group",
"description": "my model group",
"platform": {
"name": "science"
},
"properties": {
"description": "my model group"
}
}
},
"extensions": {}
}

Add ML Entities

Add MlFeature to MlFeatureTable

# Inlined from /metadata-ingestion/examples/library/add_mlfeature_to_mlfeature_table.py
import datahub.emitter.mce_builder as builder
import datahub.metadata.schema_classes as models
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.ingestion.graph.client import DatahubClientConfig, DataHubGraph
from datahub.metadata.schema_classes import MLFeatureTablePropertiesClass

gms_endpoint = "http://localhost:8080"
# Create an emitter to DataHub over REST
emitter = DatahubRestEmitter(gms_server=gms_endpoint, extra_headers={})

feature_table_urn = builder.make_ml_feature_table_urn(
feature_table_name="my-feature-table", platform="feast"
)
feature_urns = [
builder.make_ml_feature_urn(
feature_name="my-feature2", feature_table_name="my-feature-table"
),
]

# This code concatenates the new features with the existing features in the feature table.
# If you want to replace all existing features with only the new ones, you can comment out this line.
graph = DataHubGraph(DatahubClientConfig(server=gms_endpoint))
feature_table_properties = graph.get_aspect(
entity_urn=feature_table_urn, aspect_type=MLFeatureTablePropertiesClass
)
if feature_table_properties:
current_features = feature_table_properties.mlFeatures
print("current_features:", current_features)
if current_features:
feature_urns += current_features

feature_table_properties = models.MLFeatureTablePropertiesClass(mlFeatures=feature_urns)
# MCP creation
metadata_change_proposal = MetadataChangeProposalWrapper(
entityType="mlFeatureTable",
changeType=models.ChangeTypeClass.UPSERT,
entityUrn=feature_table_urn,
aspect=feature_table_properties,
)

# Emit metadata! This is a blocking call
emitter.emit(metadata_change_proposal)

Add MlFeature to MLModel

# Inlined from /metadata-ingestion/examples/library/add_mlfeature_to_mlmodel.py
import datahub.emitter.mce_builder as builder
import datahub.metadata.schema_classes as models
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.ingestion.graph.client import DatahubClientConfig, DataHubGraph
from datahub.metadata.schema_classes import MLModelPropertiesClass

gms_endpoint = "http://localhost:8080"
# Create an emitter to DataHub over REST
emitter = DatahubRestEmitter(gms_server=gms_endpoint, extra_headers={})

model_urn = builder.make_ml_model_urn(
model_name="my-test-model", platform="science", env="PROD"
)
feature_urns = [
builder.make_ml_feature_urn(
feature_name="my-feature3", feature_table_name="my-feature-table"
),
]

# This code concatenates the new features with the existing features in the model
# If you want to replace all existing features with only the new ones, you can comment out this line.
graph = DataHubGraph(DatahubClientConfig(server=gms_endpoint))
model_properties = graph.get_aspect(
entity_urn=model_urn, aspect_type=MLModelPropertiesClass
)
if model_properties:
current_features = model_properties.mlFeatures
print("current_features:", current_features)
if current_features:
feature_urns += current_features

model_properties = models.MLModelPropertiesClass(mlFeatures=feature_urns)

# MCP creation
metadata_change_proposal = MetadataChangeProposalWrapper(
entityType="mlModel",
changeType=models.ChangeTypeClass.UPSERT,
entityUrn=model_urn,
aspect=model_properties,
)

# Emit metadata!
emitter.emit(metadata_change_proposal)

Add MLGroup To MLModel

# Inlined from /metadata-ingestion/examples/library/add_mlgroup_to_mlmodel.py
import datahub.emitter.mce_builder as builder
import datahub.metadata.schema_classes as models
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.ingestion.graph.client import DatahubClientConfig, DataHubGraph

gms_endpoint = "http://localhost:8080"
# Create an emitter to DataHub over REST
emitter = DatahubRestEmitter(gms_server=gms_endpoint, extra_headers={})

model_group_urns = [
builder.make_ml_model_group_urn(
group_name="my-model-group", platform="science", env="PROD"
)
]
model_urn = builder.make_ml_model_urn(
model_name="science-model", platform="science", env="PROD"
)

# This code concatenates the new model groups with the existing groups in the model.
# If you want to replace all existing features with only the new ones, you can comment out this line.
graph = DataHubGraph(DatahubClientConfig(server=gms_endpoint))

target_model_properties = graph.get_aspect(
entity_urn=model_urn, aspect_type=models.MLModelPropertiesClass
)
if target_model_properties:
current_model_groups = target_model_properties.groups
print("current_model_groups:", current_model_groups)
if current_model_groups:
model_group_urns += current_model_groups

model_properties = models.MLModelPropertiesClass(groups=model_group_urns)
# MCP creation
metadata_change_proposal = MetadataChangeProposalWrapper(
entityType="mlModel",
changeType=models.ChangeTypeClass.UPSERT,
entityUrn=model_urn,
aspect=model_properties,
)

# Emit metadata! This is a blocking call
emitter.emit(metadata_change_proposal)

Expected Outcome of Adding ML Entities

You can access the Features or Group tab of each entity to view the added entities.

feature-added-to-model

model-group-added-to-model

- + \ No newline at end of file diff --git a/docs/api/tutorials/owners/index.html b/docs/api/tutorials/owners/index.html index 34605deff3d75..df97cfff886c3 100644 --- a/docs/api/tutorials/owners/index.html +++ b/docs/api/tutorials/owners/index.html @@ -8,7 +8,7 @@ - + @@ -21,7 +21,7 @@ user-upserted

Upsert Group

Save this group.yaml as a local file. Note that the group includes a list of users who are owners and members. Within these lists, you can refer to the users by their ids or their urns, and can additionally specify their metadata inline within the group description itself. See the example below to understand how this works and feel free to make modifications to this file locally to see the effects of your changes in your local DataHub instance.

id: foogroup@acryl.io
display_name: Foo Group
owners:
- datahub
members:
- bar@acryl.io # refer to a user either by id or by urn
- id: joe@acryl.io # inline specification of user
slack: "@joe_shmoe"
display_name: "Joe's Hub"

Execute the following CLI command to ingest this group's information.

datahub group upsert -f group.yaml

If you see the following logs, the operation was successful:

Update succeeded for group urn:li:corpGroup:foogroup@acryl.io.

Expected Outcomes of Upserting Group

You can see the group Foo Group has been created under Settings > Access > Users & Groups group-upserted

Read Owners

query {
dataset(urn: "urn:li:dataset:(urn:li:dataPlatform:hive,SampleHiveDataset,PROD)") {
ownership {
owners {
owner {
... on CorpUser {
urn
type
}
... on CorpGroup {
urn
type
}
}
}
}
}
}

If you see the following response, the operation was successful:

{
"data": {
"dataset": {
"ownership": {
"owners": [
{
"owner": {
"urn": "urn:li:corpuser:jdoe",
"type": "CORP_USER"
}
},
{
"owner": {
"urn": "urn:li:corpuser:datahub",
"type": "CORP_USER"
}
}
]
}
}
},
"extensions": {}
}

Add Owners

mutation addOwners {
addOwner(
input: {
ownerUrn: "urn:li:corpGroup:bfoo",
resourceUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)",
ownerEntityType: CORP_GROUP,
type: TECHNICAL_OWNER
}
)
}

Expected Response:

{
"data": {
"addOwner": true
},
"extensions": {}
}

Expected Outcomes of Adding Owner

You can now see bfoo has been added as an owner to the fct_users_created dataset.

ownership-added

Remove Owners

mutation removeOwners {
removeOwner(
input: {
ownerUrn: "urn:li:corpuser:jdoe",
resourceUrn: "urn:li:dataset:(urn:li:dataPlatform:hdfs,SampleHdfsDataset,PROD)",
}
)
}

Note that you can also remove owners from multiple entities or subresources using batchRemoveOwners.

mutation batchRemoveOwners {
batchRemoveOwners(
input: {
ownerUrns: ["urn:li:corpuser:jdoe"],
resources: [
{ resourceUrn:"urn:li:dataset:(urn:li:dataPlatform:hdfs,SampleHdfsDataset,PROD)"} ,
{ resourceUrn:"urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)"} ,]
}
)
}

Expected Response:

{
"data": {
"removeOwner": true
},
"extensions": {}
}

Expected Outcomes of Removing Owners

You can now see John Doe has been removed as an owner from the fct_users_created dataset.

ownership-removed

- + \ No newline at end of file diff --git a/docs/api/tutorials/tags/index.html b/docs/api/tutorials/tags/index.html index 10513f68677e8..41c43d114c6e7 100644 --- a/docs/api/tutorials/tags/index.html +++ b/docs/api/tutorials/tags/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ In this guide, we will be using data from sample ingestion.

For more information on how to set up for GraphQL, please refer to How To Set Up GraphQL.

Create Tags

The following code creates a tag Deprecated.

mutation createTag {
createTag(input:
{
name: "Deprecated",
id: "deprecated",
description: "Having this tag means this column or table is deprecated."
})
}

If you see the following response, the operation was successful:

{
"data": {
"createTag": "urn:li:tag:deprecated"
},
"extensions": {}
}

Expected Outcome of Creating Tags

You can now see the new tag Deprecated has been created.

tag-created

We can also verify this operation by programmatically searching for the Deprecated tag after running this code using the datahub cli.

datahub get --urn "urn:li:tag:deprecated" --aspect tagProperties

{
"tagProperties": {
"description": "Having this tag means this column or table is deprecated.",
"name": "Deprecated"
}
}

Read Tags

query {
dataset(urn: "urn:li:dataset:(urn:li:dataPlatform:hive,SampleHiveDataset,PROD)") {
tags {
tags {
tag {
name
urn
properties {
description
colorHex
}
}
}
}
}
}

If you see the following response, the operation was successful:

{
"data": {
"dataset": {
"tags": {
"tags": [
{
"tag": {
"name": "Legacy",
"urn": "urn:li:tag:Legacy",
"properties": {
"description": "Indicates the dataset is no longer supported",
"colorHex": null,
"name": "Legacy"
}
}
}
]
}
}
},
"extensions": {}
}

Add Tags

Add Tags to a dataset

The following code shows you how to add tags to a dataset. In the following code, we add a tag Deprecated to a dataset named fct_users_created.

mutation addTags {
addTags(
input: {
tagUrns: ["urn:li:tag:deprecated"],
resourceUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)",
}
)
}

If you see the following response, the operation was successful:

{
"data": {
"addTags": true
},
"extensions": {}
}

Add Tags to a Column of a dataset

mutation addTags {
addTags(
input: {
tagUrns: ["urn:li:tag:deprecated"],
resourceUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)",
subResourceType:DATASET_FIELD,
subResource:"user_name"})
}

Expected Outcome of Adding Tags

You can now see Deprecated tag has been added to user_name column.

tag-added

We can also verify this operation programmatically by checking the globalTags aspect using the datahub cli.

datahub get --urn "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)" --aspect globalTags

Remove Tags

The following code removes a tag from a dataset. After running this code, the Deprecated tag will be removed from the user_name column.

mutation removeTag {
removeTag(
input: {
tagUrn: "urn:li:tag:deprecated",
resourceUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)",
subResourceType:DATASET_FIELD,
subResource:"user_name"})
}

Expected Outcome of Removing Tags

You can now see the Deprecated tag has been removed from the user_name column.

tag-removed

We can also verify this operation programmatically by checking the globalTags aspect using the datahub cli.

datahub get --urn "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)" --aspect globalTags

{
"globalTags": {
"tags": []
}
}
- + \ No newline at end of file diff --git a/docs/api/tutorials/terms/index.html b/docs/api/tutorials/terms/index.html index a6fcca22dafe8..dc2d7b43a88a5 100644 --- a/docs/api/tutorials/terms/index.html +++ b/docs/api/tutorials/terms/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ In this guide, we will be using data from sample ingestion.

For more information on how to set up for GraphQL, please refer to How To Set Up GraphQL.

Create Terms

The following code creates a term Rate of Return.

mutation createGlossaryTerm {
createGlossaryTerm(input: {
name: "Rate of Return",
id: "rateofreturn",
description: "A rate of return (RoR) is the net gain or loss of an investment over a specified time period."
},
)
}

If you see the following response, the operation was successful:

{
"data": {
"createGlossaryTerm": "urn:li:glossaryTerm:rateofreturn"
},
"extensions": {}
}

Expected Outcome of Creating Terms

You can now see the new term Rate of Return has been created.

term-created

We can also verify this operation by programmatically searching for the Rate of Return term after running this code using the datahub cli.

datahub get --urn "urn:li:glossaryTerm:rateofreturn" --aspect glossaryTermInfo

{
"glossaryTermInfo": {
"definition": "A rate of return (RoR) is the net gain or loss of an investment over a specified time period.",
"name": "Rate of Return",
"termSource": "INTERNAL"
}
}

Read Terms

query {
dataset(urn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)") {
glossaryTerms {
terms {
term {
urn
glossaryTermInfo {
name
description
}
}
}
}
}
}

If you see the following response, the operation was successful:

{
"data": {
"dataset": {
"glossaryTerms": {
"terms": [
{
"term": {
"urn": "urn:li:glossaryTerm:CustomerAccount",
"glossaryTermInfo": {
"name": "CustomerAccount",
"description": "account that represents an identified, named collection of balances and cumulative totals used to summarize customer transaction-related activity over a designated period of time"
}
}
}
]
}
}
},
"extensions": {}
}

Add Terms

Add Terms to a dataset

The following code shows you how to add terms to a dataset. In the following code, we add a term Rate of Return to a dataset named fct_users_created.

mutation addTerms {
addTerms(
input: {
termUrns: ["urn:li:glossaryTerm:rateofreturn"],
resourceUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)",
}
)
}

If you see the following response, the operation was successful:

{
"data": {
"addTerms": true
},
"extensions": {}
}

Add Terms to a Column of a Dataset

mutation addTerms {
addTerms(
input: {
termUrns: ["urn:li:glossaryTerm:rateofreturn"],
resourceUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)",
subResourceType:DATASET_FIELD,
subResource:"user_name"})
}

Expected Outcome of Adding Terms

You can now see Rate of Return term has been added to user_name column.

term-added

Remove Terms

The following code removes a term from a dataset. After running this code, the Rate of Return term will be removed from the user_name column.

mutation removeTerm {
removeTerm(
input: {
termUrn: "urn:li:glossaryTerm:rateofreturn",
resourceUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)",
subResourceType:DATASET_FIELD,
subResource:"user_name"})
}

Note that you can also remove a term from a dataset if you don't specify subResourceType and subResource.

mutation removeTerm {
removeTerm(
input: {
termUrn: "urn:li:glossaryTerm:rateofreturn",
resourceUrn: "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)",
})
}

Also note that you can remove terms from multiple entities or subresources using batchRemoveTerms.

mutation batchRemoveTerms {
batchRemoveTerms(
input: {
termUrns: ["urn:li:glossaryTerm:rateofreturn"],
resources: [
{ resourceUrn:"urn:li:dataset:(urn:li:dataPlatform:hdfs,SampleHdfsDataset,PROD)"} ,
{ resourceUrn:"urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)"} ,]
}
)
}

Expected Outcome of Removing Terms

You can now see the Rate of Return term has been removed from the user_name column.

term-removed

- + \ No newline at end of file diff --git a/docs/architecture/architecture/index.html b/docs/architecture/architecture/index.html index 2d7e35eb97274..4e1d563f83b23 100644 --- a/docs/architecture/architecture/index.html +++ b/docs/architecture/architecture/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ that is built for the Modern Data Stack. DataHub employs a model-first philosophy, with a focus on unlocking interoperability between disparate tools & systems.

The figures below describe the high-level architecture of DataHub.

datahub-architecture Acryl DataHub System Architecture

For a more detailed look at the components that make up the Architecture, check out Components.

Architecture Highlights

There are three main highlights of DataHub's architecture.

Schema-first approach to Metadata Modeling

DataHub's metadata model is described using a serialization agnostic language. Both REST as well as GraphQL API-s are supported. In addition, DataHub supports an AVRO-based API over Kafka to communicate metadata changes and subscribe to them. Our roadmap includes a milestone to support no-code metadata model edits very soon, which will allow for even more ease of use, while retaining all the benefits of a typed API. Read about metadata modeling at metadata modeling.

Stream-based Real-time Metadata Platform

DataHub's metadata infrastructure is stream-oriented, which allows for changes in metadata to be communicated and reflected within the platform within seconds. You can also subscribe to changes happening in DataHub's metadata, allowing you to build real-time metadata-driven systems. For example, you can build an access-control system that can observe a previously world-readable dataset adding a new schema field which contains PII, and locks down that dataset for access control reviews.

Federated Metadata Serving

DataHub comes with a single metadata service (gms) as part of the open source repository. However, it also supports federated metadata services which can be owned and operated by different teams –– in fact, that is how LinkedIn runs DataHub internally. The federated services communicate with the central search index and graph using Kafka, to support global search and discovery while still enabling decoupled ownership of metadata. This kind of architecture is very amenable for companies who are implementing data mesh.

- + \ No newline at end of file diff --git a/docs/architecture/docker-containers/index.html b/docs/architecture/docker-containers/index.html index 0f143931699c1..5df90a0bd087e 100644 --- a/docs/architecture/docker-containers/index.html +++ b/docs/architecture/docker-containers/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

Docker Container Architecture

When running DataHub via docker-compose or helm, the following is a diagram of the containers involved with running DataHub and their relationships with each other. The helm chart uses helm hooks to determine the proper ordering of the components whereas docker-compose relies on a series of health checks.

                datahub-frontend-react  datahub-actions
\ /
| datahub-upgrade (NoCodeDataMigration, helm only)
| /
datahub-gms (healthy)
|
datahub-upgrade (SystemUpdate completed)
/--------------------/ | \ \------------------------------------------------\
/ | \-------------------\ \
mysql-setup (completed) elasticsearch-setup (completed) kafka-setup (completed) (if apply) neo4j (healthy)
| | / \
| | / \
mysql (healthy) elasticsearch (healthy) broker (healthy) (if not internal) schema-registry (healthy)
|
zookeeper (healthy)
- + \ No newline at end of file diff --git a/docs/architecture/metadata-ingestion/index.html b/docs/architecture/metadata-ingestion/index.html index 3dd292f5db90d..30cb09bd44b0c 100644 --- a/docs/architecture/metadata-ingestion/index.html +++ b/docs/architecture/metadata-ingestion/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ The figure below describes all the options possible for connecting your favorite system to DataHub. Ingestion Architecture

Metadata Change Proposal: The Center Piece

The center piece for ingestion are Metadata Change Proposals which represent requests to make a metadata change to an organization's Metadata Graph. Metadata Change Proposals can be sent over Kafka, for highly scalable async publishing from source systems. They can also be sent directly to the HTTP endpoint exposed by the DataHub service tier to get synchronous success / failure responses.

Pull-based Integration

DataHub ships with a Python based metadata-ingestion system that can connect to different sources to pull metadata from them. This metadata is then pushed via Kafka or HTTP to the DataHub storage tier. Metadata ingestion pipelines can be integrated with Airflow to set up scheduled ingestion or capture lineage. If you don't find a source already supported, it is very easy to write your own.

Push-based Integration

As long as you can emit a Metadata Change Proposal (MCP) event to Kafka or make a REST call over HTTP, you can integrate any system with DataHub. For convenience, DataHub also provides simple Python emitters for you to integrate into your systems to emit metadata changes (MCP-s) at the point of origin.

Internal Components

Applying Metadata Change Proposals to DataHub Metadata Service (mce-consumer-job)

DataHub comes with a Spring job, mce-consumer-job, which consumes the Metadata Change Proposals and writes them into the DataHub Metadata Service (datahub-gms) using the /ingest endpoint.

- + \ No newline at end of file diff --git a/docs/architecture/metadata-serving/index.html b/docs/architecture/metadata-serving/index.html index 6c464d879a3cb..463d1a53de3c8 100644 --- a/docs/architecture/metadata-serving/index.html +++ b/docs/architecture/metadata-serving/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ Note that not all MCP-s will result in an MCL, because the DataHub serving tier will ignore any duplicate changes to metadata.

Metadata Index Applier (mae-consumer-job)

Metadata Change Logs are consumed by another Spring job, mae-consumer-job, which applies the changes to the graph and search index accordingly. The job is entity-agnostic and will execute corresponding graph & search index builders, which will be invoked by the job when a specific metadata aspect is changed. The builder should instruct the job how to update the graph and search index based on the metadata change.

To ensure that metadata changes are processed in the correct chronological order, MCLs are keyed by the entity URN — meaning all MAEs for a particular entity will be processed sequentially by a single thread.

Metadata Query Serving

Primary-key based reads (e.g. getting schema metadata for a dataset based on the dataset-urn) on metadata are routed to the document store. Secondary index based reads on metadata are routed to the search index (or alternately can use the strongly consistent secondary index support described here). Full-text and advanced search queries are routed to the search index. Complex graph queries such as lineage are routed to the graph index.

- + \ No newline at end of file diff --git a/docs/authentication/changing-default-credentials/index.html b/docs/authentication/changing-default-credentials/index.html index ba4a82e7327ee..abf5ff56cd51f 100644 --- a/docs/authentication/changing-default-credentials/index.html +++ b/docs/authentication/changing-default-credentials/index.html @@ -8,7 +8,7 @@ - + @@ -22,7 +22,7 @@ For example, to change the password for the DataHub root user to 'newpassword', your file would contain the following:

// new user.props
datahub:newpassword

2. Mount the updated config file

Change the docker-compose.yaml to mount an updated user.props file to the following location inside the datahub-frontend-react container using a volume:/datahub-frontend/conf/user.props

  datahub-frontend-react:
...
volumes:
...
- <absolute_path_to_your_custom_user_props_file>:/datahub-frontend/conf/user.props

3. Restart DataHub

Restart the DataHub containers or pods to pick up the new configs.

Quickstart

1. Modify a config file

Modify user.props which defines the updated password for the datahub user.

To remove the user 'datahub' from the new file, simply omit the username. Please note that you can also choose to leave the file empty. For example, to change the password for the DataHub root user to 'newpassword', your file would contain the following:

// new user.props
datahub:newpassword

2. Mount the updated config file

In docker-compose file used in quickstart. Modify the datahub-frontend-react block to contain the extra volume mount.

  datahub-frontend-react:
...
volumes:
...
- <absolute_path_to_your_custom_user_props_file>:/datahub-frontend/conf/user.props

3. Restart Datahub

Run the following command.

datahub docker quickstart --quickstart-compose-file <your-modified-compose>.yml
- + \ No newline at end of file diff --git a/docs/authentication/concepts/index.html b/docs/authentication/concepts/index.html index a2c32adb6ab12..2ec35747821da 100644 --- a/docs/authentication/concepts/index.html +++ b/docs/authentication/concepts/index.html @@ -8,7 +8,7 @@ - + @@ -30,7 +30,7 @@ OIDC, the datahub-frontend service issues an request to the Metadata Service to generate a SESSION token on behalf of of the user logging in. (*Only the frontend service is authorized to perform this action).
  • Generating Personal Access Tokens: When a user requests to generate a Personal Access Token (described below) from the UI.
  • At present, the Token Service supports the symmetric signing method HS256 to generate and verify tokens.

    Now that we're familiar with the concepts, we will talk concretely about what new capabilities have been built on top of Metadata Service Authentication.

    - + \ No newline at end of file diff --git a/docs/authentication/guides/add-users/index.html b/docs/authentication/guides/add-users/index.html index c7917346aaab9..e8ba27f7bc103 100644 --- a/docs/authentication/guides/add-users/index.html +++ b/docs/authentication/guides/add-users/index.html @@ -8,7 +8,7 @@ - + @@ -38,7 +38,7 @@ Edit Profile. Add some details like a display name, an email, and more. Then click Save. Now you should be able to find the user via search.

    You can also use our Python Emitter SDK to produce custom information about the new user via the CorpUser metadata entity.

    For a more comprehensive overview of how users & groups are managed within DataHub, check out this video.

    FAQ

    1. Can I enable OIDC and username / password (JaaS) authentication at the same time?

    YES! If you have not explicitly disabled JaaS via an environment variable on the datahub-frontend container (AUTH_JAAS_ENABLED), then you can always access the standard login flow at http://your-datahub-url.com/login.

    Feedback / Questions / Concerns

    We want to hear from you! For any inquiries, including Feedback, Questions, or Concerns, reach out on Slack!

    - + \ No newline at end of file diff --git a/docs/authentication/guides/jaas/index.html b/docs/authentication/guides/jaas/index.html index a70d20f550439..8dc72bfc1400d 100644 --- a/docs/authentication/guides/jaas/index.html +++ b/docs/authentication/guides/jaas/index.html @@ -8,7 +8,7 @@ - + @@ -21,7 +21,7 @@ /datahub-frontend/conf/user.props. If you wish to launch this container with a custom set of users, you'll need to override the default file mounting when running using docker-compose.

    To do so, change the datahub-frontend-react service in the docker-compose.yml file containing it to include the custom file:

    datahub-frontend-react:
    build:
    context: ../
    dockerfile: docker/datahub-frontend/Dockerfile
    image: linkedin/datahub-frontend-react:${DATAHUB_VERSION:-head}
    env_file: datahub-frontend/env/docker.env
    hostname: datahub-frontend-react
    container_name: datahub-frontend-react
    ports:
    - "9002:9002"
    depends_on:
    - datahub-gms
    volumes:
    - ./my-custom-dir/user.props:/datahub-frontend/conf/user.props

    And then run docker-compose up against your compose file.

    Custom JaaS Configuration

    In order to change the default JaaS module configuration, you will have to launch the datahub-frontend-react container with the custom jaas.conf file mounted as a volume at the location /datahub-frontend/conf/jaas.conf.

    To do so, change the datahub-frontend-react service in the docker-compose.yml file containing it to include the custom file:

    datahub-frontend-react:
    build:
    context: ../
    dockerfile: docker/datahub-frontend/Dockerfile
    image: linkedin/datahub-frontend-react:${DATAHUB_VERSION:-head}
    env_file: datahub-frontend/env/docker.env
    hostname: datahub-frontend-react
    container_name: datahub-frontend-react
    ports:
    - "9002:9002"
    depends_on:
    - datahub-gms
    volumes:
    - ./my-custom-dir/jaas.conf:/datahub-frontend/conf/jaas.conf

    And then run docker-compose up against your compose file.

    - + \ No newline at end of file diff --git a/docs/authentication/guides/sso/configure-oidc-react-azure/index.html b/docs/authentication/guides/sso/configure-oidc-react-azure/index.html index 512065e46f1dd..996a2c0b047d1 100644 --- a/docs/authentication/guides/sso/configure-oidc-react-azure/index.html +++ b/docs/authentication/guides/sso/configure-oidc-react-azure/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ please see this guide to mount a custom user.props file for a JAAS authenticated deployment.

    Steps

    1. Create an application registration in Microsoft Azure portal

    a. Using an account linked to your organization, navigate to the Microsoft Azure Portal.

    b. Select App registrations, then New registration to register a new app.

    c. Name your app registration and choose who can access your application.

    d. Select Web as the Redirect URI type and enter the following:

    https://your-datahub-domain.com/callback/oidc

    If you are just testing locally, the following can be used: http://localhost:9002/callback/oidc. Azure supports more than one redirect URI, so both can be configured at the same time from the Authentication tab once the registration is complete.

    At this point, your app registration should look like the following:

    azure-setup-app-registration

    e. Click Register.

    2. Configure Authentication (optional)

    Once registration is done, you will land on the app registration Overview tab. On the left-side navigation bar, click on Authentication under Manage and add extra redirect URIs if need be (if you want to support both local testing and Azure deployments).

    azure-setup-authentication

    Click Save.

    3. Configure Certificates & secrets

    On the left-side navigation bar, click on Certificates & secrets under Manage.
    Select Client secrets, then New client secret. Type in a meaningful description for your secret and select an expiry. Click the Add button when you are done.

    IMPORTANT: Copy the value of your newly created secret, since Azure will never display its value afterwards.

    azure-setup-certificates-secrets

    4. Configure API permissions

    On the left-side navigation bar, click on API permissions under Manage. DataHub requires the following four Microsoft Graph APIs:

    1. User.Read (should be already configured)
    2. profile
    3. email
    4. openid

    Click on Add a permission, then from the Microsoft APIs tab select Microsoft Graph, then Delegated permissions. From the OpenId permissions category, select email, openid, profile and click Add permissions.

    At this point, you should be looking at a screen like the following:

    azure-setup-api-permissions

    5. Obtain Application (Client) ID

    On the left-side navigation bar, go back to the Overview tab. You should see the Application (client) ID. Save its value for the next step.

    6. Obtain Discovery URI

    On the same page, you should see a Directory (tenant) ID. Your OIDC discovery URI will be formatted as follows:

    https://login.microsoftonline.com/{tenant ID}/v2.0/.well-known/openid-configuration

    7. Configure datahub-frontend to enable OIDC authentication

    a. Open the file docker/datahub-frontend/env/docker.env

    b. Add the following configuration values to the file:

    AUTH_OIDC_ENABLED=true
    AUTH_OIDC_CLIENT_ID=your-client-id
    AUTH_OIDC_CLIENT_SECRET=your-client-secret
    AUTH_OIDC_DISCOVERY_URI=https://login.microsoftonline.com/{tenant ID}/v2.0/.well-known/openid-configuration
    AUTH_OIDC_BASE_URL=your-datahub-url
    AUTH_OIDC_SCOPE="openid profile email"

    Replacing the placeholders above with the client id (step 5), client secret (step 3) and tenant ID (step 6) received from Microsoft Azure.

    8. Restart datahub-frontend-react docker container

    Now, simply restart the datahub-frontend-react container to enable the integration.

    docker-compose -p datahub -f docker-compose.yml -f docker-compose.override.yml  up datahub-frontend-react

    Navigate to your DataHub domain to see SSO in action.

    Resources

    - + \ No newline at end of file diff --git a/docs/authentication/guides/sso/configure-oidc-react-google/index.html b/docs/authentication/guides/sso/configure-oidc-react-google/index.html index b636f02fdf1cb..6f99a2eb29f7c 100644 --- a/docs/authentication/guides/sso/configure-oidc-react-google/index.html +++ b/docs/authentication/guides/sso/configure-oidc-react-google/index.html @@ -8,7 +8,7 @@ - + @@ -23,7 +23,7 @@ Note that in order to complete this step you should be logged into a Google account associated with your organization.

    c. Fill out the details in the App Information & Domain sections. Make sure the 'Application Home Page' provided matches where DataHub is deployed at your organization.

    google-setup-1

    Once you've completed this, Save & Continue.

    d. Configure the scopes: Next, click Add or Remove Scopes. Select the following scopes:

    - `.../auth/userinfo.email`
    - `.../auth/userinfo.profile`
    - `openid`

    Once you've selected these, Save & Continue.

    3. Configure client credentials

    Now navigate to the Credentials tab. This is where you'll obtain your client id & secret, as well as configure info like the redirect URI used after a user is authenticated.

    a. Click Create Credentials & select OAuth client ID as the credential type.

    b. On the following screen, select Web application as your Application Type.

    c. Add the domain where DataHub is hosted to your 'Authorized Javascript Origins'.

    https://your-datahub-domain.com

    d. Add the domain where DataHub is hosted with the path /callback/oidc appended to 'Authorized Redirect URLs'.

    https://your-datahub-domain.com/callback/oidc

    e. Click Create

    f. You will now receive a pair of values, a client id and a client secret. Bookmark these for the next step.

    At this point, you should be looking at a screen like the following:

    google-setup-2

    Success!

    4. Configure datahub-frontend to enable OIDC authentication

    a. Open the file docker/datahub-frontend/env/docker.env

    b. Add the following configuration values to the file:

    AUTH_OIDC_ENABLED=true
    AUTH_OIDC_CLIENT_ID=your-client-id
    AUTH_OIDC_CLIENT_SECRET=your-client-secret
    AUTH_OIDC_DISCOVERY_URI=https://accounts.google.com/.well-known/openid-configuration
    AUTH_OIDC_BASE_URL=your-datahub-url
    AUTH_OIDC_SCOPE="openid profile email"
    AUTH_OIDC_USER_NAME_CLAIM=email
    AUTH_OIDC_USER_NAME_CLAIM_REGEX=([^@]+)

    Replacing the placeholders above with the client id & client secret received from Google in Step 3f.

    5. Restart datahub-frontend-react docker container

    Now, simply restart the datahub-frontend-react container to enable the integration.

    docker-compose -p datahub -f docker-compose.yml -f docker-compose.override.yml  up datahub-frontend-react

    Navigate to your DataHub domain to see SSO in action.

    References

    - + \ No newline at end of file diff --git a/docs/authentication/guides/sso/configure-oidc-react-okta/index.html b/docs/authentication/guides/sso/configure-oidc-react-okta/index.html index a8d6cfe93c929..5491ed7c6e27f 100644 --- a/docs/authentication/guides/sso/configure-oidc-react-okta/index.html +++ b/docs/authentication/guides/sso/configure-oidc-react-okta/index.html @@ -8,7 +8,7 @@ - + @@ -21,7 +21,7 @@ can set if to

    https://your-datahub-domain.com/authenticate

    If you're just testing locally, this can be http://localhost:9002.

    i. Click Save

    2. Obtain Client Credentials

    On the subsequent screen, you should see the client credentials. Bookmark the Client id and Client secret for the next step.

    3. Obtain Discovery URI

    On the same page, you should see an Okta Domain. Your OIDC discovery URI will be formatted as follows:

    https://your-okta-domain.com/.well-known/openid-configuration

    for example, https://dev-33231928.okta.com/.well-known/openid-configuration.

    At this point, you should be looking at a screen like the following:

    okta-setup-1 okta-setup-2

    Success!

    4. Configure datahub-frontend to enable OIDC authentication

    a. Open the file docker/datahub-frontend/env/docker.env

    b. Add the following configuration values to the file:

    AUTH_OIDC_ENABLED=true
    AUTH_OIDC_CLIENT_ID=your-client-id
    AUTH_OIDC_CLIENT_SECRET=your-client-secret
    AUTH_OIDC_DISCOVERY_URI=https://your-okta-domain.com/.well-known/openid-configuration
    AUTH_OIDC_BASE_URL=your-datahub-url
    AUTH_OIDC_SCOPE="openid profile email groups"

    Replacing the placeholders above with the client id & client secret received from Okta in Step 2.

    Pro Tip! You can easily enable Okta to return the groups that a user is associated with, which will be provisioned in DataHub along with the user logging in. This can be enabled by setting the AUTH_OIDC_EXTRACT_GROUPS_ENABLED flag to true; groups will be created in DataHub if they do not already exist. You can enable your Okta application to return a 'groups' claim from the Okta Console at Applications > Your Application -> Sign On -> OpenID Connect ID Token Settings (Requires an edit).

    By default, we assume that the groups will appear in a claim named "groups". This can be customized using the AUTH_OIDC_GROUPS_CLAIM container configuration.

    okta-setup-2

    5. Restart datahub-frontend-react docker container

    Now, simply restart the datahub-frontend-react container to enable the integration.

    docker-compose -p datahub -f docker-compose.yml -f docker-compose.override.yml  up datahub-frontend-react

    Navigate to your DataHub domain to see SSO in action.

    Resources

    - + \ No newline at end of file diff --git a/docs/authentication/guides/sso/configure-oidc-react/index.html b/docs/authentication/guides/sso/configure-oidc-react/index.html index 8a0fae5ed1b4f..2ec682e78721f 100644 --- a/docs/authentication/guides/sso/configure-oidc-react/index.html +++ b/docs/authentication/guides/sso/configure-oidc-react/index.html @@ -8,7 +8,7 @@ - + @@ -50,7 +50,7 @@ around why exactly the login handoff is not working.

    If all else fails, feel free to reach out to the DataHub Community on Slack for real-time support

    I'm seeing an error in the datahub-frontend logs when a user tries to login

    Caused by: java.lang.RuntimeException: Failed to resolve user name claim from profile provided by Identity Provider. Missing attribute. Attribute: 'email', Regex: '(.*)', Profile: { ...

    what do I do?

    This indicates that your Identity Provider does not provide the claim with name 'email', which DataHub uses by default to uniquely identify users within your organization.

    To fix this, you may need to

    1. Change the claim that is used as the unique user identifier to something else by changing the AUTH_OIDC_USER_NAME_CLAIM (e.g. to "name" or "preferred_username"), OR
    2. Change the environment variable AUTH_OIDC_SCOPE to include the scope required to retrieve the claim with name "email"

    For the datahub-frontend container / pod.

    Pro-Tip: Check the documentation for your Identity Provider to learn more about the scopes and claims supported.

    - + \ No newline at end of file diff --git a/docs/authentication/index.html b/docs/authentication/index.html index 145d0d497f8d5..88c298abf4023 100644 --- a/docs/authentication/index.html +++ b/docs/authentication/index.html @@ -8,7 +8,7 @@ - + @@ -26,7 +26,7 @@ backend (token-based) authentication, simply set the METADATA_SERVICE_AUTH_ENABLED=true environment variable for the datahub-gms container or pod.

    References

    For a quick video on the topic of users and groups within DataHub, have a look at DataHub Basics — Users, Groups, & Authentication 101

    - + \ No newline at end of file diff --git a/docs/authentication/introducing-metadata-service-authentication/index.html b/docs/authentication/introducing-metadata-service-authentication/index.html index 12e23f8888fcd..1678cd2d328c4 100644 --- a/docs/authentication/introducing-metadata-service-authentication/index.html +++ b/docs/authentication/introducing-metadata-service-authentication/index.html @@ -8,7 +8,7 @@ - + @@ -48,7 +48,7 @@ This recommendation is in effort to minimize the exposed surface area of DataHub to make securing, operating, maintaining, and developing the platform simpler.

    In practice, this will require migrating Metadata Ingestion Recipes that use the datahub-rest sink to point at a slightly different host + path.

    Example recipe that proxies through DataHub Frontend

    source:
    # source configs
    sink:
    type: "datahub-rest"
    config:
    ...
    token: <your-personal-access-token-here!>

    Feedback / Questions / Concerns

    We want to hear from you! For any inquiries, including Feedback, Questions, or Concerns, reach out on Slack!

    - + \ No newline at end of file diff --git a/docs/authentication/personal-access-tokens/index.html b/docs/authentication/personal-access-tokens/index.html index b146aeb4ca3f2..619edd2bb5fe0 100644 --- a/docs/authentication/personal-access-tokens/index.html +++ b/docs/authentication/personal-access-tokens/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ the generated Access Token as a Bearer token in the Authorization header:

    Authorization: Bearer <generated-access-token> 

    For example, using a curl to the frontend proxy (preferred in production):

    curl 'http://localhost:9002/api/gms/entities/urn:li:corpuser:datahub' -H 'Authorization: Bearer <access-token>'

    or to Metadata Service directly:

    curl 'http://localhost:8080/entities/urn:li:corpuser:datahub' -H 'Authorization: Bearer <access-token>'

    Since authorization happens at the GMS level, this means that ingestion is also protected behind access tokens, to use them simply add a token to the sink config property as seen below:

    note

    Without an access token, making programmatic requests will result in a 401 result from the server if Metadata Service Authentication is enabled.

    Additional Resources

    GraphQL

    FAQ and Troubleshooting

    The button to create tokens is greyed out - why can’t I click on it?

    This means that the user currently logged in DataHub does not have either Generate Personal Access Tokens or Manage All Access Tokens permissions. Please ask your DataHub administrator to grant you those permissions.

    When using a token, I get 401 unauthorized - why?

    A PAT represents a user in DataHub, if that user does not have permissions for a given action, neither will the token.

    Can I create a PAT that represents some other user?

    Yes, although not through the UI. Currently, you will have to use the token management GraphQL API, and the user making the request must have Manage All Access Tokens permissions.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/authorization/access-policies-guide/index.html b/docs/authorization/access-policies-guide/index.html index ce550b98f0f4a..36564684746ff 100644 --- a/docs/authorization/access-policies-guide/index.html +++ b/docs/authorization/access-policies-guide/index.html @@ -8,7 +8,7 @@ - + @@ -40,7 +40,7 @@ Note that these Privileges can and likely should be changed inside the Policies page before onboarding your company's users.

    REST API Authorization

    Policies only affect REST APIs when the environment variable REST_API_AUTHORIZATION is set to true for GMS. Some policies only apply when this setting is enabled, marked above, and other Metadata and Platform policies apply to the APIs where relevant, also specified in the table above.

    Additional Resources

    Videos

    GraphQL

    FAQ and Troubleshooting

    How do Policies relate to Roles?

    Policies are the lowest level primitive for granting Privileges to users on DataHub.

    Roles are built for convenience on top of Policies. Roles grant Privileges to actors indirectly, driven by Policies behind the scenes. Both can be used in conjunction to grant Privileges to end users.

    Need more help? Join the conversation in Slack!


    1. Only active if REST_API_AUTHORIZATION_ENABLED environment flag is enabled
    - + \ No newline at end of file diff --git a/docs/authorization/groups/index.html b/docs/authorization/groups/index.html index a1d80a02dee1d..63a895c0f314d 100644 --- a/docs/authorization/groups/index.html +++ b/docs/authorization/groups/index.html @@ -8,7 +8,7 @@ - + @@ -21,7 +21,7 @@ Azure AD using the DataHub ingestion framework.

    If you routinely ingest groups from these providers, you will also be able to keep groups synced. New groups will be created in DataHub, stale groups will be deleted, and group membership will be updated!

    Custom Groups

    DataHub admins can create custom groups by going to the Settings > Users & Groups > Groups > Create Group. Members can be added to Groups via the Group profile page.

    Feedback / Questions / Concerns

    We want to hear from you! For any inquiries, including Feedback, Questions, or Concerns, reach out on Slack!

    - + \ No newline at end of file diff --git a/docs/authorization/index.html b/docs/authorization/index.html index 8adbc43a0478c..7837fa894ee58 100644 --- a/docs/authorization/index.html +++ b/docs/authorization/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ This section is all about how DataHub authorizes a given user/service that wants to interact with the system.

    note

    Authorization only makes sense in the context of an Authenticated DataHub deployment. To use DataHub's authorization features please first make sure that the system has been configured from an authentication perspective as you intend.

    Once the identity of a user or service has been established, DataHub determines what accesses the authenticated request has.

    This is done by checking what operation a given user/service wants to perform within DataHub & whether it is allowed to do so. The set of operations that are allowed in DataHub are what we call Policies.

    Policies specify fine-grain access control for who can do what to which resources, for more details on the set of Policies that DataHub provides please see the Policies Guide.

    - + \ No newline at end of file diff --git a/docs/authorization/policies/index.html b/docs/authorization/policies/index.html index 875b9720d20b2..ed9d1526e9efd 100644 --- a/docs/authorization/policies/index.html +++ b/docs/authorization/policies/index.html @@ -8,7 +8,7 @@ - + @@ -34,7 +34,7 @@ the policies management UI and by default will allow all actions on the platform. It will be as though each user has all privileges, both of the Platform & Metadata flavor.

    To disable Policies, you can simply set the AUTH_POLICIES_ENABLED environment variable for the datahub-gms service container to false. For example in your docker/datahub-gms/docker.env, you'd place

    AUTH_POLICIES_ENABLED=false

    REST API Authorization

    Policies only affect REST APIs when the environment variable REST_API_AUTHORIZATION is set to true for GMS. Some policies only apply when this setting is enabled, marked above, and other Metadata and Platform policies apply to the APIs where relevant, also specified in the table above.

    Coming Soon

    The DataHub team is hard at work trying to improve the Policies feature. We are planning on building out the following:

    • Hide edit action buttons on Entity pages to reflect user privileges

    Under consideration

    • Ability to define Metadata Policies against multiple resources scoped to particular "Containers" (e.g. A "schema", "database", or "collection")

    Feedback / Questions / Concerns

    We want to hear from you! For any inquiries, including Feedback, Questions, or Concerns, reach out on Slack!


    1. Only active if REST_API_AUTHORIZATION_ENABLED is true
    - + \ No newline at end of file diff --git a/docs/authorization/roles/index.html b/docs/authorization/roles/index.html index d4f22a6520f3e..90f8c9a65dd54 100644 --- a/docs/authorization/roles/index.html +++ b/docs/authorization/roles/index.html @@ -8,7 +8,7 @@ - + @@ -22,7 +22,7 @@ with the Policies system. For example, if you would like to give a user a Reader role, but also allow them to edit metadata for certain domains, you can add a policy that will allow them to do. Note that adding a policy like this will only add to what a user can do in DataHub.

    Role Privileges

    Self-Hosted DataHub and Managed DataHub

    These privileges are common to both Self-Hosted DataHub and Managed DataHub.

    Platform Privileges
    PrivilegeAdminEditorReader
    Generate Personal Access Tokens✔️✔️
    Manage Domains✔️✔️
    Manage Glossaries✔️✔️
    Manage Tags✔️✔️
    Manage Policies✔️
    Manage Ingestion✔️
    Manage Secrets✔️
    Manage Users and Groups✔️
    Manage Access Tokens✔️
    Manage User Credentials✔️
    Manage Public Views✔️
    View Analytics✔️
    Metadata Privileges
    PrivilegeAdminEditorReader
    View Entity Page✔️✔️✔️
    View Dataset Usage✔️✔️✔️
    View Dataset Profile✔️✔️✔️
    Edit Entity✔️✔️
    Edit Entity Tags✔️✔️
    Edit Entity Glossary Terms✔️✔️
    Edit Entity Owners✔️✔️
    Edit Entity Docs✔️✔️
    Edit Entity Doc Links✔️✔️
    Edit Entity Status✔️✔️
    Edit Entity Assertions✔️✔️
    Manage Entity Tags✔️✔️
    Manage Entity Glossary Terms✔️✔️
    Edit Dataset Column Tags✔️✔️
    Edit Dataset Column Glossary Terms✔️✔️
    Edit Dataset Column Descriptions✔️✔️
    Manage Dataset Column Tags✔️✔️
    Manage Dataset Column Glossary Terms✔️✔️
    Edit Tag Color✔️✔️
    Edit User Profile✔️✔️
    Edit Contact Info✔️✔️

    Managed DataHub

    These privileges are only relevant to Managed DataHub.

    Platform Privileges
    PrivilegeAdminEditorReader
    Create Constraints✔️✔️
    View Metadata Proposals✔️✔️
    Manage Tests✔️
    Manage Global Settings✔️
    Metadata Privileges
    PrivilegeAdminEditorReader
    Propose Entity Tags✔️✔️✔️
    Propose Entity Glossary Terms✔️✔️✔️
    Propose Dataset Column Tags✔️✔️✔️
    Propose Dataset Column Glossary Terms✔️✔️✔️
    Edit Entity Operations✔️✔️

    Additional Resources

    GraphQL

    FAQ and Troubleshooting

    What updates are planned for Roles?

    In the future, the DataHub team is looking into adding the following features to Roles.

    • Defining a role mapping from OIDC identity providers to DataHub that will grant users a DataHub role based on their IdP role
    • Allowing Admins to set a default role on DataHub so all users are assigned a role
    • Building custom roles
    - + \ No newline at end of file diff --git a/docs/browse/index.html b/docs/browse/index.html index a3b7c18fae793..0d8506f523f0a 100644 --- a/docs/browse/index.html +++ b/docs/browse/index.html @@ -8,13 +8,13 @@ - +

    About DataHub Browse

    Feature Availability
    Self-Hosted DataHub
    Managed DataHub

    Browse is one of the primary entrypoints for discovering different Datasets, Dashboards, Charts and other DataHub Entities.

    Browsing is useful for finding data entities based on a hierarchical structure set in the source system. Generally speaking, that hierarchy will contain the following levels:

    • Entity Type (Dataset, Dashboard, Chart, etc.)
    • Environment (prod vs. dev)
    • Platform Type (Snowflake, dbt, Looker, etc.)
    • Container (Warehouse, Schema, Folder, etc.)
    • Entity Name

    For example, a user can easily browse for Datasets within the PROD Snowflake environment, the long_tail_companions warehouse, and the analytics schema:

    Using Browse

    Browse is accessible by clicking on an Entity Type on the front page of the DataHub UI.

    This will take you into the folder explorer view for browse in which you can drill down to your desired sub categories to find the data you are looking for.

    Additional Resources

    GraphQL

    FAQ and Troubleshooting

    How are BrowsePaths created?

    BrowsePaths are automatically created for ingested entities based on separator characters that appear within an Urn.

    How can I customize browse paths?

    BrowsePaths are an Aspect similar to other components of an Entity. They can be customized by ingesting custom paths for specified Urns.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/browsev2/browse-paths-v2/index.html b/docs/browsev2/browse-paths-v2/index.html index dc5662bc69b4a..a7f89119d38c4 100644 --- a/docs/browsev2/browse-paths-v2/index.html +++ b/docs/browsev2/browse-paths-v2/index.html @@ -8,13 +8,13 @@ - +

    Generating Browse Paths (V2)

    Feature Availability
    Self-Hosted DataHub
    Managed DataHub

    Introduction

    Browse (V2) is a way for users to explore and dive deeper into their data. Its integration with the search experience allows users to combine search queries and filters with entity type and platform nested folders.

    Most entities should have a browse path that allows users to navigate the left side panel on the search page to find groups of entities under different folders that come from these browse paths. Below, you can see an example of the sidebar with some new browse paths.

    This new browse sidebar always starts with Entity Type, then optionally shows Environment (PROD, DEV, etc.) if there are 2 or more Environments, then Platform. Below the Platform level, we render out folders that come directly from entity's browsePathsV2 aspects.

    Generating Custom Browse Paths

    A browsePathsV2 aspect has a field called path which contains a list of BrowsePathEntry objects. Each object in the path represents one level of the entity's browse path where the first entry is the highest level and the last entry is the lowest level.

    If an entity has this aspect filled out, their browse path will show up in the browse sidebar so that you can navigate its folders and select one to filter search results down.

    For example, in the browse sidebar on the left of the image above, there are 10 Dataset entities from the BigQuery Platform that have browsePathsV2 aspects that look like the following:

    [ { id: "bigquery-public-data" }, { id: "covid19_public_forecasts" } ]

    The id in a BrowsePathEntry is required and is what will be shown in the UI unless the optional urn field is populated. If the urn field is populated, we will try to resolve this path entry into an entity object and display that entity's name. We will also show a link to allow you to open up the entity profile.

    The urn field should only be populated if there is an entity in your DataHub instance that belongs in that entity's browse path. This makes most sense for Datasets to have Container entities in the browse paths as well as some other cases such as a DataFlow being part of a DataJob's browse path. For any other situation, feel free to leave urn empty and populate id with the text you want to be shown in the UI for your entity's path.

    Additional Resources

    GraphQL

    FAQ and Troubleshooting

    How are browsePathsV2 aspects created?

    We create browsePathsV2 aspects for all entities that should have one by default when you ingest your data if this aspect is not already provided. This happens based on separator characters that appear within an Urn.

    Our ingestion sources are also producing browsePathsV2 aspects since CLI version v0.10.5.

    - + \ No newline at end of file diff --git a/docs/cli/index.html b/docs/cli/index.html index 68c12d8c846d7..19be029c27213 100644 --- a/docs/cli/index.html +++ b/docs/cli/index.html @@ -8,7 +8,7 @@ - + @@ -38,7 +38,7 @@ The code responsible for collecting and broadcasting these events is open-source and can be found within our GitHub.

    Telemetry is enabled by default, and the telemetry command lets you toggle the sending of these statistics via telemetry enable/disable.

    migrate

    The migrate group of commands allows you to perform certain kinds of migrations.

    dataplatform2instance

    The dataplatform2instance migration command allows you to migrate your entities from an instance-agnostic platform identifier to an instance-specific platform identifier. If you have ingested metadata in the past for this platform and would like to transfer any important metadata over to the new instance-specific entities, then you should use this command. For example, if your users have added documentation or added tags or terms to your datasets, then you should run this command to transfer this metadata over to the new entities. For further context, read the Platform Instance Guide here.

    A few important options worth calling out:

    • --dry-run / -n : Use this to get a report for what will be migrated before running
    • --force / -F : Use this if you know what you are doing and do not want to get a confirmation prompt before migration is started
    • --keep : When enabled, will preserve the old entities and not delete them. Default behavior is to soft-delete old entities.
    • --hard : When enabled, will hard-delete the old entities.

    Note: Timeseries aspects such as Usage Statistics and Dataset Profiles are not migrated over to the new entity instances; you will get new data points created when you re-run ingestion using the usage sources or sources with profiling turned on.

    Dry Run
    datahub migrate dataplatform2instance --platform elasticsearch --instance prod_index --dry-run
    Starting migration: platform:elasticsearch, instance=prod_index, force=False, dry-run=True
    100% (25 of 25) |####################################################################################################################################################################################| Elapsed Time: 0:00:00 Time: 0:00:00
    [Dry Run] Migration Report:
    --------------
    [Dry Run] Migration Run Id: migrate-5710349c-1ec7-4b83-a7d3-47d71b7e972e
    [Dry Run] Num entities created = 25
    [Dry Run] Num entities affected = 0
    [Dry Run] Num entities migrated = 25
    [Dry Run] Details:
    [Dry Run] New Entities Created: {'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.datahubretentionindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.schemafieldindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.system_metadata_service_v1,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.tagindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.dataset_datasetprofileaspect_v1,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.mlmodelindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.mlfeaturetableindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.datajob_datahubingestioncheckpointaspect_v1,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.datahub_usage_event,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.dataset_operationaspect_v1,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.datajobindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.dataprocessindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.glossarytermindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.dataplatformindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.mlmodeldeploymentindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.datajob_datahubingestionrunsummaryaspect_v1,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.graph_service_v1,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.datahubpolicyindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.dataset_datasetusagestatisticsaspect_v1,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.dashboardindex_v2,PROD)', 
'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.glossarynodeindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.mlfeatureindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.dataflowindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.mlprimarykeyindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,prod_index.chartindex_v2,PROD)'}
    [Dry Run] External Entities Affected: None
    [Dry Run] Old Entities Migrated = {'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,dataset_datasetusagestatisticsaspect_v1,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,mlmodelindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,mlmodeldeploymentindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,datajob_datahubingestionrunsummaryaspect_v1,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,datahubretentionindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,datahubpolicyindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,dataset_datasetprofileaspect_v1,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,glossarynodeindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,dataset_operationaspect_v1,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,graph_service_v1,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,datajobindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,mlprimarykeyindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,dashboardindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,datajob_datahubingestioncheckpointaspect_v1,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,tagindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,datahub_usage_event,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,schemafieldindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,mlfeatureindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,dataprocessindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,dataplatformindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,mlfeaturetableindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,glossarytermindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,dataflowindex_v2,PROD)', 
'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,chartindex_v2,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:elasticsearch,system_metadata_service_v1,PROD)'}
    Real Migration (with soft-delete)
    > datahub migrate dataplatform2instance --platform hive --instance
    datahub migrate dataplatform2instance --platform hive --instance warehouse
    Starting migration: platform:hive, instance=warehouse, force=False, dry-run=False
    Will migrate 4 urns such as ['urn:li:dataset:(urn:li:dataPlatform:hive,SampleHiveDataset,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:hive,SampleHiveDataset,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:hive,logging_events,PROD)']
    New urns will look like ['urn:li:dataset:(urn:li:dataPlatform:hive,warehouse.logging_events,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:hive,warehouse.fct_users_created,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:hive,warehouse.SampleHiveDataset,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:hive,warehouse.fct_users_deleted,PROD)']

    Ok to proceed? [y/N]:
    ...
    Migration Report:
    --------------
    Migration Run Id: migrate-f5ae7201-4548-4bee-aed4-35758bb78c89
    Num entities created = 4
    Num entities affected = 0
    Num entities migrated = 4
    Details:
    New Entities Created: {'urn:li:dataset:(urn:li:dataPlatform:hive,warehouse.SampleHiveDataset,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:hive,warehouse.fct_users_deleted,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:hive,warehouse.logging_events,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:hive,warehouse.fct_users_created,PROD)'}
    External Entities Affected: None
    Old Entities Migrated = {'urn:li:dataset:(urn:li:dataPlatform:hive,logging_events,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:hive,SampleHiveDataset,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)', 'urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_created,PROD)'}

    Alternate Installation Options

    Using docker

    Docker Hub datahub-ingestion docker

    If you don't want to install locally, you can alternatively run metadata ingestion within a Docker container. We have prebuilt images available on Docker hub. All plugins will be installed and enabled automatically.

    You can use the datahub-ingestion docker image as explained in Docker Images. In case you are using Kubernetes, you can start a pod with the datahub-ingestion docker image, log onto a shell on the pod, and you should have access to the datahub CLI in your Kubernetes cluster.

    Limitation: the datahub_docker.sh convenience script assumes that the recipe and any input/output files are accessible in the current working directory or its subdirectories. Files outside the current working directory will not be found, and you'll need to invoke the Docker image directly.

    # Assumes the DataHub repo is cloned locally.
    ./metadata-ingestion/scripts/datahub_docker.sh ingest -c ./examples/recipes/example_to_datahub_rest.yml

    Install from source

    If you'd like to install from source, see the developer guide.

    Installing Plugins

    We use a plugin architecture so that you can install only the dependencies you actually need. Click the plugin name to learn more about the specific source recipe and any FAQs!

    Sources

    Please see our Integrations page if you want to filter on the features offered by each source.

    Plugin NameInstall CommandProvides
    fileincluded by defaultFile source and sink
    athenapip install 'acryl-datahub[athena]'AWS Athena source
    bigquerypip install 'acryl-datahub[bigquery]'BigQuery source
    datahub-lineage-fileno additional dependenciesLineage File source
    datahub-business-glossaryno additional dependenciesBusiness Glossary File source
    dbtno additional dependenciesdbt source
    druidpip install 'acryl-datahub[druid]'Druid Source
    feastpip install 'acryl-datahub[feast]'Feast source (0.26.0)
    gluepip install 'acryl-datahub[glue]'AWS Glue source
    hanapip install 'acryl-datahub[hana]'SAP HANA source
    hivepip install 'acryl-datahub[hive]'Hive source
    kafkapip install 'acryl-datahub[kafka]'Kafka source
    kafka-connectpip install 'acryl-datahub[kafka-connect]'Kafka connect source
    ldappip install 'acryl-datahub[ldap]' (extra requirements)LDAP source
    lookerpip install 'acryl-datahub[looker]'Looker source
    lookmlpip install 'acryl-datahub[lookml]'LookML source, requires Python 3.7+
    metabasepip install 'acryl-datahub[metabase]'Metabase source
    modepip install 'acryl-datahub[mode]'Mode Analytics source
    mongodbpip install 'acryl-datahub[mongodb]'MongoDB source
    mssqlpip install 'acryl-datahub[mssql]'SQL Server source
    mysqlpip install 'acryl-datahub[mysql]'MySQL source
    mariadbpip install 'acryl-datahub[mariadb]'MariaDB source
    openapipip install 'acryl-datahub[openapi]'OpenApi Source
    oraclepip install 'acryl-datahub[oracle]'Oracle source
    postgrespip install 'acryl-datahub[postgres]'Postgres source
    redashpip install 'acryl-datahub[redash]'Redash source
    redshiftpip install 'acryl-datahub[redshift]'Redshift source
    sagemakerpip install 'acryl-datahub[sagemaker]'AWS SageMaker source
    snowflakepip install 'acryl-datahub[snowflake]'Snowflake source
    sqlalchemypip install 'acryl-datahub[sqlalchemy]'Generic SQLAlchemy source
    supersetpip install 'acryl-datahub[superset]'Superset source
    tableaupip install 'acryl-datahub[tableau]'Tableau source
    trinopip install 'acryl-datahub[trino]'Trino source
    starburst-trino-usagepip install 'acryl-datahub[starburst-trino-usage]'Starburst Trino usage statistics source
    nifipip install 'acryl-datahub[nifi]'NiFi source
    powerbipip install 'acryl-datahub[powerbi]'Microsoft Power BI source
    powerbi-report-serverpip install 'acryl-datahub[powerbi-report-server]'Microsoft Power BI Report Server source

    Sinks

    Plugin NameInstall CommandProvides
    fileincluded by defaultFile source and sink
    consoleincluded by defaultConsole sink
    datahub-restpip install 'acryl-datahub[datahub-rest]'DataHub sink over REST API
    datahub-kafkapip install 'acryl-datahub[datahub-kafka]'DataHub sink over Kafka

    These plugins can be mixed and matched as desired. For example:

    pip install 'acryl-datahub[bigquery,datahub-rest]'

    Check the active plugins

    datahub check plugins

    Release Notes and CLI versions

    The server release notes can be found in github releases. These releases are done approximately every week on a regular cadence unless a blocking issue or regression is discovered.

    CLI releases are made through a different repository and release notes can be found in acryldata releases. At least one release, which is tied to the server release, is always made along with the server release. Multiple other bugfix releases are made in between based on the number of fixes that are merged between the server releases mentioned above.

    If a server with version 0.8.28 is being used, then the CLI used to connect to it should be 0.8.28.x. Tests of the new CLI are not run against older server versions, so it is not recommended to update the CLI if the server is not updated.

    - + \ No newline at end of file diff --git a/docs/code_of_conduct/index.html b/docs/code_of_conduct/index.html index 94ab6abc610c2..4798d02b07687 100644 --- a/docs/code_of_conduct/index.html +++ b/docs/code_of_conduct/index.html @@ -8,7 +8,7 @@ - + @@ -43,7 +43,7 @@ members of the project's leadership.

    Attribution

    This Code of Conduct is adapted from the Contributor Covenant, version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

    For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq

    - + \ No newline at end of file diff --git a/docs/components/index.html b/docs/components/index.html index 2849ee5c29d8c..e38ede8708150 100644 --- a/docs/components/index.html +++ b/docs/components/index.html @@ -8,7 +8,7 @@ - + @@ -29,7 +29,7 @@ For a full overview of the capabilities currently supported, take a look at the Features overview. For a look at what's coming next, head over to the Roadmap.

    Learn More

    Learn more about the specifics of the DataHub Architecture in the Architecture Overview. Learn about using & developing the components of the Platform by visiting the Module READMEs.

    Feedback / Questions / Concerns

    We want to hear from you! For any inquiries, including Feedback, Questions, or Concerns, reach out on Slack!

    - + \ No newline at end of file diff --git a/docs/contributing/index.html b/docs/contributing/index.html index ffcf447afa244..cc34df44d03c2 100644 --- a/docs/contributing/index.html +++ b/docs/contributing/index.html @@ -8,13 +8,13 @@ - +

    Contributing

    We always welcome contributions to help make DataHub better. Take a moment to read this document if you would like to contribute.

    Provide Feedback

    Have ideas about how to make DataHub better? Head over to DataHub Feature Requests and tell us all about it!

    Show your support for other requests by upvoting; stay up to date on progress by subscribing for updates via email.

    Reporting Issues

    We use GitHub issues to track bug reports and GitHub pull requests for submitting changes.

    If you find a bug:

    1. Use the GitHub issue search to check whether the bug has already been reported.

    2. If the issue has been fixed, try to reproduce the issue using the latest master branch of the repository.

    3. If the issue still reproduces or has not yet been reported, try to isolate the problem before opening an issue.

    Submitting a Request For Comment (RFC)

    If you have a substantial feature or a design discussion that you'd like to have with the community follow the RFC process outlined here

    Submitting a Pull Request (PR)

    Before you submit your Pull Request (PR), consider the following guidelines:

    • Search GitHub for an open or closed PR that relates to your submission. You don't want to duplicate effort.
    • Follow the standard GitHub approach to create the PR. Please also follow our commit message format.
    • If there are any breaking changes, potential downtime, deprecations, or big feature please add an update in Updating DataHub under Next.
    • That's it! Thank you for your contribution!

    Commit Message Format

    Please follow the Conventional Commits specification for the commit message format. In summary, each commit message consists of a header, a body and a footer, separated by a single blank line.

    <type>[optional scope]: <description>

    [optional body]

    [optional footer(s)]

    Any line of the commit message cannot be longer than 88 characters! This allows the message to be easier to read on GitHub as well as in various Git tools.

    Type

    Must be one of the following (based on the Angular convention):

    • feat: A new feature
    • fix: A bug fix
    • refactor: A code change that neither fixes a bug nor adds a feature
    • docs: Documentation only changes
    • test: Adding missing tests or correcting existing tests
    • perf: A code change that improves performance
    • style: Changes that do not affect the meaning of the code (whitespace, formatting, missing semicolons, etc.)
    • build: Changes that affect the build system or external dependencies
    • ci: Changes to our CI configuration files and scripts

    A scope may be provided to a commit’s type to provide additional contextual information; it is contained within parentheses, e.g.,

    feat(parser): add ability to parse arrays

    Description

    Each commit must contain a succinct description of the change:

    • use the imperative, present tense: "change" not "changed" nor "changes"
    • don't capitalize the first letter
    • no dot(.) at the end

    Body

    Just as in the description, use the imperative, present tense: "change" not "changed" nor "changes". The body should include the motivation for the change and contrast this with previous behavior.

    The footer should contain any information about Breaking Changes, and is also the place to reference GitHub issues that this commit Closes.

    Breaking Changes should start with the words BREAKING CHANGE: with a space or two new lines. The rest of the commit message is then used for this.

    Revert

    If the commit reverts a previous commit, it should begin with revert:, followed by the description. In the body it should say: Refs: <hash1> <hash2> ..., where the hashes are the SHAs of the commits being reverted, e.g.

    revert: let us never again speak of the noodle incident

    Refs: 676104e, a215868
    - + \ No newline at end of file diff --git a/docs/datahub-frontend/index.html b/docs/datahub-frontend/index.html index c61650d41b734..70e4d4e356d25 100644 --- a/docs/datahub-frontend/index.html +++ b/docs/datahub-frontend/index.html @@ -8,7 +8,7 @@ - + @@ -21,7 +21,7 @@ the application directly from command line after a successful build:

    cd datahub-frontend/run && ./run-local-frontend

    Checking out DataHub UI

    After starting your application in one of the two ways mentioned above, you can connect to it by typing below into your favorite web browser:

    http://localhost:9002

    To be able to sign in, you need to provide your user name. The default account is datahub, password datahub.

    Authentication

    DataHub frontend leverages Java Authentication and Authorization Service (JAAS) to perform the authentication. By default we provided a DummyLoginModule which will accept any username/password combination. You can update jaas.conf to match your authentication requirement. For example, use the following config for LDAP-based authentication,

    WHZ-Authentication {
      com.sun.security.auth.module.LdapLoginModule sufficient
      userProvider="ldaps://<host>:636/dc=<domain>"
      authIdentity="{USERNAME}"
      userFilter="(&(objectClass=person)(uid={USERNAME}))"
      java.naming.security.authentication="simple"
      debug="false"
      useSSL="true";
    };

    Authentication in React

    The React app supports both JAAS as described above and separately OIDC authentication. To learn about configuring OIDC for React, see the OIDC in React document.

    API Debugging

    Most DataHub frontend API endpoints are protected using Play Authentication, which means it requires authentication information stored in the cookie for the request to go through. This makes debugging using curl difficult. One option is to first make a curl call against the /authenticate endpoint and stores the authentication info in a cookie file like this

    curl -c cookie.txt -d '{"username":"datahub", "password":"datahub"}' -H 'Content-Type: application/json' http://localhost:9002/authenticate

    You can then make all subsequent calls using the same cookie file to pass the authentication check.

    curl -b cookie.txt "http://localhost:9001/api/v2/search?type=dataset&input=page"
    - + \ No newline at end of file diff --git a/docs/datahub-graphql-core/index.html b/docs/datahub-graphql-core/index.html index 98c80aa187a01..c7af3f8c7a563 100644 --- a/docs/datahub-graphql-core/index.html +++ b/docs/datahub-graphql-core/index.html @@ -8,7 +8,7 @@ - + @@ -28,7 +28,7 @@ on the complexity of the new entity. See here for reference.

    Note: If you want your new Entity to be "browsable" (folder navigation) via the UI, make sure you implement the BrowsableEntityType interface.

    Enabling Search for a new Entity

    In order to enable searching an Entity, you'll need to modify the SearchAcrossEntities.java resolver, which enables unified search across all DataHub entities.

    Steps:

    1. Add your new Entity type to this list.
    2. Add a new statement to UrnToEntityMapper.java. This maps a URN to a "placeholder" GraphQL entity which is subsequently resolved by the GraphQL engine.

    That should be it!

    Now, you can try to issue a search for the new entities you've ingested

    - + \ No newline at end of file diff --git a/docs/datahub-web-react/index.html b/docs/datahub-web-react/index.html index 11b097a8ccba7..801759b325b93 100644 --- a/docs/datahub-web-react/index.html +++ b/docs/datahub-web-react/index.html @@ -8,7 +8,7 @@ - + @@ -46,7 +46,7 @@ and appear in the UI. To manually retrieve the info about your entity or others, simply use an instance of the EntityRegistry, which is provided via ReactContext to all components in the hierarchy. For example
    entityRegistry.getCollectionName(EntityType.YOUR_NEW_ENTITY)

    That's it! For any questions, do not hesitate to reach out on the DataHub Slack community in #datahub-react.

    - + \ No newline at end of file diff --git a/docs/datahub-web-react/src/app/analytics/index.html b/docs/datahub-web-react/src/app/analytics/index.html index ad13e7ffb76cb..9a635fa059559 100644 --- a/docs/datahub-web-react/src/app/analytics/index.html +++ b/docs/datahub-web-react/src/app/analytics/index.html @@ -8,13 +8,13 @@ - +

    DataHub React Analytics

    About

    The DataHub React application can be configured to emit a set of standardized product analytics events to multiple backend providers including

    • Mixpanel
    • Amplitude
    • Google Analytics

    This provides operators of DataHub with visibility into how their users are engaging with the platform, allowing them to answer questions around weekly active users, the most used features, the least used features, and more.

    To accomplish this, we have built a small extension on top of the popular Analytics npm package. This package was chosen because it offers a clear pathway to extending support to many other providers, all of which you can find listed here.

    Configuring an Analytics Provider

    Currently, configuring an analytics provider requires that you fork DataHub & modify code. As described in 'Coming Soon', we intend to improve this process by implementing no-code configuration.

    Mixpanel

    1. Open datahub-web-react/src/conf/analytics.ts
    2. Uncomment the mixpanel field within the config object.
    3. Replace the sample token with the API token provided by Mixpanel.
    4. Rebuild & redeploy datahub-frontend-react to start tracking.
    const config: any = {
    mixpanel: {
    token: 'fad1285da4e618b618973cacf6565e61',
    },
    };

    Amplitude

    1. Open datahub-web-react/src/conf/analytics.ts
    2. Uncomment the amplitude field within the config object.
    3. Replace the sample apiKey with the key provided by Amplitude.
    4. Rebuild & redeploy datahub-frontend-react to start tracking.
    const config: any = {
    amplitude: {
    apiKey: 'c5c212632315d19c752ab083bc7c92ff',
    },
    };

    Google Analytics

    Disclaimers

    • This plugin requires use of Universal Analytics and does not yet support GA4. To create a Universal Analytics Property, follow this guide.
    • Google Analytics lacks robust support for custom event properties. For that reason many of the DataHub events discussed above will not be fully populated. Instead, we map certain fields of the DataHub event to the standard category, action, label fields required by GA.
    1. Open datahub-web-react/src/conf/analytics.ts
    2. Uncomment the googleAnalytics field within the config object.
    3. Replace the sample trackingId with the one provided by Google Analytics.
    4. Rebuild & redeploy datahub-frontend-react to start tracking.
    const config: any = {
    googleAnalytics: {
    trackingId: 'UA-24123123-01',
    },
    };

    Verifying your Analytics Setup

    To verify that analytics are being sent to your provider, you can inspect the networking tab of a Google Chrome inspector window:

    With DataHub open on Google Chrome

    1. Right click, then Inspect
    2. Click 'Network'
    3. Issue a search in DataHub
    4. Inspect the outbound traffic for requests routed to your analytics provider.

    Development

    Adding a plugin

    To add a new plugin from the Analytics library:

    1. Add a new file under src/app/analytics/plugin named based on the plugin
    2. Extract configs from the analytics config object required to instantiate the plugin
    3. Instantiate the plugin
    4. Export a default object with 'isEnabled' and 'plugin' fields
    5. Import / Export the new plugin module from src/app/analytics/plugin/index.js

    If you're unsure, check out the existing plugin implementations as examples. Before contributing a plugin, please be sure to verify the integration by viewing the product metrics in the new analytics provider.

    Adding an event

    To add a new DataHub analytics event, make the following changes to src/app/analytics/event.ts:

    1. Add a new value to the EventType enum
       export enum EventType {
    LogInEvent,
    LogOutEvent,
    ...,
    MyNewEvent
    }
    1. Create a new interface extending BaseEvent
    export interface MyNewEvent extends BaseEvent {
    type: EventType.MyNewEvent; // must be the type you just added
    ... your event's custom fields
    }
    1. Add the interface to the exported Event type.
    export type Event =
    | LogInEvent
    | LogOutEvent
    ....
    | MyNewEvent

    Emitting an event

    Emitting a tracking DataHub analytics event is a 2-step process:

    1. Import relevant items from analytics module
    import analytics, { EventType } from '../analytics';
    1. Call the event method, passing in an event object of the appropriate type
    analytics.event({ type: EventType.MyNewEvent, ...my event fields });

    Debugging: Enabling Event Logging

    To log events to the console for debugging / verification purposes

    1. Open datahub-web-react/src/conf/analytics.ts
    2. Uncomment logging: true within the config object.
    3. Rebuild & redeploy datahub-frontend-react to start logging all events to your browser's console.

    Coming Soon

    In the near future, we intend to

    1. Send product analytics events back to DataHub itself, using them as feedback to improve the product experience.
    2. No-code configuration of Analytics plugins. This will be achieved using server driven configuration for the React app.
    - + \ No newline at end of file diff --git a/docs/datahub_lite/index.html b/docs/datahub_lite/index.html index ee28681710331..63fc53c1fdfa8 100644 --- a/docs/datahub_lite/index.html +++ b/docs/datahub_lite/index.html @@ -8,7 +8,7 @@ - + @@ -25,7 +25,7 @@ Writing these functions requires that you understand the DataHub metadata model and how the data is laid out in DataHub Lite.

    For example, to find all entities whose datasetProperties aspect includes the view_definition in its customProperties sub-field, we can issue the following command:

    > datahub lite search --aspect datasetProperties --flavor exact "metadata -> '$.customProperties' ->> '$.view_definition' IS NOT NULL"
    {"id": "urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.INNODB_MUTEXES,PROD)", "aspect": "datasetProperties", "snippet": "{\"customProperties\": {\"view_definition\": \"CREATE TEMPORARY TABLE `INNODB_MUTEXES` (\\n  `NAME` varchar(4000) NOT NULL DEFAULT '',\\n  `CREATE_FILE` varchar(4000) NOT NULL DEFAULT '',\\n  `CREATE_LINE` int(11) unsigned NOT NULL DEFAULT 0,\\n  `OS_WAITS` bigint(21) unsigned NOT NULL DEFAULT 0\\n) ENGINE=MEMORY DEFAULT CHARSET=utf8\", \"is_view\": \"True\"}, \"name\": \"INNODB_MUTEXES\", \"tags\": []}"}
    {"id": "urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.user_variables,PROD)", "aspect": "datasetProperties", "snippet": "{\"customProperties\": {\"view_definition\": \"CREATE TEMPORARY TABLE `user_variables` (\\n `VARIABLE_NAME` varchar(64) NOT NULL DEFAULT '',\\n `VARIABLE_VALUE` varchar(2048) DEFAULT NULL,\\n `VARIABLE_TYPE` varchar(64) NOT NULL DEFAULT '',\\n `CHARACTER_SET_NAME` varchar(32) DEFAULT NULL\\n) ENGINE=MEMORY DEFAULT CHARSET=utf8\", \"is_view\": \"True\"}, \"name\": \"user_variables\", \"tags\": []}"}
    {"id": "urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.INNODB_TABLESPACES_ENCRYPTION,PROD)", "aspect": "datasetProperties", "snippet": "{\"customProperties\": {\"view_definition\": \"CREATE TEMPORARY TABLE `INNODB_TABLESPACES_ENCRYPTION` (\\n `SPACE` int(11) unsigned NOT NULL DEFAULT 0,\\n `NAME` varchar(655) DEFAULT NULL,\\n `ENCRYPTION_SCHEME` int(11) unsigned NOT NULL DEFAULT 0,\\n `KEYSERVER_REQUESTS` int(11) unsigned NOT NULL DEFAULT 0,\\n `MIN_KEY_VERSION` int(11) unsigned NOT NULL DEFAULT 0,\\n `CURRENT_KEY_VERSION` int(11) unsigned NOT NULL DEFAULT 0,\\n `KEY_ROTATION_PAGE_NUMBER` bigint(21) unsigned DEFAULT NULL,\\n `KEY_ROTATION_MAX_PAGE_NUMBER` bigint(21) unsigned DEFAULT NULL,\\n `CURRENT_KEY_ID` int(11) unsigned NOT NULL DEFAULT 0,\\n `ROTATING_OR_FLUSHING` int(1) NOT NULL DEFAULT 0\\n) ENGINE=MEMORY DEFAULT CHARSET=utf8\", \"is_view\": \"True\"}, \"name\": \"INNODB_TABLESPACES_ENCRYPTION\", \"tags\": []}"}

    Search will return results that include the id of the entity that matched along with the aspect and the content of the aspect as part of the snippet field. If you just want the id of the entity to be returned, use the --no-details flag.

    > datahub lite search --aspect datasetProperties --flavor exact "metadata -> '$.customProperties' ->> '$.view_definition' IS NOT NULL" --no-details
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.INNODB_SYS_FOREIGN,PROD)
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.INNODB_CMPMEM_RESET,PROD)
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.INNODB_FT_DEFAULT_STOPWORD,PROD)
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.INNODB_SYS_TABLES,PROD)
    ...
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.INNODB_SYS_COLUMNS,PROD)
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.INNODB_FT_CONFIG,PROD)
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.USER_STATISTICS,PROD)

    List Urns (list-urns)

    List all the ids in the DataHub Lite instance.

    > datahub lite list-urns
    urn:li:container:21d4204e13d5b984c58acad468ecdbdd
    urn:li:dataset:(urn:li:dataPlatform:mysql,datahub.metadata_aspect_v2,PROD)

    urn:li:container:aa82e8309ce84acc350640647a54ca3b
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.ALL_PLUGINS,PROD)
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.APPLICABLE_ROLES,PROD)
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.CHARACTER_SETS,PROD)
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.CHECK_CONSTRAINTS,PROD)
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.COLLATIONS,PROD)
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.COLLATION_CHARACTER_SET_APPLICABILITY,PROD)
    urn:li:dataset:(urn:li:dataPlatform:mysql,information_schema.COLUMNS,PROD)
    ...

    HTTP Server (serve)

    DataHub Lite can be run as a lightweight HTTP server, exposing an OpenAPI spec over FastAPI.

    > datahub lite serve
    INFO: Started server process [33364]
    INFO: Waiting for application startup.
    INFO: Application startup complete.
    INFO: Uvicorn running on http://127.0.0.1:8979 (Press CTRL+C to quit)

    OpenAPI docs are available via your browser at the same port: http://localhost:8979

    The server exposes similar commands as the lite cli commands over HTTP:

    • entities: list of all entity ids and get metadata for an entity
    • browse: traverse the entity hierarchy in a path based way
    • search: execute search against the metadata

    Server Configuration

    Configuration for the server is picked up from the standard location for the datahub cli: ~/.datahubenv and can be created using datahub lite init.

    Here is a sample config file with the lite section filled out:

    gms:
    server: http://localhost:8080
    token: ''
    lite:
    config:
    file: /Users/<username>/.datahub/lite/datahub.duckdb
    type: duckdb
    forward_to:
    type: datahub-rest
    server: "http://datahub-gms:8080"

    Admin Commands

    Export Metadata (export)

    The export command allows you to export the contents of DataHub Lite into a metadata events file that you can then send to another DataHub instance (e.g. over REST).

    > datahub lite export --file datahub_lite_export.json
    Successfully exported 1785 events to datahub_lite_export.json

    Clear (nuke)

    If you want to clear your DataHub lite instance, you can just issue the nuke command.

    > datahub lite nuke
    DataHub Lite destroyed at <path>

    Use a different file (init)

    By default, DataHub Lite will create and use a local duckdb instance located at ~/.datahub/lite/datahub.duckdb. If you want to use a different location, you can configure it using the datahub lite init command.

    > datahub lite init --type duckdb --file my_local_datahub.duckdb
    Will replace datahub lite config type='duckdb' config={'file': '/Users/<username>/.datahub/lite/datahub.duckdb', 'options': {}} with type='duckdb' config={'file': 'my_local_datahub.duckdb', 'options': {}} [y/N]: y
    DataHub Lite inited at my_local_datahub.duckdb

    Reindex

    DataHub Lite maintains a few derived tables to make access possible via both the native id (urn) as well as the logical path of the entity. The reindex command recomputes these indexes.

    Caveat Emptor!

    DataHub Lite is a very new project. Do not use it for production use-cases. The APIs and storage formats are subject to change as we get feedback from early adopters. That said, we are really interested in accepting PRs and suggestions for improvements to this fledgling project.

    Advanced Options

    Tab Completion

    Using the datahub lite commands like ls or get is much more pleasant when you have tab completion enabled on your shell. Tab completion is supported on the command line through the Click Shell completion module. To set up shell completion for your shell, follow the instructions below based on your shell variant:

    Option 1: Inline eval (easy, less performant)

    Add this to ~/.zshrc:

    eval "$(_DATAHUB_COMPLETE=zsh_source datahub)"

    Using eval means that the command is invoked and evaluated every time a shell is started, which can delay shell responsiveness. To speed it up, write the generated script to a file, then source that.

    Save the script somewhere.

    # the additional sed patches completion to be path oriented and not add spaces between each completed token
    _DATAHUB_COMPLETE=zsh_source datahub | sed 's;compadd;compadd -S /;' > ~/.datahub-complete.zsh

    Source the file in ~/.zshrc.

    . ~/.datahub-complete.zsh
    - + \ No newline at end of file diff --git a/docs/dataproducts/index.html b/docs/dataproducts/index.html index d1a78e54f5438..70c474508d932 100644 --- a/docs/dataproducts/index.html +++ b/docs/dataproducts/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ 'Customer Orders' or 'Revenue Attribution'. You can also add documentation for your product to help other users easily discover it. Don't worry, this can be changed later.

    Once you've chosen a name and a description, click 'Create' to create the new Data Product. Once you've created the Data Product, you can click on it to continue on to the next step, adding assets to it.

    Assigning an Asset to a Data Product (UI)

    You can assign an asset to a Data Product either using the Data Product page as the starting point or the Asset's page as the starting point. On a Data Product page, click the 'Add Assets' button on the top right corner to add assets to the Data Product.

    On an Asset's profile page, use the right sidebar to locate the Data Product section. Click 'Set Data Product', and then search for the Data Product you'd like to add this asset to. When you're done, click 'Add'.

    To remove an asset from a Data Product, click the 'x' icon on the Data Product label.

    Notice: Adding or removing an asset from a Data Product requires the Edit Data Product Metadata Privilege, which can be granted by a Policy.

    Creating a Data Product (YAML + git)

    DataHub ships with a YAML-based Data Product spec for defining and managing Data Products as code.

    Here is an example of a Data Product named "Pet of the Week" which belongs to the Marketing domain and contains three data assets. The Spec tab describes the JSON Schema spec for a DataHub data product file.

    # Inlined from /metadata-ingestion/examples/data_product/dataproduct.yaml
    id: pet_of_the_week
    domain: Marketing
    display_name: Pet of the Week Campaign
    description: |-
    This campaign includes Pet of the Week data.

    # List of assets that belong to this Data Product
    assets:
    - urn:li:dataset:(urn:li:dataPlatform:snowflake,long_tail_companions.analytics.pet_details,PROD)
    - urn:li:dashboard:(looker,dashboards.19)
    - urn:li:dataFlow:(airflow,snowflake_load,prod)

    owners:
    - id: urn:li:corpuser:jdoe
    type: BUSINESS_OWNER

    # Tags associated with this Data Product
    tags:
    - urn:li:tag:adoption

    # Glossary Terms associated with this Data Product
    terms:
    - urn:li:glossaryTerm:ClientsAndAccounts.AccountBalance

    # Custom Properties
    properties:
    lifecycle: production
    sla: 7am every day

    note

    When bare domain names like Marketing are used, datahub will first check if a domain like urn:li:domain:Marketing is provisioned, failing that; it will check for a provisioned domain that has the same name. If we are unable to resolve bare domain names to provisioned domains, then yaml-based ingestion will refuse to proceed until the domain is provisioned on DataHub.

    You can also provide fully-qualified domain names (e.g. urn:li:domain:dcadded3-2b70-4679-8b28-02ac9abc92eb) to ensure that no ingestion-time domain resolution is needed.

    To sync this yaml file to DataHub, use the datahub cli via the dataproduct group of commands.

    datahub dataproduct upsert -f user_dataproduct.yaml

    Keeping the YAML file sync-ed with changes in UI

    The datahub cli allows you to keep this YAML file synced with changes happening in the UI. All you have to do is run the datahub dataproduct diff command.

    Here is an example invocation that checks if there is any diff and updates the file in place:

    datahub dataproduct diff -f user_dataproduct.yaml --update

    This allows you to manage your data product definition in git while still allowing for edits in the UI. Business Users and Developers can both collaborate on the definition of a data product with ease using this workflow.

    Advanced cli commands for managing Data Products

    There are many more advanced cli commands for managing Data Products as code. Take a look at the Data Products section on the CLI reference guide for more details.

    What updates are planned for the Data Products feature?

    The following features are next on the roadmap for Data Products

    • Support for marking data assets in a Data Product as private versus shareable for other teams to consume
    • Support for declaring lineage manually to upstream and downstream data products
    • Support for declaring logical schema for Data Products
    • Support for associating data contracts with Data Products
    • Support for semantic versioning of the Data Product entity

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/deploy/aws/index.html b/docs/deploy/aws/index.html index b8f5add4f2c18..fe6f1da22c856 100644 --- a/docs/deploy/aws/index.html +++ b/docs/deploy/aws/index.html @@ -8,7 +8,7 @@ - + @@ -80,7 +80,7 @@ following command to do so.

    eksctl create iamserviceaccount \
    --name <<service-account-name>> \
    --namespace <<namespace>> \
    --cluster <<eks-cluster-name>> \
    --attach-policy-arn <<policy-ARN>> \
    --approve \
    --override-existing-serviceaccounts

    For example, running the following will create a service account "acryl-datahub-actions" in the datahub namespace of datahub EKS cluster with arn:aws:iam::<<account-id>>:policy/policy1 attached.

    eksctl create iamserviceaccount \
    --name acryl-datahub-actions \
    --namespace datahub \
    --cluster datahub \
    --attach-policy-arn arn:aws:iam::<<account-id>>:policy/policy1 \
    --approve \
    --override-existing-serviceaccounts

    Lastly, in the helm values.yaml, you can add the following to the acryl-datahub-actions to attach the service account to the acryl-datahub-actions pod.

    acryl-datahub-actions:
    enabled: true
    serviceAccount:
    name: <<service-account-name>>
    ...
    - + \ No newline at end of file diff --git a/docs/deploy/confluent-cloud/index.html b/docs/deploy/confluent-cloud/index.html index f4f15f7e500b4..b3a4ea15aac8c 100644 --- a/docs/deploy/confluent-cloud/index.html +++ b/docs/deploy/confluent-cloud/index.html @@ -8,7 +8,7 @@ - + @@ -29,7 +29,7 @@ which will use the same username and API Key you generated for the JAAS config.

    See Overwriting a System Action Config for detailed reflection procedures.

    credentialsAndCertsSecrets:
    name: confluent-secrets
    secureEnv:
    sasl.jaas.config: sasl_jaas_config
    basic.auth.user.info: basic_auth_user_info
    sasl.username: sasl_username
    sasl.password: sasl_password

    The Actions pod will automatically pick these up in the correctly named environment variables when they are named this exact way.

    Contribution

    Accepting contributions for a setup script compatible with Confluent Cloud!

    The kafka-setup-job container we ship with is only compatible with a distribution of Kafka wherein ZooKeeper is exposed and available. A version of the job using the Confluent CLI would be very useful for the broader community.

    - + \ No newline at end of file diff --git a/docs/deploy/environment-vars/index.html b/docs/deploy/environment-vars/index.html index d517eca781eb6..767f4b89c6824 100644 --- a/docs/deploy/environment-vars/index.html +++ b/docs/deploy/environment-vars/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ DataHub works.

    Feature Flags

    VariableDefaultUnit/TypeComponentsDescription
    UI_INGESTION_ENABLEDtrueboolean[GMS, MCE Consumer]Enable UI based ingestion.
    DATAHUB_ANALYTICS_ENABLEDtrueboolean[Frontend, GMS]Collect DataHub usage to populate the analytics dashboard.
    BOOTSTRAP_SYSTEM_UPDATE_WAIT_FOR_SYSTEM_UPDATEtrueboolean[GMS, MCE Consumer, MAE Consumer]Wait for the system-update to complete before starting. This should typically only be disabled during development.

    Ingestion

    VariableDefaultUnit/TypeComponentsDescription
    ASYNC_INGESTION_DEFAULTfalseboolean[GMS]Asynchronously process ingestProposals by writing the ingestion MCP to Kafka. Typically enabled with standalone consumers.
    MCP_CONSUMER_ENABLEDtrueboolean[GMS, MCE Consumer]When running in standalone mode, disabled on GMS and enabled on separate MCE Consumer.
    MCL_CONSUMER_ENABLEDtrueboolean[GMS, MAE Consumer]When running in standalone mode, disabled on GMS and enabled on separate MAE Consumer.
    PE_CONSUMER_ENABLEDtrueboolean[GMS, MAE Consumer]When running in standalone mode, disabled on GMS and enabled on separate MAE Consumer.
    ES_BULK_REQUESTS_LIMIT1000docs[GMS, MAE Consumer]Number of bulk documents to index. MAE Consumer if standalone.
    ES_BULK_FLUSH_PERIOD1seconds[GMS, MAE Consumer]How frequently indexed documents are made available for query.
    ALWAYS_EMIT_CHANGE_LOGfalseboolean[GMS]Enables always emitting a MCL even when no changes are detected. Used for Time Based Lineage when no changes occur.
    GRAPH_SERVICE_DIFF_MODE_ENABLEDtrueboolean[GMS]Enables diff mode for graph writes, uses a different code path that produces a diff from previous to next to write relationships instead of wholesale deleting edges and reading.

    Caching

    VariableDefaultUnit/TypeComponentsDescription
    SEARCH_SERVICE_ENABLE_CACHEfalseboolean[GMS]Enable caching of search results.
    SEARCH_SERVICE_CACHE_IMPLEMENTATIONcaffeinestring[GMS]Set to hazelcast if the number of GMS replicas > 1 for enabling distributed cache.
    CACHE_TTL_SECONDS600seconds[GMS]Default cache time to live.
    CACHE_MAX_SIZE10000objects[GMS]Maximum number of items to cache.
    LINEAGE_SEARCH_CACHE_ENABLEDtrueboolean[GMS]Enables in-memory cache for searchAcrossLineage query.
    CACHE_ENTITY_COUNTS_TTL_SECONDS600seconds[GMS]Homepage entity count time to live.
    CACHE_SEARCH_LINEAGE_TTL_SECONDS86400seconds[GMS]Search lineage cache time to live.
    CACHE_SEARCH_LINEAGE_LIGHTNING_THRESHOLD300objects[GMS]Lineage graphs exceeding this limit will use a local cache.
    VariableDefaultUnit/TypeComponentsDescription
    INDEX_PREFIX``string[GMS, MAE Consumer, Elasticsearch Setup, System Update]Prefix Elasticsearch indices with the given string.
    ELASTICSEARCH_NUM_SHARDS_PER_INDEX1integer[System Update]Default number of shards per Elasticsearch index.
    ELASTICSEARCH_NUM_REPLICAS_PER_INDEX1integer[System Update]Default number of replicas per Elasticsearch index.
    ELASTICSEARCH_BUILD_INDICES_RETENTION_VALUE60integer[System Update]Number of units for the retention of Elasticsearch clone/backup indices.
    ELASTICSEARCH_BUILD_INDICES_RETENTION_UNITDAYSstring[System Update]Unit for the retention of Elasticsearch clone/backup indices.
    ELASTICSEARCH_QUERY_EXACT_MATCH_EXCLUSIVEfalseboolean[GMS]Only return exact matches when using quotes.
    ELASTICSEARCH_QUERY_EXACT_MATCH_WITH_PREFIXtrueboolean[GMS]Include prefix match in exact match results.
    ELASTICSEARCH_QUERY_EXACT_MATCH_FACTOR10.0float[GMS]Multiply by this number on true exact match.
    ELASTICSEARCH_QUERY_EXACT_MATCH_PREFIX_FACTOR1.6float[GMS]Multiply by this number when prefix match.
    ELASTICSEARCH_QUERY_EXACT_MATCH_CASE_FACTOR0.7float[GMS]Multiply by this number when case insensitive match.
    ELASTICSEARCH_QUERY_EXACT_MATCH_ENABLE_STRUCTUREDtrueboolean[GMS]When using structured query, also include exact matches.
    ELASTICSEARCH_QUERY_PARTIAL_URN_FACTOR0.5float[GMS]Multiply by this number when partial token match on URN.
    ELASTICSEARCH_QUERY_PARTIAL_FACTOR0.4float[GMS]Multiply by this number when partial token match on non-URN field.
    ELASTICSEARCH_QUERY_CUSTOM_CONFIG_ENABLEDfalseboolean[GMS]Enable search query and ranking customization configuration.
    ELASTICSEARCH_QUERY_CUSTOM_CONFIG_FILEsearch_config.ymlstring[GMS]The location of the search customization configuration.

    Kafka

    In general, there are lots of Kafka configuration environment variables for both the producer and consumers defined in the official Spring Kafka documentation here. These environment variables follow the standard Spring representation of properties as environment variables. Simply replace the dot, ., with an underscore, _, and convert to uppercase.

    VariableDefaultUnit/TypeComponentsDescription
    KAFKA_LISTENER_CONCURRENCY1integer[GMS, MCE Consumer, MAE Consumer]Number of Kafka consumer threads. Optimize throughput by matching to topic partitions.
    SPRING_KAFKA_PRODUCER_PROPERTIES_MAX_REQUEST_SIZE1048576bytes[GMS, MCE Consumer, MAE Consumer]Max produced message size. Note that the topic configuration is not controlled by this variable.
    SCHEMA_REGISTRY_TYPEINTERNALstring[GMS, MCE Consumer, MAE Consumer]Schema registry implementation. One of INTERNAL or KAFKA or AWS_GLUE
    KAFKA_SCHEMAREGISTRY_URLhttp://localhost:8080/schema-registry/api/string[GMS, MCE Consumer, MAE Consumer]Schema registry url. Used for INTERNAL and KAFKA. The default value is for the GMS component. The MCE Consumer and MAE Consumer should be the GMS hostname and port.
    AWS_GLUE_SCHEMA_REGISTRY_REGIONus-east-1string[GMS, MCE Consumer, MAE Consumer]If using AWS_GLUE in the SCHEMA_REGISTRY_TYPE variable for the schema registry implementation.
    AWS_GLUE_SCHEMA_REGISTRY_NAME``string[GMS, MCE Consumer, MAE Consumer]If using AWS_GLUE in the SCHEMA_REGISTRY_TYPE variable for the schema registry.
    USE_CONFLUENT_SCHEMA_REGISTRYtrueboolean[kafka-setup]Enable Confluent schema registry configuration.

    Frontend

    VariableDefaultUnit/TypeComponentsDescription
    AUTH_VERBOSE_LOGGINGfalseboolean[Frontend]Enable verbose authentication logging. Enabling this will leak sensitive information in the logs. Disable when finished debugging.
    AUTH_OIDC_GROUPS_CLAIMgroupsstring[Frontend]Claim to use as the user's group.
    AUTH_OIDC_EXTRACT_GROUPS_ENABLEDfalseboolean[Frontend]Auto-provision the group from the user's group claim.
    - + \ No newline at end of file diff --git a/docs/deploy/gcp/index.html b/docs/deploy/gcp/index.html index d8715a9812699..4f39e16649156 100644 --- a/docs/deploy/gcp/index.html +++ b/docs/deploy/gcp/index.html @@ -8,7 +8,7 @@ - + @@ -35,7 +35,7 @@ you can click "CREATE A NEW CERTIFICATE" and input the host name of choice. GCP will create a certificate for you.

    Now press "CREATE" button on the left to create ingress! After around 5 minutes, you should see the following.

    Ingress Ready

    In your domain provider, add an A record for the host name set above using the IP address on the ingress page (noted with the red box). Once DNS updates, you should be able to access DataHub through the host name!!

    Note, ignore the warning icon next to ingress. It takes about ten minutes for ingress to check that the backend service is ready and show a check mark as follows. However, ingress is fully functional once you see the above page.

    Ingress Final

    - + \ No newline at end of file diff --git a/docs/deploy/kubernetes/index.html b/docs/deploy/kubernetes/index.html index 517911a51ec59..614981547dd26 100644 --- a/docs/deploy/kubernetes/index.html +++ b/docs/deploy/kubernetes/index.html @@ -8,7 +8,7 @@ - + @@ -45,7 +45,7 @@ quickstart-values.yaml file accordingly before installing.

    Run kubectl get pods to check whether all the datahub pods are running. You should get a result similar to below.

    NAME                                               READY   STATUS      RESTARTS   AGE
    datahub-datahub-frontend-84c58df9f7-5bgwx 1/1 Running 0 4m2s
    datahub-datahub-gms-58b676f77c-c6pfx 1/1 Running 0 4m2s
    datahub-datahub-mae-consumer-7b98bf65d-tjbwx 1/1 Running 0 4m3s
    datahub-datahub-mce-consumer-8c57d8587-vjv9m 1/1 Running 0 4m2s
    datahub-elasticsearch-setup-job-8dz6b 0/1 Completed 0 4m50s
    datahub-kafka-setup-job-6blcj 0/1 Completed 0 4m40s
    datahub-mysql-setup-job-b57kc 0/1 Completed 0 4m7s
    elasticsearch-master-0 1/1 Running 0 97m
    elasticsearch-master-1 1/1 Running 0 97m
    elasticsearch-master-2 1/1 Running 0 97m
    prerequisites-cp-schema-registry-cf79bfccf-kvjtv 2/2 Running 1 99m
    prerequisites-kafka-0 1/1 Running 2 97m
    prerequisites-mysql-0 1/1 Running 1 97m
    prerequisites-neo4j-community-0 1/1 Running 0 88m
    prerequisites-zookeeper-0 1/1 Running 0 97m

    You can run the following to expose the frontend locally. Note, you can find the pod name using the command above. In this case, the datahub-frontend pod name was datahub-datahub-frontend-84c58df9f7-5bgwx.

    kubectl port-forward <datahub-frontend pod name> 9002:9002

    You should be able to access the frontend via http://localhost:9002.

    Once you confirm that the pods are running well, you can set up ingress for datahub-frontend to expose the 9002 port to the public.

    Other useful commands

    CommandDescription
    helm uninstall datahubRemove DataHub
    helm lsList of Helm charts
    helm historyFetch a release history
    - + \ No newline at end of file diff --git a/docs/deploy/telemetry/index.html b/docs/deploy/telemetry/index.html index 833210f7513e4..2592aa7ed13ce 100644 --- a/docs/deploy/telemetry/index.html +++ b/docs/deploy/telemetry/index.html @@ -8,13 +8,13 @@ - +

    DataHub Telemetry

    Overview of DataHub Telemetry

    To effectively build and maintain the DataHub Project, we must understand how end-users work within DataHub. Beginning in version 0.8.35, DataHub collects anonymous usage statistics and errors to inform our roadmap priorities and to enable us to proactively address errors.

    Deployments are assigned a UUID which is sent along with event details, Java version, OS, and timestamp; telemetry collection is enabled by default and can be disabled by setting DATAHUB_TELEMETRY_ENABLED=false in your Docker Compose config.

    The source code is available here.

    - + \ No newline at end of file diff --git a/docs/dev-guides/timeline/index.html b/docs/dev-guides/timeline/index.html index e2aab427acbcf..74a215e67ec69 100644 --- a/docs/dev-guides/timeline/index.html +++ b/docs/dev-guides/timeline/index.html @@ -8,7 +8,7 @@ - + @@ -29,7 +29,7 @@ Here are a few screenshots showing how to navigate to it. You can try out the API and send example requests. ../imgs/timeline/dropdown-apis.png ../imgs/timeline/swagger-ui.png

    Future Work

    • Supporting versions as start and end parameters as part of the call to the timeline API
    • Supporting entities beyond Datasets
    • Adding GraphQL API support
    • Supporting materialization of computed versions for entity categories (compared to the current read-time version computation)
    • Support in the UI to visualize the timeline in various places (e.g. schema history, etc.)
    - + \ No newline at end of file diff --git a/docs/developers/index.html b/docs/developers/index.html index 13bfacce1bd00..432728bc25ece 100644 --- a/docs/developers/index.html +++ b/docs/developers/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ Please note that we do not actively support Windows in a non-virtualized environment.

    If you must use Windows, one workaround is to build within a virtualized environment, such as a VM (Virtual Machine) or WSL (Windows Subsystem for Linux). This approach can help ensure that your build environment remains isolated and stable, and that your code is compiled correctly.

    Common Build Issues

    Getting Unsupported class file major version 57

    You're probably using a Java version that's too new for gradle. Run the following command to check your Java version

    java --version

    While it may be possible to build and run DataHub using newer versions of Java, we currently only support Java 11.

    Getting cannot find symbol error for javax.annotation.Generated

    Similar to the previous issue, please use Java 11 to build the project. You can install multiple versions of Java on a single machine and switch between them using the JAVA_HOME environment variable. See this document for more details.

    :metadata-models:generateDataTemplate task fails with java.nio.file.InvalidPathException: Illegal char <:> at index XX or Caused by: java.lang.IllegalArgumentException: 'other' has different root error

    This is a known issue when building the project on Windows due to a bug in the Pegasus plugin. Please refer to Windows Compatibility.

    As we generate quite a few files from the models, it is possible that old generated files may conflict with new model changes. When this happens, a simple ./gradlew clean should resolve the issue.

    Execution failed for task ':metadata-service:restli-servlet-impl:checkRestModel'

    This generally means that an incompatible change was introduced to the rest.li API in GMS. You'll need to rebuild the snapshots/IDL by running the following command once

    ./gradlew :metadata-service:restli-servlet-impl:build -Prest.model.compatibility=ignore

    java.io.IOException: No space left on device

    This means you're running out of space on your disk to build. Please free up some space or try a different disk.

    Build failed for task ./gradlew :datahub-frontend:dist -x yarnTest -x yarnLint

    This could mean that you need to update your Yarn version

    - + \ No newline at end of file diff --git a/docs/docker/airflow/local_airflow/index.html b/docs/docker/airflow/local_airflow/index.html index 19f24e5cba81f..c20cf25ab6a6d 100644 --- a/docs/docker/airflow/local_airflow/index.html +++ b/docs/docker/airflow/local_airflow/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ Paused DAG Unpaused DAG

    Then trigger the DAG to run.

    Trigger the DAG

    After the DAG runs successfully, go over to your DataHub instance to see the Pipeline and navigate its lineage.

    DataHub Pipeline View

    DataHub Pipeline Entity

    DataHub Task View

    DataHub Lineage View

    TroubleShooting

    Most issues are related to connectivity between Airflow and DataHub.

    Here is how you can debug them.

    Find the Task Log

    Inspect the Log

    In this case, clearly the connection datahub-rest has not been registered. Looks like we forgot to register the connection with Airflow! Let's execute Step 4 to register the datahub connection with Airflow.

    In case the connection was registered successfully but you are still seeing Failed to establish a new connection, check if the connection is http://datahub-gms:8080 and not http://localhost:8080.

    After re-running the DAG, we see success!

    Pipeline Success

    - + \ No newline at end of file diff --git a/docs/docker/datahub-upgrade/index.html b/docs/docker/datahub-upgrade/index.html index 6bcee8f0887a5..4b2c5b2b18c02 100644 --- a/docs/docker/datahub-upgrade/index.html +++ b/docs/docker/datahub-upgrade/index.html @@ -8,7 +8,7 @@ - + @@ -23,7 +23,7 @@ can be specified using the -u flag when running the datahub-upgrade container.

    For example, to run the migration named "NoCodeDataMigration", you would execute the following:

    ./datahub-upgrade.sh -u NoCodeDataMigration

    OR

    docker pull acryldata/datahub-upgrade:head && docker run --env-file env/docker.env acryldata/datahub-upgrade:head -u NoCodeDataMigration

    In addition to the required -u argument, each upgrade may require specific arguments. You can provide arguments to individual upgrades using multiple -a arguments.

    For example, the NoCodeDataMigration upgrade provides 2 optional arguments detailed above: batchSize and batchDelayMs. To specify these, you can use multiple -a arguments of the form argumentName=argumentValue as follows:

    ./datahub-upgrade.sh -u NoCodeDataMigration -a batchSize=500 -a batchDelayMs=1000 // Small batches with 1 second delay. 

    OR

    docker pull acryldata/datahub-upgrade:head && docker run --env-file env/docker.env acryldata/datahub-upgrade:head -u NoCodeDataMigration -a batchSize=500 -a batchDelayMs=1000
    - + \ No newline at end of file diff --git a/docs/docker/development/index.html b/docs/docker/development/index.html index b3e1a22aed3d7..68c7243f66350 100644 --- a/docs/docker/development/index.html +++ b/docs/docker/development/index.html @@ -8,7 +8,7 @@ - + @@ -29,7 +29,7 @@ require Elasticsearch reindexing. If reindexing is required, the UI will render but may temporarily return errors until this job finishes.

    Running a specific service

    docker-compose up will launch all services in the configuration, including dependencies, unless they're already running. If you, for some reason, wish to change this behavior, check out these example commands.

    docker-compose -p datahub -f docker-compose.yml -f docker-compose.override.yml -f docker-compose-without-neo4j.m1.yml -f docker-compose.dev.yml up datahub-gms

    Will only start datahub-gms and its dependencies.

    docker-compose -p datahub -f docker-compose.yml -f docker-compose.override.yml -f docker-compose-without-neo4j.m1.yml -f docker-compose.dev.yml up --no-deps datahub-gms

    Will only start datahub-gms, without dependencies.

    - + \ No newline at end of file diff --git a/docs/docker/index.html b/docs/docker/index.html index 24dbc8c6b5961..41e5e316a67ef 100644 --- a/docs/docker/index.html +++ b/docs/docker/index.html @@ -8,7 +8,7 @@ - + @@ -22,7 +22,7 @@ successful release on Github will automatically publish the images.

    Building images

    This is not our recommended development flow and most developers should be following the Using Docker Images During Development guide.

    To build the full images (that we are going to publish), you need to run the following:

    COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker-compose -p datahub build

    This is because we're relying on BuildKit for multistage builds. It does not hurt to also set DATAHUB_VERSION to something unique.

    Community Built Images

    As the open source project grows, community members would like to contribute additions to the docker images. Not all contributions to the images can be accepted, because some changes are not useful for all community members and can increase build times, add dependencies, and introduce possible security vulnerabilities. In those cases this section can be used to point to Dockerfiles hosted by the community which build on top of the images published by the DataHub core team, along with any container registry links where the result of those images are maintained.

    - + \ No newline at end of file diff --git a/docs/domains/index.html b/docs/domains/index.html index 83e7ae9f7cf5f..c66495b4b05e0 100644 --- a/docs/domains/index.html +++ b/docs/domains/index.html @@ -8,7 +8,7 @@ - + @@ -22,7 +22,7 @@ by a Policy.

    Ingestion-time Assignment

    All SQL-based ingestion sources support assigning domains during ingestion using the domain configuration. Consult your source's configuration details page (e.g. Snowflake), to verify that it supports the Domain capability.

    note

    Assignment of domains during ingestion will overwrite domains that you have assigned in the UI. A single table can only belong to one domain.

    Here is a quick example of a snowflake ingestion recipe that has been enhanced to attach the Analytics domain to all tables in the long_tail_companions database in the analytics schema, and the Finance domain to all tables in the long_tail_companions database in the ecommerce schema.

    source:
    type: snowflake
    config:
    username: ${SNOW_USER}
    password: ${SNOW_PASS}
    account_id:
    warehouse: COMPUTE_WH
    role: accountadmin
    database_pattern:
    allow:
    - "long_tail_companions"
    schema_pattern:
    deny:
    - information_schema
    profiling:
    enabled: False
    domain:
    Analytics:
    allow:
    - "long_tail_companions.analytics.*"
    Finance:
    allow:
    - "long_tail_companions.ecommerce.*"
    note

    When bare domain names like Analytics are used, the ingestion system will first check if a domain like urn:li:domain:Analytics is provisioned; failing that, it will check for a provisioned domain that has the same name. If we are unable to resolve bare domain names to provisioned domains, then ingestion will refuse to proceed until the domain is provisioned on DataHub.

    You can also provide fully-qualified domain names to ensure that no ingestion-time domain resolution is needed. For example, the following recipe shows an example using fully qualified domain names:

    source:
    type: snowflake
    config:
    username: ${SNOW_USER}
    password: ${SNOW_PASS}
    account_id:
    warehouse: COMPUTE_WH
    role: accountadmin
    database_pattern:
    allow:
    - "long_tail_companions"
    schema_pattern:
    deny:
    - information_schema
    profiling:
    enabled: False
    domain:
    "urn:li:domain:6289fccc-4af2-4cbb-96ed-051e7d1de93c":
    allow:
    - "long_tail_companions.analytics.*"
    "urn:li:domain:07155b15-cee6-4fda-b1c1-5a19a6b74c3a":
    allow:
    - "long_tail_companions.ecommerce.*"

    Searching by Domain

    Once you've created a Domain, you can use the search bar to find it.

    Clicking on the search result will take you to the Domain's profile, where you can edit its description, add / remove owners, and view the assets inside the Domain.

    Once you've added assets to a Domain, you can filter search results to limit to those Assets within a particular Domain using the left-side search filters.

    On the homepage, you'll also find a list of the most popular Domains in your organization.

    Additional Resources

    Videos

    Supercharge Data Mesh with Domains in DataHub

    GraphQL

    Examples

    Creating a Domain

    mutation createDomain {
    createDomain(input: { name: "My New Domain", description: "An optional description" })
    }

    This query will return an urn which you can use to fetch the Domain details.

    Fetching a Domain by Urn

    query getDomain {
    domain(urn: "urn:li:domain:engineering") {
    urn
    properties {
    name
    description
    }
    entities {
    total
    }
    }
    }

    Adding a Dataset to a Domain

    mutation setDomain {
    setDomain(entityUrn: "urn:li:dataset:(urn:li:dataPlatform:hdfs,SampleHdfsDataset,PROD)", domainUrn: "urn:li:domain:engineering")
    }

    Pro Tip! You can try out the sample queries by visiting <your-datahub-url>/api/graphiql.

    DataHub Blog

    FAQ and Troubleshooting

    What is the difference between DataHub Domains, Tags, and Glossary Terms?

    DataHub supports Tags, Glossary Terms, & Domains as distinct types of Metadata that are suited for specific purposes:

    • Tags: Informal, loosely controlled labels that serve as a tool for search & discovery. Assets may have multiple tags. No formal, central management.
    • Glossary Terms: A controlled vocabulary, with optional hierarchy. Terms are typically used to standardize types of leaf-level attributes (i.e. schema fields) for governance. E.g. (EMAIL_PLAINTEXT)
    • Domains: A set of top-level categories. Usually aligned to business units / disciplines to which the assets are most relevant. Central or distributed management. Single Domain assignment per data asset.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/features/dataset-usage-and-query-history/index.html b/docs/features/dataset-usage-and-query-history/index.html index 8d18493fbb0b1..7b996290095a0 100644 --- a/docs/features/dataset-usage-and-query-history/index.html +++ b/docs/features/dataset-usage-and-query-history/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ In some sources, column level usage is also calculated, which can help identify frequently used columns.

    With sources that support usage statistics, you can collect Dataset, Dashboard, and Chart usages.

    Dataset Usage & Query History Setup, Prerequisites, and Permissions

    To ingest Dataset Usage & Query History data, you should first check the specific source's documentation to see whether this is supported by the DataHub source and how to enable it.

    You can validate this in the DataHub source's capabilities section:

    Some sources require a separate, usage-specific recipe to ingest Usage and Query History metadata. In this case, it is noted in the capabilities summary, like so:

    Please always check the usage prerequisites page if the source has one, as you may need to add additional permissions that are only required for usage extraction.

    Using Dataset Usage & Query History

    After successful ingestion, the Queries and Stats tab will be enabled on datasets with any usage.

    On the Queries tab, you can see the top 5 most often run queries which referenced this dataset.

    On the Stats tab, you can see the top 5 users who ran the most queries which referenced this dataset.

    With the collected usage data, you can even see column-level usage statistics (Redshift Usage doesn't support this yet):

    Additional Resources

    Videos

    DataHub 101: Data Profiling and Usage Stats 101

    GraphQL

    FAQ and Troubleshooting

    Why is my Queries/Stats tab greyed out?

    The Queries/Stats tab is greyed out if there are no usage statistics for that dataset, or if no ingestion with usage extraction has been run yet.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/features/index.html b/docs/features/index.html index 10588c77509e0..f09b2c775efbf 100644 --- a/docs/features/index.html +++ b/docs/features/index.html @@ -8,13 +8,13 @@ - +

    DataHub Features Overview

    DataHub is a modern data catalog built to enable end-to-end data discovery, data observability, and data governance. This extensible metadata platform is built for developers to tame the complexity of their rapidly evolving data ecosystems and for data practitioners to leverage the total value of data within their organization.

    Here’s an overview of DataHub’s current functionality. Check out our roadmap to see what's to come.


    Search and Discovery

    Search All Corners of Your Data Stack

    DataHub's unified search experience surfaces results across databases, data lakes, BI platforms, ML feature stores, orchestration tools, and more.

    Trace End-to-End Lineage

    Quickly understand the end-to-end journey of data by tracing lineage across platforms, datasets, ETL/ELT pipelines, charts, dashboards, and beyond.

    Understand the Impact of Breaking Changes on Downstream Dependencies

    Proactively identify which entities may be impacted by a breaking change using Impact Analysis.

    View Metadata 360 at a Glance

    Combine technical and logical metadata to provide a 360º view of your data entities.

    Generate Dataset Stats to understand the shape & distribution of the data

    Capture historical Data Validation Outcomes from tools like Great Expectations

    Leverage DataHub's Schema Version History to track changes to the physical structure of data over time


    Modern Data Governance

    Govern in Real Time

    The Actions Framework powers the following real-time use cases:

    • Notifications: Generate organization-specific notifications when a change is made on DataHub. For example, send an email to the governance team when a "PII" tag is added to any data asset.
    • Workflow Integration: Integrate DataHub into your organization's internal workflows. For example, create a Jira ticket when specific Tags or Terms are proposed on a Dataset.
    • Synchronization: Sync changes made in DataHub into a 3rd party system. For example, reflect Tag additions in DataHub into Snowflake.
    • Auditing: Audit who is making what changes on DataHub through time.

    Manage Entity Ownership

    Quickly and easily assign entity ownership to users and user groups.

    Govern with Tags, Glossary Terms, and Domains

    Empower data owners to govern their data entities with:

    1. Tags: Informal, loosely controlled labels that serve as a tool for search & discovery. No formal, central management.
    2. Glossary Terms: A controlled vocabulary with optional hierarchy, commonly used to describe core business concepts and measurements.
    3. Domains: Curated, top-level folders or categories, widely used in Data Mesh to organize entities by department (i.e., Finance, Marketing) or Data Products.


    DataHub Administration

    Create Users, Groups, & Access Policies

    DataHub admins can create Policies to define who can perform what action against which resource(s). When you create a new Policy, you will be able to define the following:

    • Policy Type - Platform (top-level DataHub Platform privileges, i.e., managing users, groups, and policies) or Metadata (ability to manipulate ownership, tags, documentation, and more)
    • Resource Type - Specify the type of resources, such as Datasets, Dashboards, Pipelines, and beyond
    • Privileges - Choose the set of permissions, such as Edit Owners, Edit Documentation, Edit Links
    • Users and/or Groups - Assign relevant Users and Groups; you can also assign the Policy to Resource Owners, regardless of which Group they belong to

    Ingest Metadata from the UI

    Create, configure, schedule, & execute batch metadata ingestion using the DataHub user interface. This makes getting metadata into DataHub easier by minimizing the overhead required to operate custom integration pipelines.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/athena/index.html b/docs/generated/ingestion/sources/athena/index.html index f60f2cc7926fd..f0c035b215c4e 100644 --- a/docs/generated/ingestion/sources/athena/index.html +++ b/docs/generated/ingestion/sources/athena/index.html @@ -8,13 +8,13 @@ - +

    Athena

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Data ProfilingOptionally enabled via configuration. Profiling uses sql queries on whole table which can be expensive operation.
    DescriptionsEnabled by default
    DomainsSupported via the domain config field
    Platform InstanceEnabled by default
    Table-Level LineageSupported for S3 tables

    This plugin supports extracting the following metadata from Athena

    • Tables, schemas etc.
    • Lineage for S3 tables.
    • Profiling when enabled.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[athena]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: athena
    config:
    # Coordinates
    aws_region: my_aws_region
    work_group: primary

    # Options
    query_result_location: "s3://my_staging_athena_results_bucket/results/"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    aws_region 
    string
    Aws region where your Athena database is located
    query_result_location 
    string
    S3 path to the query result bucket which should be used by AWS Athena to store results of the queries executed by DataHub.
    work_group 
    string
    The name of your Amazon Athena Workgroups
    aws_role_arn
    string
    AWS Role arn for Pyathena to assume in its connection
    aws_role_assumption_duration
    integer
    Duration to assume the AWS Role for. Maximum of 43200 (12 hours)
    Default: 3600
    catalog_name
    string
    Athena Catalog Name
    Default: awsdatacatalog
    database
    string
    The athena database to ingest from. If not set it will be autodetected
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: False
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    Same detection scheme as username
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    s3_staging_dir
    string
    [deprecated in favor of query_result_location] S3 query location
    scheme
    string
    Default: awsathena+rest
    username
    string
    Username credential. If not specified, detected with boto3 rules. See https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less then specified count. If set to null, no limit on the row count of tables to profile. Supported only in snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less then specified GBs. If set to null, no limit on the size of tables to profile. Supported only in snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 to 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take affect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 to 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take affect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.athena.AthenaSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Athena, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/azure-ad/index.html b/docs/generated/ingestion/sources/azure-ad/index.html index c797af989d311..e0d21f4f9fa21 100644 --- a/docs/generated/ingestion/sources/azure-ad/index.html +++ b/docs/generated/ingestion/sources/azure-ad/index.html @@ -8,7 +8,7 @@ - + @@ -40,7 +40,7 @@ associated with DataHub users (CorpUsers).

    Prerequisite

    Create a DataHub Application within the Azure AD Portal with the permissions to read your organization's Users and Groups. The following permissions are required, with the Application permission type:

    • Group.Read.All
    • GroupMember.Read.All
    • User.Read.All

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[azure-ad]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: "azure-ad"
    config:
    client_id: "00000000-0000-0000-0000-000000000000"
    tenant_id: "00000000-0000-0000-0000-000000000000"
    client_secret: "xxxxx"
    redirect: "https://login.microsoftonline.com/common/oauth2/nativeclient"
    authority: "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000"
    token_url: "https://login.microsoftonline.com/00000000-0000-0000-0000-000000000000/oauth2/token"
    graph_url: "https://graph.microsoft.com/v1.0"
    ingest_users: True
    ingest_groups: True
    groups_pattern:
    allow:
    - ".*"
    users_pattern:
    allow:
    - ".*"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    authority 
    string
    The authority (https://docs.microsoft.com/en-us/azure/active-directory/develop/msal-client-application-configuration) is a URL that indicates a directory that MSAL can request tokens from.
    client_id 
    string
    Application ID. Found in your app registration on Azure AD Portal
    client_secret 
    string
    Client secret. Found in your app registration on Azure AD Portal
    tenant_id 
    string
    Directory ID. Found in your app registration on Azure AD Portal
    token_url 
    string
    The token URL that acquires a token from Azure AD for authorizing requests. This source will only work with v1.0 endpoint.
    azure_ad_response_to_groupname_attr
    string
    Which Azure AD Group Response attribute to use as input to DataHub group name mapping.
    Default: displayName
    azure_ad_response_to_groupname_regex
    string
    A regex used to parse the DataHub group name from the attribute specified in azure_ad_response_to_groupname_attr.
    Default: (.*)
    azure_ad_response_to_username_attr
    string
    Which Azure AD User Response attribute to use as input to DataHub username mapping.
    Default: userPrincipalName
    azure_ad_response_to_username_regex
    string
    A regex used to parse the DataHub username from the attribute specified in azure_ad_response_to_username_attr.
    Default: (.*)
    filtered_tracking
    boolean
    If enabled, report will contain names of filtered users and groups.
    Default: True
    graph_url
    string
    Microsoft Graph API endpoint
    ingest_group_membership
    boolean
    Whether group membership should be ingested into DataHub. ingest_groups must be True if this is True.
    Default: True
    ingest_groups
    boolean
    Whether groups should be ingested into DataHub.
    Default: True
    ingest_groups_users
    boolean
    This option is useful only when ingest_users is set to False and ingest_group_membership to True. In effect, only the users who belong to the selected groups will be ingested.
    Default: True
    ingest_users
    boolean
    Whether users should be ingested into DataHub.
    Default: True
    mask_group_id
    boolean
    Whether workunit ID's for groups should be masked to avoid leaking sensitive information.
    Default: True
    mask_user_id
    boolean
    Whether workunit ID's for users should be masked to avoid leaking sensitive information.
    Default: True
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    redirect
    string
    Redirect URI. Found in your app registration on Azure AD Portal.
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    groups_pattern
    AllowDenyPattern
    regex patterns for groups to include in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    groups_pattern.allow
    array(string)
    groups_pattern.deny
    array(string)
    groups_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    users_pattern
    AllowDenyPattern
    regex patterns for users to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    users_pattern.allow
    array(string)
    users_pattern.deny
    array(string)
    users_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Azure AD Stateful Ingestion Config.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    As a prerequisite, you should create a DataHub Application within the Azure AD Portal with the permissions to read your organization's Users and Groups. The following permissions are required, with the Application permission type:

    • Group.Read.All
    • GroupMember.Read.All
    • User.Read.All

    You can add a permission by navigating to the permissions tab in your DataHub application on the Azure AD portal. Azure AD API Permissions

    You can view the necessary endpoints to configure by clicking on the Endpoints button in the Overview tab. Azure AD Endpoints

    Code Coordinates

    • Class Name: datahub.ingestion.source.identity.azure_ad.AzureADSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Azure AD, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/bigquery/index.html b/docs/generated/ingestion/sources/bigquery/index.html index cbf7e6f0177f2..6e49af64fc9b5 100644 --- a/docs/generated/ingestion/sources/bigquery/index.html +++ b/docs/generated/ingestion/sources/bigquery/index.html @@ -8,7 +8,7 @@ - + @@ -17,8 +17,8 @@ Certified

    Important Capabilities

    CapabilityStatusNotes
    Asset ContainersEnabled by default
    Data ProfilingOptionally enabled via configuration
    Dataset UsageEnabled by default, can be disabled via configuration include_usage_statistics
    DescriptionsEnabled by default
    Detect Deleted EntitiesOptionally enabled via stateful_ingestion.remove_stale_metadata
    DomainsSupported via the domain config field
    Platform InstancePlatform instance is pre-set to the BigQuery project id
    Schema MetadataEnabled by default
    Table-Level LineageOptionally enabled via configuration

    Prerequisites

    To understand how BigQuery ingestion needs to be set up, first familiarize yourself with the concepts in the diagram below:

    There are two important concepts to understand and identify:

    • Extractor Project: This is the project associated with a service-account, whose credentials you will be configuring in the connector. The connector uses this service-account to run jobs (including queries) within the project.
    • Bigquery Projects are the projects from which table metadata, lineage, usage, and profiling data need to be collected. By default, the extractor project is included in the list of projects that DataHub collects metadata from, but you can control that by passing in a specific list of project ids that you want to collect metadata from. Read the configuration section below to understand how to limit the list of projects that DataHub extracts metadata from.

    Create a datahub profile in GCP

    1. Create a custom role for datahub as per BigQuery docs.
    2. Follow the sections below to grant permissions to this role on this project and other projects.
    Basic Requirements (needed for metadata ingestion)
    1. Identify your Extractor Project where the service account will run queries to extract metadata.
    permission                      Description                                                                                                                        Capability                                                              
    bigquery.jobs.create          Run jobs (e.g. queries) within the project. This is only needed for the extractor project where the service account belongs.
    bigquery.jobs.list            Manage the queries that the service account has sent. This is only needed for the extractor project where the service account belongs.
    bigquery.readsessions.create  Create a session for streaming large results. This is only needed for the extractor project where the service account belongs.
    bigquery.readsessions.getData Get data from the read session. This is only needed for the extractor project where the service account belongs.
    2. Grant the following permissions to the Service Account on every project from which you would like to extract metadata
    info

    If you have multiple projects in your BigQuery setup, the role should be granted these permissions in each of the projects.

    permission                      Description                                                                                                  Capability                             Default GCP role which contains this permission
    bigquery.datasets.get           Retrieve metadata about a dataset.                                                                           Table Metadata Extraction              roles/bigquery.metadataViewer
    bigquery.datasets.getIamPolicy  Read a dataset's IAM permissions.                                                                            Table Metadata Extraction              roles/bigquery.metadataViewer
    bigquery.tables.list            List BigQuery tables.                                                                                        Table Metadata Extraction              roles/bigquery.metadataViewer
    bigquery.tables.get             Retrieve metadata for a table.                                                                               Table Metadata Extraction              roles/bigquery.metadataViewer
    bigquery.routines.get           Get Routines. Needed to retrieve metadata for a table from system tables.                                    Table Metadata Extraction              roles/bigquery.metadataViewer
    bigquery.routines.list          List Routines. Needed to retrieve metadata for a table from system tables.                                   Table Metadata Extraction              roles/bigquery.metadataViewer
    resourcemanager.projects.get    Retrieve project names and metadata.                                                                         Table Metadata Extraction              roles/bigquery.metadataViewer
    bigquery.jobs.listAll           List all jobs (queries) submitted by any user. Needed for lineage extraction.                                Lineage Extraction/Usage Extraction    roles/bigquery.resourceViewer
    logging.logEntries.list         Fetch log entries for lineage/usage data. Not required if use_exported_bigquery_audit_metadata is enabled.   Lineage Extraction/Usage Extraction    roles/logging.privateLogViewer
    logging.privateLogEntries.list  Fetch log entries for lineage/usage data. Not required if use_exported_bigquery_audit_metadata is enabled.   Lineage Extraction/Usage Extraction    roles/logging.privateLogViewer
    bigquery.tables.getData         Access table data to extract storage size, last updated at, data profiles etc.                               Profiling

    Create a service account in the Extractor Project

    1. Setup a ServiceAccount as per BigQuery docs and assign the previously created role to this service account.
    2. Download a service account JSON keyfile. Example credential file:
    {
    "type": "service_account",
    "project_id": "project-id-1234567",
    "private_key_id": "d0121d0000882411234e11166c6aaa23ed5d74e0",
    "private_key": "-----BEGIN PRIVATE KEY-----\nMIIyourkey\n-----END PRIVATE KEY-----",
    "client_email": "test@suppproject-id-1234567.iam.gserviceaccount.com",
    "client_id": "113545814931671546333",
    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
    "token_uri": "https://oauth2.googleapis.com/token",
    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
    "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%suppproject-id-1234567.iam.gserviceaccount.com"
    }
    3. To provide credentials to the source, you can either:

      Set an environment variable:

      $ export GOOGLE_APPLICATION_CREDENTIALS="/path/to/keyfile.json"

      or

      Set credential config in your source based on the credential json file. For example:

      credential:
      project_id: project-id-1234567
      private_key_id: "d0121d0000882411234e11166c6aaa23ed5d74e0"
      private_key: "-----BEGIN PRIVATE KEY-----\nMIIyourkey\n-----END PRIVATE KEY-----\n"
      client_email: "test@suppproject-id-1234567.iam.gserviceaccount.com"
      client_id: "123456678890"
    Profiling Requirements

    To profile BigQuery external tables backed by a Google Drive document, you need to grant the document's "Viewer" access to the service account's email address (client_email in the credentials json file). To find the Google Drive document linked to a BigQuery table, open the BigQuery console, locate the table, select "Details" from the drop-down menu in the top-right corner, and refer to the "Source" field. To share access to the Google Drive document, open the document, click "Share" in the top-right corner, and add the service account's email address that needs "Viewer" access. Google Drive Sharing Dialog

    Lineage Computation Details

    When use_exported_bigquery_audit_metadata is set to true, lineage information will be computed using exported bigquery logs. On how to setup exported bigquery audit logs, refer to the following docs on BigQuery audit logs. Note that only protoPayloads with "type.googleapis.com/google.cloud.audit.BigQueryAuditMetadata" are supported by the current ingestion version. The bigquery_audit_metadata_datasets parameter will be used only if use_exported_bigquery_audit_metadata is set to true.

    Note: the bigquery_audit_metadata_datasets parameter receives a list of datasets, in the format $PROJECT.$DATASET. This way queries from a multiple number of projects can be used to compute lineage information.

    Note: Since bigquery source also supports dataset level lineage, the auth client will require additional permissions to be able to access the google audit logs. Refer the permissions section in bigquery-usage section below which also accesses the audit logs.

    Profiling Details

    For performance reasons, we only profile the latest partition for partitioned tables and the latest shard for sharded tables. You can set the partition explicitly with the partition.partition_datetime property if you want, though note that the partition config will be applied to all partitioned tables.

    Caveats

    • For materialized views, lineage is dependent on logs being retained. If your GCP logging is retained for 30 days (default) and 30 days have passed since the creation of the materialized view we won't be able to get lineage for them.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[bigquery]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: bigquery
    config:
    # `schema_pattern` for BQ Datasets
    schema_pattern:
    allow:
    - finance_bq_dataset
    table_pattern:
    deny:
    # The exact name of the table is revenue_table_name
    # The reason we have this `.*` at the beginning is because the current implementation of table_pattern is testing
    # project_id.dataset_name.table_name
    # We will improve this in the future
    - .*revenue_table_name
    include_table_lineage: true
    include_usage_statistics: true
    profiling:
    enabled: true
    profile_table_level_only: true

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    bigquery_audit_metadata_datasets
    array(string)
    bucket_duration
    Enum
    Size of the time window to aggregate usage stats.
    Default: DAY
    capture_dataset_label_as_tag
    boolean
    Capture BigQuery dataset labels as DataHub tag
    Default: False
    capture_table_label_as_tag
    boolean
    Capture BigQuery table labels as DataHub tag
    Default: False
    column_limit
    integer
    Maximum number of columns to process in a table. This is a low level config property which should be touched with care. This restriction is needed because excessively wide tables can result in failure to ingest the schema.
    Default: 300
    convert_urns_to_lowercase
    boolean
    Convert urns to lowercase.
    Default: False
    debug_include_full_payloads
    boolean
    Include full payload into events. It is only for debugging and internal use.
    Default: False
    enable_legacy_sharded_table_support
    boolean
    Use the legacy sharded table urn suffix added.
    Default: True
    end_time
    string(date-time)
    Latest date of lineage/usage to consider. Default: Current time in UTC
    extra_client_options
    object
    Additional options to pass to google.cloud.logging_v2.client.Client.
    Default: {}
    extract_column_lineage
    boolean
    If enabled, generate column level lineage. Requires lineage_use_sql_parser to be enabled. This and incremental_lineage cannot both be enabled.
    Default: False
    extract_lineage_from_catalog
    boolean
    This flag enables the data lineage extraction from Data Lineage API exposed by Google Data Catalog. NOTE: This extractor can't build views lineage. It's recommended to enable the view's DDL parsing. Read the docs to have more information about: https://cloud.google.com/data-catalog/docs/concepts/about-data-lineage
    Default: False
    include_data_platform_instance
    boolean
    Whether to create a DataPlatformInstance aspect, equal to the BigQuery project id. If enabled, will cause redundancy in the browse path for BigQuery entities in the UI, because the project id is represented as the top-level container.
    Default: False
    include_external_url
    boolean
    Whether to populate BigQuery Console url to Datasets/Tables
    Default: True
    include_table_lineage
    boolean
    Option to enable/disable lineage generation. Is enabled by default.
    Default: True
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_usage_statistics
    boolean
    Generate usage statistic
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    incremental_lineage
    boolean
    When enabled, emits lineage as incremental to existing lineage already in DataHub. When disabled, re-states lineage on each run.
    Default: True
    lineage_parse_view_ddl
    boolean
    Sql parse view ddl to get lineage.
    Default: True
    lineage_sql_parser_use_raw_names
    boolean
    This parameter ignores the lowercase pattern stipulated in the SQLParser. NOTE: Ignored if lineage_use_sql_parser is False.
    Default: False
    lineage_use_sql_parser
    boolean
    Use sql parser to resolve view/table lineage.
    Default: True
    log_page_size
    integer
    The number of log items that will be queried per page for lineage collection
    Default: 1000
    match_fully_qualified_names
    boolean
    Whether dataset_pattern is matched against fully qualified dataset name <project_id>.<dataset_name>.
    Default: False
    max_query_duration
    number(time-delta)
    Correction to pad start_time and end_time with. For handling the case where the read happens within our time range but the query completion event is delayed and happens after the configured end time.
    Default: 900.0
    number_of_datasets_process_in_batch_if_profiling_enabled
    integer
    Number of partitioned table queried in batch when getting metadata. This is a low level config property which should be touched with care. This restriction is needed because we query partitions system view which throws error if we try to touch too many tables.
    Default: 200
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    project_id
    string
    [deprecated] Use project_id_pattern or project_ids instead.
    project_ids
    array(string)
    project_on_behalf
    string
    [Advanced] The BigQuery project in which queries are executed. Will be passed when creating a job. If not passed, falls back to the project associated with the service account.
    rate_limit
    boolean
    Should we rate limit requests made to API.
    Default: False
    requests_per_min
    integer
    Used to control number of API calls made per min. Only used when rate_limit is set to True.
    Default: 60
    scheme
    string
    Default: bigquery
    sharded_table_pattern
    string
    The regex pattern to match sharded tables and group as one table. This is a very low level config parameter, only change if you know what you are doing,
    Default: ((.+)[_$])?(\d{8})$
    sql_parser_use_external_process
    boolean
    When enabled, the sql parser will run isolated in a separate process. This can affect processing time but can protect from the sql parser's mem leak.
    Default: False
    start_time
    string(date-time)
    Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on bucket_duration). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.
    store_last_lineage_extraction_timestamp
    boolean
    Enable checking last lineage extraction date in store.
    Default: False
    store_last_profiling_timestamps
    boolean
    Enable storing last profile timestamp in store.
    Default: False
    store_last_usage_extraction_timestamp
    boolean
    Enable checking last usage timestamp in store.
    Default: True
    temp_table_dataset_prefix
    string
    If you are creating temp tables in a dataset with a particular prefix you can use this config to set the prefix for the dataset. This is to support workflows from before bigquery's introduction of temp tables. By default we use _ because of datasets that begin with an underscore are hidden by default https://cloud.google.com/bigquery/docs/datasets#dataset-naming.
    Default: _
    upstream_lineage_in_report
    boolean
    Useful for debugging lineage information. Set to True to see the raw lineage created internally.
    Default: False
    use_date_sharded_audit_log_tables
    boolean
    Whether to read date sharded tables or time partitioned tables when extracting usage from exported audit logs.
    Default: False
    use_exported_bigquery_audit_metadata
    boolean
    When configured, use BigQueryAuditMetadata in bigquery_audit_metadata_datasets to compute lineage information.
    Default: False
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    credential
    BigQueryCredential
    BigQuery credential information
    credential.client_email 
    string
    Client email
    credential.client_id 
    string
    Client Id
    credential.private_key 
    string
    Private key in a form of '-----BEGIN PRIVATE KEY-----\nprivate-key\n-----END PRIVATE KEY-----\n'
    credential.private_key_id 
    string
    Private key id
    credential.project_id 
    string
    Project id to set the credentials
    credential.auth_provider_x509_cert_url
    string
    Auth provider x509 certificate url
    credential.auth_uri
    string
    Authentication uri
    credential.client_x509_cert_url
    string
    If not set it will be default to https://www.googleapis.com/robot/v1/metadata/x509/client_email
    credential.token_uri
    string
    Token uri
    credential.type
    string
    Authentication type
    Default: service_account
    dataset_pattern
    AllowDenyPattern
    Regex patterns for dataset to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    dataset_pattern.allow
    array(string)
    dataset_pattern.deny
    array(string)
    dataset_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    project_id_pattern
    AllowDenyPattern
    Regex patterns for project_id to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    project_id_pattern.allow
    array(string)
    project_id_pattern.deny
    array(string)
    project_id_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    usage
    BigQueryUsageConfig
    Usage related configs
    Default: {'bucket_duration': 'DAY', 'end_time': '2023-08-24...
    usage.apply_view_usage_to_tables
    boolean
    Whether to apply view's usage to its base tables. If set to False, uses sql parser and applies usage to views / tables mentioned in the query. If set to True, usage is applied to base tables only.
    Default: False
    usage.bucket_duration
    Enum
    Size of the time window to aggregate usage stats.
    Default: DAY
    usage.end_time
    string(date-time)
    Latest date of lineage/usage to consider. Default: Current time in UTC
    usage.format_sql_queries
    boolean
    Whether to format sql queries
    Default: False
    usage.include_operational_stats
    boolean
    Whether to display operational stats.
    Default: True
    usage.include_read_operational_stats
    boolean
    Whether to report read operational stats. Experimental.
    Default: False
    usage.include_top_n_queries
    boolean
    Whether to ingest the top_n_queries.
    Default: True
    usage.max_query_duration
    number(time-delta)
    Correction to pad start_time and end_time with. For handling the case where the read happens within our time range but the query completion event is delayed and happens after the configured end time.
    Default: 900.0
    usage.start_time
    string(date-time)
    Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on bucket_duration). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.
    usage.top_n_queries
    integer
    Number of top queries to save to each table.
    Default: 10
    usage.user_email_pattern
    AllowDenyPattern
    regex patterns for user emails to filter in usage.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    usage.user_email_pattern.allow
    array(string)
    usage.user_email_pattern.deny
    array(string)
    usage.user_email_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less then specified count. If set to null, no limit on the row count of tables to profile. Supported only in snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less then specified GBs. If set to null, no limit on the size of tables to profile. Supported only in snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 to 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 to 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.bigquery_v2.bigquery.BigqueryV2Source
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for BigQuery, feel free to ping us on our Slack.

    You can set the partition explicitly with the partition.partition_datetime property if you want, though note that the partition config will be applied to all partitioned tables.

    Caveats

    • For materialized views, lineage is dependent on logs being retained. If your GCP logging is retained for 30 days (default) and 30 days have passed since the creation of the materialized view we won't be able to get lineage for them.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[bigquery]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: bigquery
    config:
    # `schema_pattern` for BQ Datasets
    schema_pattern:
    allow:
    - finance_bq_dataset
    table_pattern:
    deny:
    # The exact name of the table is revenue_table_name
    # The reason we have this `.*` at the beginning is because the current implementation of table_pattern is testing
    # project_id.dataset_name.table_name
    # We will improve this in the future
    - .*revenue_table_name
    include_table_lineage: true
    include_usage_statistics: true
    profiling:
    enabled: true
    profile_table_level_only: true

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    bigquery_audit_metadata_datasets
    array(string)
    bucket_duration
    Enum
    Size of the time window to aggregate usage stats.
    Default: DAY
    capture_dataset_label_as_tag
    boolean
    Capture BigQuery dataset labels as DataHub tag
    Default: False
    capture_table_label_as_tag
    boolean
    Capture BigQuery table labels as DataHub tag
    Default: False
    column_limit
    integer
    Maximum number of columns to process in a table. This is a low level config property which should be touched with care. This restriction is needed because excessively wide tables can result in failure to ingest the schema.
    Default: 300
    convert_urns_to_lowercase
    boolean
    Convert urns to lowercase.
    Default: False
    debug_include_full_payloads
    boolean
    Include full payload into events. It is only for debugging and internal use.
    Default: False
    enable_legacy_sharded_table_support
    boolean
    Use the legacy sharded table urn suffix added.
    Default: True
    end_time
    string(date-time)
    Latest date of lineage/usage to consider. Default: Current time in UTC
    extra_client_options
    object
    Additional options to pass to google.cloud.logging_v2.client.Client.
    Default: {}
    extract_column_lineage
    boolean
    If enabled, generate column level lineage. Requires lineage_use_sql_parser to be enabled. This and incremental_lineage cannot both be enabled.
    Default: False
    extract_lineage_from_catalog
    boolean
    This flag enables the data lineage extraction from Data Lineage API exposed by Google Data Catalog. NOTE: This extractor can't build views lineage. It's recommended to enable the view's DDL parsing. Read the docs to have more information about: https://cloud.google.com/data-catalog/docs/concepts/about-data-lineage
    Default: False
    include_data_platform_instance
    boolean
    Whether to create a DataPlatformInstance aspect, equal to the BigQuery project id. If enabled, will cause redundancy in the browse path for BigQuery entities in the UI, because the project id is represented as the top-level container.
    Default: False
    include_external_url
    boolean
    Whether to populate BigQuery Console url to Datasets/Tables
    Default: True
    include_table_lineage
    boolean
    Option to enable/disable lineage generation. Is enabled by default.
    Default: True
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_usage_statistics
    boolean
    Generate usage statistic
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    incremental_lineage
    boolean
    When enabled, emits lineage as incremental to existing lineage already in DataHub. When disabled, re-states lineage on each run.
    Default: True
    lineage_parse_view_ddl
    boolean
    Sql parse view ddl to get lineage.
    Default: True
    lineage_sql_parser_use_raw_names
    boolean
    This parameter ignores the lowercase pattern stipulated in the SQLParser. NOTE: Ignored if lineage_use_sql_parser is False.
    Default: False
    lineage_use_sql_parser
    boolean
    Use sql parser to resolve view/table lineage.
    Default: True
    log_page_size
    integer
    The number of log items to query per page for lineage collection.
    Default: 1000
    match_fully_qualified_names
    boolean
    Whether dataset_pattern is matched against fully qualified dataset name <project_id>.<dataset_name>.
    Default: False
    max_query_duration
    number(time-delta)
    Correction to pad start_time and end_time with. For handling the case where the read happens within our time range but the query completion event is delayed and happens after the configured end time.
    Default: 900.0
    number_of_datasets_process_in_batch_if_profiling_enabled
    integer
    Number of partitioned tables queried per batch when getting metadata. This is a low level config property which should be touched with care. This restriction is needed because we query the partitions system view, which throws an error if we try to touch too many tables.
    Default: 200
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    project_id
    string
    [deprecated] Use project_id_pattern or project_ids instead.
    project_ids
    array(string)
    project_on_behalf
    string
    [Advanced] The BigQuery project in which queries are executed. Will be passed when creating a job. If not passed, falls back to the project associated with the service account.
    rate_limit
    boolean
    Should we rate limit requests made to API.
    Default: False
    requests_per_min
    integer
    Used to control number of API calls made per min. Only used when rate_limit is set to True.
    Default: 60
    scheme
    string
    Default: bigquery
    sharded_table_pattern
    string
    The regex pattern to match sharded tables and group as one table. This is a very low level config parameter; only change it if you know what you are doing.
    Default: ((.+)[_$])?(\d{8})$
    sql_parser_use_external_process
    boolean
    When enabled, the SQL parser will run isolated in a separate process. This can affect processing time but can protect against memory leaks in the SQL parser.
    Default: False
    start_time
    string(date-time)
    Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on bucket_duration). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.
    store_last_lineage_extraction_timestamp
    boolean
    Enable checking last lineage extraction date in store.
    Default: False
    store_last_profiling_timestamps
    boolean
    Enable storing last profile timestamp in store.
    Default: False
    store_last_usage_extraction_timestamp
    boolean
    Enable checking last usage timestamp in store.
    Default: True
    temp_table_dataset_prefix
    string
    If you are creating temp tables in a dataset with a particular prefix you can use this config to set the prefix for the dataset. This is to support workflows from before bigquery's introduction of temp tables. By default we use _ because of datasets that begin with an underscore are hidden by default https://cloud.google.com/bigquery/docs/datasets#dataset-naming.
    Default: _
    upstream_lineage_in_report
    boolean
    Useful for debugging lineage information. Set to True to see the raw lineage created internally.
    Default: False
    use_date_sharded_audit_log_tables
    boolean
    Whether to read date sharded tables or time partitioned tables when extracting usage from exported audit logs.
    Default: False
    use_exported_bigquery_audit_metadata
    boolean
    When configured, use BigQueryAuditMetadata in bigquery_audit_metadata_datasets to compute lineage information.
    Default: False
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    credential
    BigQueryCredential
    BigQuery credential information
    credential.client_email 
    string
    Client email
    credential.client_id 
    string
    Client Id
    credential.private_key 
    string
    Private key in a form of '-----BEGIN PRIVATE KEY-----\nprivate-key\n-----END PRIVATE KEY-----\n'
    credential.private_key_id 
    string
    Private key id
    credential.project_id 
    string
    Project id to set the credentials
    credential.auth_provider_x509_cert_url
    string
    Auth provider x509 certificate url
    credential.auth_uri
    string
    Authentication uri
    credential.client_x509_cert_url
    string
    If not set it will be default to https://www.googleapis.com/robot/v1/metadata/x509/client_email
    credential.token_uri
    string
    Token uri
    credential.type
    string
    Authentication type
    Default: service_account
    dataset_pattern
    AllowDenyPattern
    Regex patterns for dataset to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    dataset_pattern.allow
    array(string)
    dataset_pattern.deny
    array(string)
    dataset_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    project_id_pattern
    AllowDenyPattern
    Regex patterns for project_id to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    project_id_pattern.allow
    array(string)
    project_id_pattern.deny
    array(string)
    project_id_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    usage
    BigQueryUsageConfig
    Usage related configs
    Default: {'bucket_duration': 'DAY', 'end_time': '2023-08-24...
    usage.apply_view_usage_to_tables
    boolean
    Whether to apply view's usage to its base tables. If set to False, uses sql parser and applies usage to views / tables mentioned in the query. If set to True, usage is applied to base tables only.
    Default: False
    usage.bucket_duration
    Enum
    Size of the time window to aggregate usage stats.
    Default: DAY
    usage.end_time
    string(date-time)
    Latest date of lineage/usage to consider. Default: Current time in UTC
    usage.format_sql_queries
    boolean
    Whether to format sql queries
    Default: False
    usage.include_operational_stats
    boolean
    Whether to display operational stats.
    Default: True
    usage.include_read_operational_stats
    boolean
    Whether to report read operational stats. Experimental.
    Default: False
    usage.include_top_n_queries
    boolean
    Whether to ingest the top_n_queries.
    Default: True
    usage.max_query_duration
    number(time-delta)
    Correction to pad start_time and end_time with. For handling the case where the read happens within our time range but the query completion event is delayed and happens after the configured end time.
    Default: 900.0
    usage.start_time
    string(date-time)
    Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on bucket_duration). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.
    usage.top_n_queries
    integer
    Number of top queries to save to each table.
    Default: 10
    usage.user_email_pattern
    AllowDenyPattern
    regex patterns for user emails to filter in usage.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    usage.user_email_pattern.allow
    array(string)
    usage.user_email_pattern.deny
    array(string)
    usage.user_email_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile a table only if it has been updated within the specified number of days. If set to null, there is no constraint on the last modified time of tables to profile. Supported only in Snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, there is no limit on the row count of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, there is no limit on the size of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for the date of the month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for the day of the week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.bigquery_v2.bigquery.BigqueryV2Source
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for BigQuery, feel free to ping us on our Slack.

    + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/business-glossary/index.html b/docs/generated/ingestion/sources/business-glossary/index.html index e187fac2be173..f0e273c05bc48 100644 --- a/docs/generated/ingestion/sources/business-glossary/index.html +++ b/docs/generated/ingestion/sources/business-glossary/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ identifier, you can generate a custom ID for your term. It should be unique across the entire Glossary.

    Here's an example ID: id: "urn:li:glossaryTerm:41516e310acbfd9076fffc2c98d2d1a3"

    A note of caution: once you select a custom ID, it cannot be easily changed.

    Compatibility

    Compatible with version 1 of business glossary format. The source will be evolved as we publish newer versions of this format.

    Code Coordinates

    • Class Name: datahub.ingestion.source.metadata.business_glossary.BusinessGlossaryFileSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Business Glossary, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/clickhouse/index.html b/docs/generated/ingestion/sources/clickhouse/index.html index 680f8053d949c..d87d2b2cf1d48 100644 --- a/docs/generated/ingestion/sources/clickhouse/index.html +++ b/docs/generated/ingestion/sources/clickhouse/index.html @@ -8,13 +8,13 @@ - +

    ClickHouse

    There are 2 sources that provide integration with ClickHouse

    Source ModuleDocumentation

    clickhouse

    This plugin extracts the following:

    • Metadata for tables, views, materialized views and dictionaries
    • Column types associated with each table (except *AggregateFunction and DateTime with timezone)
    • Table, row, and column statistics via optional SQL profiling.
    • Table, view, materialized view and dictionary(with CLICKHOUSE source_type) lineage
    tip

    You can also get fine-grained usage statistics for ClickHouse using the clickhouse-usage source described below.

    Read more...

    clickhouse-usage

    This plugin has the below functionalities -

    1. For a specific dataset this plugin ingests the following statistics -
      1. top n queries.
      2. top users.
      3. usage of each column in the dataset.
    2. Aggregation of these statistics into buckets, by day or hour granularity.

    Usage information is computed by querying the system.query_log table. In case you have a cluster or need to apply additional transformation/filters you can create a view and put to the query_log_table setting.

    note

    This source only does usage statistics. To get the tables, views, and schemas in your ClickHouse warehouse, ingest using the clickhouse source described above.

    Read more...

    Module clickhouse

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Data ProfilingOptionally enabled via configuration
    Detect Deleted EntitiesEnabled via stateful ingestion

    This plugin extracts the following:

    • Metadata for tables, views, materialized views and dictionaries
    • Column types associated with each table (except *AggregateFunction and DateTime with timezone)
    • Table, row, and column statistics via optional SQL profiling.
    • Table, view, materialized view and dictionary(with CLICKHOUSE source_type) lineage
    tip

    You can also get fine-grained usage statistics for ClickHouse using the clickhouse-usage source described below.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[clickhouse]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: clickhouse
    config:
    # Coordinates
    host_port: localhost:9000

    # Credentials
    username: user
    password: pass

    # Options
    platform_instance: DatabaseNameToBeIngested

    include_views: True # whether to include views, defaults to True
    include_tables: True # whether to include tables, defaults to True

    sink:
    # sink configs

    #---------------------------------------------------------------------------
    # For the HTTP interface:
    #---------------------------------------------------------------------------
    source:
    type: clickhouse
    config:
    host_port: localhost:8443
    uri_opts:
    protocol: https

    #---------------------------------------------------------------------------
    # For the Native interface:
    #---------------------------------------------------------------------------

    source:
    type: clickhouse
    config:
    host_port: localhost:9440
    scheme: clickhouse+native
    uri_opts:
    secure: True

    #------------------------------------------------------------------------
    # Example: using ingestion with configured SSL-TLS and uri_opts
    # See https://clickhouse.com/docs/en/guides/sre/configuring-ssl
    # ------------------------------------------------------------------------
    source:
    type: clickhouse
    config:
    # URL form, preferred
    sqlalchemy_uri: 'clickhouse+native://user:pass@localhost:9000/db?&ca_certs=ca.crt'

    # Non url form
    username: user
    password: pass

    host_port: localhost:9000

    uri_opts:
    secure: True
    ca_certs: "ca.crt"
    certfile: "clickhouse.crt"
    keyfile: "clickhouse.key"

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    bucket_duration
    Enum
    Size of the time window to aggregate usage stats.
    Default: DAY
    database
    string
    database (catalog)
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    end_time
    string(date-time)
    Latest date of lineage/usage to consider. Default: Current time in UTC
    host_port
    string
    ClickHouse host URL.
    Default: localhost:8123
    include_materialized_views
    boolean
    Default: True
    include_table_lineage
    boolean
    Whether table lineage should be ingested.
    Default: True
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    password
    Default:
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    platform_instance_map
    map(str,string)
    protocol
    string
    secure
    boolean
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    start_time
    string(date-time)
    Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on bucket_duration). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.
    uri_opts
    map(str,string)
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    database_pattern
    AllowDenyPattern
    Regex patterns for databases to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    database_pattern.allow
    array(string)
    database_pattern.deny
    array(string)
    database_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Deprecated in favour of database_pattern.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile a table only if it has been updated within the specified number of days. If set to null, there is no constraint on the last modified time of tables to profile. Supported only in Snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, there is no limit on the row count of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, there is no limit on the size of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for the date of the month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for the day of the week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.clickhouse.ClickHouseSource
    • Browse on GitHub

    Module clickhouse-usage

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Data ProfilingOptionally enabled via configuration
    Detect Deleted EntitiesEnabled via stateful ingestion

    This plugin has the below functionalities -

    1. For a specific dataset this plugin ingests the following statistics -
      1. top n queries.
      2. top users.
      3. usage of each column in the dataset.
    2. Aggregation of these statistics into buckets, by day or hour granularity.

    Usage information is computed by querying the system.query_log table. In case you have a cluster or need to apply additional transformations/filters, you can create a view and set it in the query_log_table setting.

    note

    This source only does usage statistics. To get the tables, views, and schemas in your ClickHouse warehouse, ingest using the clickhouse source described above.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[clickhouse-usage]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: clickhouse-usage
    config:
    # Coordinates
    host_port: db_host:port
    platform_instance: dev_cluster
    email_domain: acryl.io

    # Credentials
    username: username
    password: "password"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    email_domain 
    string
    bucket_duration
    Enum
    Size of the time window to aggregate usage stats.
    Default: DAY
    database
    string
    database (catalog)
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    end_time
    string(date-time)
    Latest date of lineage/usage to consider. Default: Current time in UTC
    format_sql_queries
    boolean
    Whether to format sql queries
    Default: False
    host_port
    string
    ClickHouse host URL.
    Default: localhost:8123
    include_materialized_views
    boolean
    Default: True
    include_operational_stats
    boolean
    Whether to display operational stats.
    Default: True
    include_read_operational_stats
    boolean
    Whether to report read operational stats. Experimental.
    Default: False
    include_table_lineage
    boolean
    Whether table lineage should be ingested.
    Default: True
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_top_n_queries
    boolean
    Whether to ingest the top_n_queries.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Default: {}
    password
    string(password)
    password
    Default:
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    platform_instance_map
    map(str,string)
    protocol
    string
    query_log_table
    string
    Default: system.query_log
    secure
    boolean
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    start_time
    string(date-time)
    Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on bucket_duration). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.
    top_n_queries
    integer
    Number of top queries to save to each table.
    Default: 10
    uri_opts
    map(str,string)
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    database_pattern
    AllowDenyPattern
    Regex patterns for databases to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    database_pattern.allow
    array(string)
    database_pattern.deny
    array(string)
    database_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Deprecated in favour of database_pattern.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    user_email_pattern
    AllowDenyPattern
    regex patterns for user emails to filter in usage.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    user_email_pattern.allow
    array(string)
    user_email_pattern.deny
    array(string)
    user_email_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile a table only if it has been updated within the specified number of days. If set to null, no constraint is applied on the last modified time of tables to profile. Supported only in Snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, there is no limit on the row count of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, there is no limit on the size of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for the date of the month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for the day of the week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.usage.clickhouse_usage.ClickHouseUsageSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for ClickHouse, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/csv/index.html b/docs/generated/ingestion/sources/csv/index.html index 59dc7f85f6a7c..f6d191de1e59a 100644 --- a/docs/generated/ingestion/sources/csv/index.html +++ b/docs/generated/ingestion/sources/csv/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ be applied at the entity field. If a subresource is populated (as it is for the second and third rows), glossary terms and tags will be applied on the column. Every row MUST have a resource. Also note that owners can only be applied at the resource level.

    note

    This source will not work on very large csv files that do not fit in memory.

    CLI based Ingestion

    Install the Plugin

    The csv-enricher source works out of the box with acryl-datahub.

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: csv-enricher
    config:
    # relative path to your csv file to ingest
    filename: ./path/to/your/file.csv

    # Default sink is datahub-rest and doesn't need to be configured
    # See https://datahubproject.io/docs/metadata-ingestion/sink_docs/datahub for customization options

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    filename 
    string
    File path or URL of CSV file to ingest.
    array_delimiter
    string
    Delimiter to use when parsing array fields (tags, terms and owners)
    Default: |
    delimiter
    string
    Delimiter to use when parsing CSV
    Default: ,
    write_semantics
    string
    Whether the new tags, terms and owners to be added will override the existing ones added only by this source or not. Value for this config can be "PATCH" or "OVERRIDE"
    Default: PATCH

    Code Coordinates

    • Class Name: datahub.ingestion.source.csv_enricher.CSVEnricherSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for CSV, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/databricks/index.html b/docs/generated/ingestion/sources/databricks/index.html index b771409a6dddc..73c9a3d2b7d05 100644 --- a/docs/generated/ingestion/sources/databricks/index.html +++ b/docs/generated/ingestion/sources/databricks/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    Databricks

    DataHub supports integration with Databricks ecosystem using a multitude of connectors, depending on your exact setup.

    Databricks Hive

    The simplest way to integrate is usually via the Hive connector. The Hive starter recipe has a section describing how to connect to your Databricks workspace.

    Databricks Unity Catalog (new)

    The recently introduced Unity Catalog provides a new way to govern your assets within the Databricks lakehouse. If you have enabled Unity Catalog, you can use the unity-catalog source (see below) to integrate your metadata into DataHub as an alternate to the Hive pathway.

    Databricks Spark

    To complete the picture, we recommend adding push-based ingestion from your Spark jobs to see real-time activity and lineage between your Databricks tables and your Spark jobs. Use the Spark agent to push metadata to DataHub using the instructions here.

    Watch the DataHub Talk at the Data and AI Summit 2022

    For a deeper look at how to think about DataHub within and across your Databricks ecosystem, watch the recording of our talk at the Data and AI Summit 2022.

    IMAGE_ALT

    Incubating

    Important Capabilities

    CapabilityStatusNotes
    Asset ContainersEnabled by default
    Column-level LineageEnabled by default
    Dataset UsageEnabled by default
    DescriptionsEnabled by default
    Detect Deleted EntitiesOptionally enabled via stateful_ingestion.remove_stale_metadata
    DomainsSupported via the domain config field
    Extract OwnershipSupported via the include_ownership config
    Platform InstanceEnabled by default
    Schema MetadataEnabled by default
    Table-Level LineageEnabled by default

    This plugin extracts the following metadata from Databricks Unity Catalog:

    • metastores
    • schemas
    • tables and column lineage

    Prerequisites

    • Get your Databricks instance's workspace url
    • Create a Databricks Service Principal
      • You can skip this step and use your own account to get things running quickly, but we strongly recommend creating a dedicated service principal for production use.
    • Generate a Databricks Personal Access token following the following guides:
    • Provision your service account:
      • To ingest your workspace's metadata and lineage, your service principal must have all of the following:
        • One of: metastore admin role, ownership of, or USE CATALOG privilege on any catalogs you want to ingest
        • One of: metastore admin role, ownership of, or USE SCHEMA privilege on any schemas you want to ingest
        • Ownership of or SELECT privilege on any tables and views you want to ingest
        • Ownership documentation
        • Privileges documentation
      • To include_usage_statistics (enabled by default), your service principal must have CAN_MANAGE permissions on any SQL Warehouses you want to ingest: guide.
      • To ingest profiling information with call_analyze (enabled by default), your service principal must have ownership or MODIFY privilege on any tables you want to profile.
        • Alternatively, you can run ANALYZE TABLE yourself on any tables you want to profile, then set call_analyze to false. You will still need SELECT privilege on those tables to fetch the results.
    • Check the starter recipe below and replace workspace_url and token with your information from the previous steps.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[unity-catalog]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: unity-catalog
    config:
    workspace_url: https://my-workspace.cloud.databricks.com
    token: "mygenerated_databricks_token"
    #metastore_id_pattern:
    # deny:
    # - 11111-2222-33333-44-555555
    #catalog_pattern:
    # allow:
    # - my-catalog
    #schema_pattern:
    # deny:
    # - information_schema
    #table_pattern:
    # allow:
    # - test.lineagedemo.dinner
    # First you have to create domains on Datahub by following this guide -> https://datahubproject.io/docs/domains/#domains-setup-prerequisites-and-permissions
    #domain:
    # urn:li:domain:1111-222-333-444-555:
    # allow:
    # - main.*

    stateful_ingestion:
    enabled: true

    pipeline_name: acme-corp-unity


    # sink configs if needed

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    token 
    string
    Databricks personal access token
    workspace_url 
    string
    Databricks workspace url. e.g. https://my-workspace.cloud.databricks.com
    bucket_duration
    Enum
    Size of the time window to aggregate usage stats.
    Default: DAY
    end_time
    string(date-time)
    Latest date of lineage/usage to consider. Default: Current time in UTC
    format_sql_queries
    boolean
    Whether to format sql queries
    Default: False
    include_column_lineage
    boolean
    Option to enable/disable column-level lineage generation. Currently we have to make one REST call per column to get column-level lineage due to the Databricks API, which can slow down ingestion.
    Default: True
    include_operational_stats
    boolean
    Whether to display operational stats.
    Default: True
    include_ownership
    boolean
    Option to enable/disable ownership generation for metastores, catalogs, schemas, and tables.
    Default: False
    include_read_operational_stats
    boolean
    Whether to report read operational stats. Experimental.
    Default: False
    include_table_lineage
    boolean
    Option to enable/disable lineage generation.
    Default: True
    include_top_n_queries
    boolean
    Whether to ingest the top_n_queries.
    Default: True
    include_usage_statistics
    boolean
    Generate usage statistics.
    Default: True
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    start_time
    string(date-time)
    Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on bucket_duration). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.
    store_last_profiling_timestamps
    boolean
    Enable storing last profile timestamp in store.
    Default: False
    top_n_queries
    integer
    Number of top queries to save to each table.
    Default: 10
    workspace_name
    string
    Name of the workspace. Default to deployment name present in workspace_url
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    catalog_pattern
    AllowDenyPattern
    Regex patterns for catalogs to filter in ingestion. Specify regex to match the full metastore.catalog name.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    catalog_pattern.allow
    array(string)
    catalog_pattern.deny
    array(string)
    catalog_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Regex patterns for schemas to filter in ingestion. Specify regex to the full metastore.catalog.schema name. e.g. to match all tables in schema analytics, use the regex ^mymetastore\.mycatalog\.analytics$.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in catalog.schema.table format. e.g. to match all tables starting with customer in Customer catalog and public schema, use the regex Customer\.public\.customer.*.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    user_email_pattern
    AllowDenyPattern
    regex patterns for user emails to filter in usage.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    user_email_pattern.allow
    array(string)
    user_email_pattern.deny
    array(string)
    user_email_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    UnityCatalogProfilerConfig
    Data profiling configuration
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.call_analyze
    boolean
    Whether to call ANALYZE TABLE as part of profile ingestion.If false, will ingest the results of the most recent ANALYZE TABLE call, if any.
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.max_wait_secs
    integer
    Maximum time to wait for an ANALYZE TABLE query to complete.
    Default: 3600
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only or include column-level profiling as well.
    Default: False
    profiling.warehouse_id
    string
    SQL Warehouse id, for running profiling queries.
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for the date of the month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for the day of the week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    profiling.pattern
    AllowDenyPattern
    Regex patterns to filter tables for profiling during ingestion. Specify regex to match the catalog.schema.table format. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profiling.pattern.allow
    array(string)
    profiling.pattern.deny
    array(string)
    profiling.pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Unity Catalog Stateful Ingestion Config.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Troubleshooting

    No data lineage captured or missing lineage

    Check that you meet the Unity Catalog lineage requirements.

    Also check the Unity Catalog limitations to make sure that lineage would be expected to exist in this case.

    Lineage extraction is too slow

    Currently, there is no way to get table or column lineage in bulk from the Databricks Unity Catalog REST api. Table lineage calls require one API call per table, and column lineage calls require one API call per column. If you find metadata extraction taking too long, you can turn off column level lineage extraction via the include_column_lineage config flag.

    Code Coordinates

    • Class Name: datahub.ingestion.source.unity.source.UnityCatalogSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Databricks, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/datahub/index.html b/docs/generated/ingestion/sources/datahub/index.html index a3d72850e5fe0..c7b09a2b5ac38 100644 --- a/docs/generated/ingestion/sources/datahub/index.html +++ b/docs/generated/ingestion/sources/datahub/index.html @@ -8,13 +8,13 @@ - +

    DataHub

    Testing

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[datahub]'

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    commit_state_interval
    integer
    Number of records to process before committing state
    Default: 1000
    commit_with_parse_errors
    boolean
    Whether to update createdon timestamp and kafka offset despite parse errors. Enable if you want to ignore the errors.
    Default: False
    include_all_versions
    boolean
    If enabled, include all versions of each aspect. Otherwise, only include the latest version of each aspect.
    Default: False
    kafka_topic_name
    string
    Name of kafka topic containing timeseries MCLs
    Default: MetadataChangeLog_Timeseries_v1
    mysql_batch_size
    integer
    Number of records to fetch from MySQL at a time
    Default: 10000
    mysql_table_name
    string
    Name of MySQL table containing all versioned aspects
    Default: metadata_aspect_v2
    kafka_connection
    KafkaConsumerConnectionConfig
    Kafka connection config
    Default: {'bootstrap': 'localhost:9092', 'schema_registry_u...
    kafka_connection.bootstrap
    string
    Default: localhost:9092
    kafka_connection.client_timeout_seconds
    integer
    The request timeout used when interacting with the Kafka APIs.
    Default: 60
    kafka_connection.consumer_config
    object
    Extra consumer config serialized as JSON. These options will be passed into Kafka's DeserializingConsumer. See https://docs.confluent.io/platform/current/clients/confluent-kafka-python/html/index.html#deserializingconsumer and https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md .
    kafka_connection.schema_registry_config
    object
    Extra schema registry config serialized as JSON. These options will be passed into Kafka's SchemaRegistryClient. https://docs.confluent.io/platform/current/clients/confluent-kafka-python/html/index.html?#schemaregistryclient
    kafka_connection.schema_registry_url
    string
    Default: http://localhost:8080/schema-registry/api/
    mysql_connection
    MySQLConnectionConfig
    MySQL connection config
    Default: {'username': None, 'host_port': 'localhost:3306', ...
    mysql_connection.database
    string
    database (catalog)
    mysql_connection.database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    mysql_connection.host_port
    string
    MySQL host URL.
    Default: localhost:3306
    mysql_connection.options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    mysql_connection.password
    string(password)
    password
    mysql_connection.scheme
    string
    Default: mysql+pymysql
    mysql_connection.sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    mysql_connection.username
    string
    username
    stateful_ingestion
    StatefulIngestionConfig
    Stateful Ingestion Config
    Default: {'enabled': True, 'max_checkpoint_state_size': 167...
    stateful_ingestion.enabled
    boolean
    Whether or not to enable stateful ingestion.
    Default: False

    Code Coordinates

    • Class Name: datahub.ingestion.source.datahub.datahub_source.DataHubSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for DataHub, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/dbt/index.html b/docs/generated/ingestion/sources/dbt/index.html index 7123481fe2766..7c6eddbdccbb2 100644 --- a/docs/generated/ingestion/sources/dbt/index.html +++ b/docs/generated/ingestion/sources/dbt/index.html @@ -8,7 +8,7 @@ - + @@ -24,7 +24,7 @@ The following recipe shows you how to emit only test results.

    source:
    type: dbt
    config:
    manifest_path: _path_to_manifest_json
    catalog_path: _path_to_catalog_json
    test_results_path: _path_to_run_results_json
    target_platform: postgres
    entities_enabled:
    test_results: Only

    Similarly, the following recipe shows you how to emit everything (i.e. models, sources, seeds, test definitions) but not test results:

    source:
    type: dbt
    config:
    manifest_path: _path_to_manifest_json
    catalog_path: _path_to_catalog_json
    run_results_path: _path_to_run_results_json
    target_platform: postgres
    entities_enabled:
    test_results: No

    Code Coordinates

    • Class Name: datahub.ingestion.source.dbt.dbt_core.DBTCoreSource
    • Browse on GitHub

    Module dbt-cloud

    Incubating

    Important Capabilities

    CapabilityStatusNotes
    Dataset Usage
    Detect Deleted EntitiesEnabled via stateful ingestion
    Table-Level LineageEnabled by default

    This source pulls dbt metadata directly from the dbt Cloud APIs.

    You'll need to have a dbt Cloud job set up to run your dbt project, and "Generate docs on run" should be enabled.

    The token should have the "read metadata" permission.

    To get the required IDs, go to the job details page (this is the one with the "Run History" table), and look at the URL. It should look something like this: https://cloud.getdbt.com/next/deploy/107298/projects/175705/jobs/148094. In this example, the account ID is 107298, the project ID is 175705, and the job ID is 148094.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[dbt-cloud]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: "dbt-cloud"
    config:
    token: ${DBT_CLOUD_TOKEN}

    # In the URL https://cloud.getdbt.com/next/deploy/107298/projects/175705/jobs/148094,
    # 107298 is the account_id, 175705 is the project_id, and 148094 is the job_id

    account_id: # set to your dbt cloud account id
    project_id: # set to your dbt cloud project id
    job_id: # set to your dbt cloud job id
    run_id: # set to your dbt cloud run id. This is optional, and defaults to the latest run

    target_platform: postgres

    # Options
    target_platform: "my_target_platform_id" # e.g. bigquery/postgres/etc.

    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    account_id 
    integer
    The DBT Cloud account ID to use.
    job_id 
    integer
    The ID of the job to ingest metadata from.
    project_id 
    integer
    The dbt Cloud project ID to use.
    target_platform 
    string
    The platform that dbt is loading onto. (e.g. bigquery / redshift / postgres etc.)
    token 
    string
    The API token to use to authenticate with DBT Cloud.
    column_meta_mapping
    object
    mapping rules that will be executed against dbt column meta properties. Refer to the section below on dbt meta automated mappings.
    Default: {}
    convert_column_urns_to_lowercase
    boolean
    When enabled, converts column URNs to lowercase to ensure cross-platform compatibility. If target_platform is Snowflake, the default is True.
    Default: False
    enable_meta_mapping
    boolean
    When enabled, applies the mappings that are defined through the meta_mapping directives.
    Default: True
    enable_owner_extraction
    boolean
    When enabled, ownership info will be extracted from the dbt meta
    Default: True
    enable_query_tag_mapping
    boolean
    When enabled, applies the mappings that are defined through the query_tag_mapping directives.
    Default: True
    include_env_in_assertion_guid
    boolean
    Prior to version 0.9.4.2, the assertion GUIDs did not include the environment. If you're using multiple dbt ingestion that are only distinguished by env, then you should set this flag to True.
    Default: False
    incremental_lineage
    boolean
    When enabled, emits lineage as incremental to existing lineage already in DataHub. When disabled, re-states lineage on each run.
    Default: True
    meta_mapping
    object
    mapping rules that will be executed against dbt meta properties. Refer to the section below on dbt meta automated mappings.
    Default: {}
    metadata_endpoint
    string
    The dbt Cloud metadata API endpoint.
    owner_extraction_pattern
    string
    Regex string to extract owner from the dbt node using the (?P<name>...) syntax of the match object, where the group name must be owner. Examples: (1)r"(?P<owner>(.*)): (\w+) (\w+)" will extract jdoe as the owner from "jdoe: John Doe" (2) r"@(?P<owner>(.*))" will extract alice as the owner from "@alice".
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    query_tag_mapping
    object
    mapping rules that will be executed against dbt query_tag meta properties. Refer to the section below on dbt meta automated mappings.
    Default: {}
    run_id
    integer
    The ID of the run to ingest metadata from. If not specified, we'll default to the latest run.
    sql_parser_use_external_process
    boolean
    When enabled, the SQL parser will run isolated in a separate process. This can affect processing time but can protect against the SQL parser's memory leak.
    Default: False
    strip_user_ids_from_email
    boolean
    Whether or not to strip email id while adding owners using dbt meta actions.
    Default: False
    tag_prefix
    string
    Prefix added to tags during ingestion.
    Default: dbt:
    target_platform_instance
    string
    The platform instance for the platform that dbt is operating on. Use this if you have multiple instances of the same platform (e.g. redshift) and need to distinguish between them.
    use_identifiers
    boolean
    Use model identifier instead of model name if defined (if not, default to model name).
    Default: False
    write_semantics
    string
    Whether the new tags, terms and owners to be added will override the existing ones added only by this source or not. Value for this config can be "PATCH" or "OVERRIDE"
    Default: PATCH
    env
    string
    Environment to use in namespace when constructing URNs.
    Default: PROD
    entities_enabled
    DBTEntitiesEnabled
    Controls for enabling / disabling metadata emission for different dbt entities (models, test definitions, test results, etc.)
    Default: {'models': 'YES', 'sources': 'YES', 'seeds': 'YES'...
    entities_enabled.models
    Enum
    Emit metadata for dbt models when set to Yes or Only
    Default: YES
    entities_enabled.seeds
    Enum
    Emit metadata for dbt seeds when set to Yes or Only
    Default: YES
    entities_enabled.snapshots
    Enum
    Emit metadata for dbt snapshots when set to Yes or Only
    Default: YES
    entities_enabled.sources
    Enum
    Emit metadata for dbt sources when set to Yes or Only
    Default: YES
    entities_enabled.test_definitions
    Enum
    Emit metadata for test definitions when enabled when set to Yes or Only
    Default: YES
    entities_enabled.test_results
    Enum
    Emit metadata for test results when set to Yes or Only
    Default: YES
    node_name_pattern
    AllowDenyPattern
    regex patterns for dbt model names to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    node_name_pattern.allow
    array(string)
    node_name_pattern.deny
    array(string)
    node_name_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    DBT Stateful Ingestion Config.
    stateful_ingestion.enabled
    boolean
    Whether or not to enable stateful ingestion.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.dbt.dbt_cloud.DBTCloudSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for dbt, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/delta-lake/index.html b/docs/generated/ingestion/sources/delta-lake/index.html index 66192db1ad98c..199fa7d708e41 100644 --- a/docs/generated/ingestion/sources/delta-lake/index.html +++ b/docs/generated/ingestion/sources/delta-lake/index.html @@ -8,13 +8,13 @@ - +

    Delta Lake

    Incubating

    Important Capabilities

    CapabilityStatusNotes
    Extract TagsCan extract S3 object/bucket tags if enabled

    This plugin extracts:

    • Column types and schema associated with each delta table
    • Custom properties: number_of_files, partition_columns, table_creation_time, location, version etc.
    caution

    If you are ingesting datasets from AWS S3, we recommend running the ingestion on a server in the same region to avoid high egress costs.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[delta-lake]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: delta-lake
    config:
    env: "PROD"
    platform_instance: "my-delta-lake"
    base_path: "/path/to/data/folder"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    base_path 
    string
    Path to table (s3 or local file system). If path is not a delta table path then all subfolders will be scanned to detect and ingest delta tables.
    platform
    string
    The platform that this source connects to
    Default: delta-lake
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    relative_path
    string
    If set, delta-tables will be searched at location '<base_path>/<relative_path>' and URNs will be created using relative_path only.
    require_files
    boolean
    Whether DeltaTable should track files. Consider setting this to False for large delta tables, resulting in significant memory reduction for the ingestion process. When set to False, number_of_files in the delta table cannot be reported.
    Default: True
    version_history_lookback
    integer
    Number of previous version histories to be ingested. Defaults to 1. If set to -1 all version history will be ingested.
    Default: 1
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    s3
    S3
    s3.use_s3_bucket_tags
    boolean
    Whether or not to create tags in datahub from the s3 bucket
    Default: False
    s3.use_s3_object_tags
    boolean
    Whether or not to create tags in datahub from the s3 object
    Default: False
    s3.aws_config
    AwsConnectionConfig
    AWS configuration
    s3.aws_config.aws_region 
    string
    AWS region code.
    s3.aws_config.aws_access_key_id
    string
    AWS access key ID. Can be auto-detected, see the AWS boto3 docs for details.
    s3.aws_config.aws_endpoint_url
    string
    The AWS service endpoint. This is normally constructed automatically, but can be overridden here.
    s3.aws_config.aws_profile
    string
    Named AWS profile to use. Only used if access key / secret are unset. If not set the default will be used
    s3.aws_config.aws_proxy
    map(str,string)
    s3.aws_config.aws_secret_access_key
    string
    AWS secret access key. Can be auto-detected, see the AWS boto3 docs for details.
    s3.aws_config.aws_session_token
    string
    AWS session token. Can be auto-detected, see the AWS boto3 docs for details.
    s3.aws_config.aws_role
    One of string, union(anyOf), string, AwsAssumeRoleConfig
    AWS roles to assume. If using the string format, the role ARN can be specified directly. If using the object format, the role can be specified in the RoleArn field and additional available arguments are documented at https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sts.html?highlight=assume_role#STS.Client.assume_role
    s3.aws_config.aws_role.RoleArn 
    string
    ARN of the role to assume.
    s3.aws_config.aws_role.ExternalId
    string
    External ID to use when assuming the role.
    table_pattern
    AllowDenyPattern
    regex patterns for tables to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True

    Usage Guide

    If you are new to Delta Lake and want to test out a simple integration with Delta Lake and DataHub, you can follow this guide.

    Delta Table on Local File System

    Step 1

    Create a delta table using the sample PySpark code below if you don't have a delta table you can point to.

    import uuid
    import random
    from pyspark.sql import SparkSession
    from delta.tables import DeltaTable

    def generate_data():
    return [(y, m, d, str(uuid.uuid4()), str(random.randrange(10000) % 26 + 65) * 3, random.random()*10000)
    for d in range(1, 29)
    for m in range(1, 13)
    for y in range(2000, 2021)]

    jar_packages = ["org.apache.hadoop:hadoop-aws:3.2.3", "io.delta:delta-core_2.12:1.2.1"]
    spark = SparkSession.builder \
    .appName("quickstart") \
    .master("local[*]") \
    .config("spark.jars.packages", ",".join(jar_packages)) \
    .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") \
    .config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog") \
    .getOrCreate()

    table_path = "quickstart/my-table"
    columns = ["year", "month", "day", "sale_id", "customer", "total_cost"]
    spark.sparkContext.parallelize(generate_data()).toDF(columns).repartition(1).write.format("delta").save(table_path)

    df = spark.read.format("delta").load(table_path)
    df.show()

    Step 2

    Create a datahub ingestion yaml file (delta.dhub.yaml) to ingest metadata from the delta table you just created.

    source:
    type: "delta-lake"
    config:
    base_path: "quickstart/my-table"

    sink:
    type: "datahub-rest"
    config:
    server: "http://localhost:8080"

    Note: Make sure you run the Spark code as well as the recipe from the same folder; otherwise, use absolute paths.

    Step 3

    Execute the ingestion recipe:

    datahub ingest -c delta.dhub.yaml

    Delta Table on S3

    Step 1

    Set up your AWS credentials by creating an AWS credentials config file; typically in '$HOME/.aws/credentials'.

    [my-creds]
    aws_access_key_id: ######
    aws_secret_access_key: ######

    Step 2: Create a Delta Table using the PySpark sample code below unless you already have Delta Tables on your S3.

    from pyspark.sql import SparkSession
    from delta.tables import DeltaTable
    from configparser import ConfigParser
    import uuid
    import random
    def generate_data():
    return [(y, m, d, str(uuid.uuid4()), str(random.randrange(10000) % 26 + 65) * 3, random.random()*10000)
    for d in range(1, 29)
    for m in range(1, 13)
    for y in range(2000, 2021)]

    jar_packages = ["org.apache.hadoop:hadoop-aws:3.2.3", "io.delta:delta-core_2.12:1.2.1"]
    spark = SparkSession.builder \
    .appName("quickstart") \
    .master("local[*]") \
    .config("spark.jars.packages", ",".join(jar_packages)) \
    .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") \
    .config("spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog") \
    .getOrCreate()


    config_object = ConfigParser()
    config_object.read("$HOME/.aws/credentials")
    profile_info = config_object["my-creds"]
    access_id = profile_info["aws_access_key_id"]
    access_key = profile_info["aws_secret_access_key"]

    hadoop_conf = spark._jsc.hadoopConfiguration()
    hadoop_conf.set("fs.s3a.impl", "org.apache.hadoop.fs.s3a.S3AFileSystem")
    hadoop_conf.set("fs.s3a.aws.credentials.provider", "org.apache.hadoop.fs.s3a.SimpleAWSCredentialsProvider")
    hadoop_conf.set("fs.s3a.access.key", access_id)
    hadoop_conf.set("fs.s3a.secret.key", access_key)

    table_path = "s3a://my-bucket/my-folder/sales-table"
    columns = ["year", "month", "day", "sale_id", "customer", "total_cost"]
    spark.sparkContext.parallelize(generate_data()).toDF(columns).repartition(1).write.format("delta").save(table_path)
    df = spark.read.format("delta").load(table_path)
    df.show()

    Step 3

    Create a datahub ingestion yaml file (delta.s3.dhub.yaml) to ingest metadata from the delta table you just created.

    source:
    type: "delta-lake"
    config:
    base_path: "s3://my-bucket/my-folder/sales-table"
    s3:
    aws_config:
    aws_access_key_id: <<Access key>>
    aws_secret_access_key: <<secret key>>

    sink:
    type: "datahub-rest"
    config:
    server: "http://localhost:8080"

    Step 4

    Execute the ingestion recipe:

    datahub ingest -c delta.s3.dhub.yaml

    Note

    The above recipes are minimal recipes. Please refer to Config Details section for the full configuration.

    Code Coordinates

    • Class Name: datahub.ingestion.source.delta_lake.source.DeltaLakeSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Delta Lake, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/demo-data/index.html b/docs/generated/ingestion/sources/demo-data/index.html index 2ed7a3c54b8b2..268535af39d6d 100644 --- a/docs/generated/ingestion/sources/demo-data/index.html +++ b/docs/generated/ingestion/sources/demo-data/index.html @@ -8,13 +8,13 @@ - +

    Demo Data

    This source loads sample data into DataHub. It is intended for demo and testing purposes only.

    CLI based Ingestion

    Install the Plugin

    The demo-data source works out of the box with acryl-datahub.

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: demo-data
    config: {}

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription

    Code Coordinates

    • Class Name: datahub.ingestion.source.demo_data.DemoDataSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Demo Data, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/druid/index.html b/docs/generated/ingestion/sources/druid/index.html index b680e7c182c51..47a5c32a00678 100644 --- a/docs/generated/ingestion/sources/druid/index.html +++ b/docs/generated/ingestion/sources/druid/index.html @@ -8,13 +8,13 @@ - +

    Druid

    Incubating

    Important Capabilities

    CapabilityStatusNotes
    Platform InstanceEnabled by default

    This plugin extracts the following:

    • Metadata for databases, schemas, and tables
    • Column types associated with each table
    • Table, row, and column statistics via optional SQL profiling.

    Note: It is important to explicitly define the deny schema pattern for internal Druid databases (lookup & sys) if adding a schema pattern. Otherwise, the crawler may crash before processing relevant databases. This deny pattern is defined by default but is overridden by user-submitted configurations.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[druid]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: druid
    config:
    # Coordinates
    host_port: "localhost:8082"

    # Credentials
    username: admin
    password: password

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    host_port 
    string
    host URL
    database
    string
    database (catalog)
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    scheme
    string
    Default: druid
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    regex patterns for schemas to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': ['^(lookup|sysgit|view)....
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, there is no limit on the row count of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, there is no limit on the size of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at a lower frequency or not. This does not do any scheduling; it just adds additional checks for when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for the date of the month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for the day of the week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    Whether or not to enable stateful ingestion.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.druid.DruidSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Druid, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/elasticsearch/index.html b/docs/generated/ingestion/sources/elasticsearch/index.html index 0bab9075c7c03..2ab70f92a24c2 100644 --- a/docs/generated/ingestion/sources/elasticsearch/index.html +++ b/docs/generated/ingestion/sources/elasticsearch/index.html @@ -8,13 +8,13 @@ - +

    Elasticsearch

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Platform InstanceEnabled by default

    This plugin extracts the following:

    • Metadata for indexes
    • Column types associated with each index field

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[elasticsearch]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: "elasticsearch"
    config:
    # Coordinates
    host: 'localhost:9200'

    # Credentials
    username: user # optional
    password: pass # optional

    # SSL support
    use_ssl: False
    verify_certs: False
    ca_certs: "./path/ca.cert"
    client_cert: "./path/client.cert"
    client_key: "./path/client.key"
    ssl_assert_hostname: False
    ssl_assert_fingerprint: "./path/cert.fingerprint"

    # Options
    url_prefix: "" # optional url_prefix
    env: "PROD"
    index_pattern:
    allow: [".*some_index_name_pattern*"]
    deny: [".*skip_index_name_pattern*"]
    ingest_index_templates: False
    index_template_pattern:
    allow: [".*some_index_template_name_pattern*"]

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    ca_certs
    string
    Path to a certificate authority (CA) certificate.
    client_cert
    string
    Path to the file containing the private key and the certificate, or cert only if using client_key.
    client_key
    string
    Path to the file containing the private key if using separate cert and key files.
    host
    string
    The elastic search host URI.
    Default: localhost:9200
    ingest_index_templates
    boolean
    Ingests ES index templates if enabled.
    Default: False
    password
    string
    The password credential.
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    ssl_assert_fingerprint
    string
    Verify the supplied certificate fingerprint if not None.
    ssl_assert_hostname
    boolean
    Use hostname verification if not False.
    Default: False
    url_prefix
    string
    There are cases where an enterprise would have multiple elastic search clusters. One way for them to manage is to have a single endpoint for all the elastic search clusters and use url_prefix for routing requests to different clusters.
    Default:
    use_ssl
    boolean
    Whether to use SSL for the connection or not.
    Default: False
    username
    string
    The username credential.
    verify_certs
    boolean
    Whether to verify SSL certificates.
    Default: False
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    collapse_urns
    CollapseUrns
    collapse_urns.urns_suffix_regex
    array(string)
    index_pattern
    AllowDenyPattern
    regex patterns for indexes to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': ['^_.*', '^ilm-history.*...
    index_pattern.allow
    array(string)
    index_pattern.deny
    array(string)
    index_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    index_template_pattern
    AllowDenyPattern
    The regex patterns for filtering index templates to ingest.
    Default: {'allow': ['.*'], 'deny': ['^_.*'], 'ignoreCase': ...
    index_template_pattern.allow
    array(string)
    index_template_pattern.deny
    array(string)
    index_template_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    ElasticProfiling
    profiling.enabled
    boolean
    Whether to enable profiling for the elastic search source.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling, just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.

    Code Coordinates

    • Class Name: datahub.ingestion.source.elastic_search.ElasticsearchSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Elasticsearch, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/feast/index.html b/docs/generated/ingestion/sources/feast/index.html index 730490a56151d..33e76233454d6 100644 --- a/docs/generated/ingestion/sources/feast/index.html +++ b/docs/generated/ingestion/sources/feast/index.html @@ -8,13 +8,13 @@ - +

    Feast

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Table-Level LineageEnabled by default

    This plugin extracts:

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[feast]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: feast
    config:
    # Coordinates
    path: "/path/to/repository/"
    # Options
    environment: "PROD"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    path 
    string
    Path to Feast repository
    environment
    string
    Environment to use when constructing URNs
    Default: PROD
    fs_yaml_file
    string
    Path to the feature_store.yaml file used to configure the feature store

    Code Coordinates

    • Class Name: datahub.ingestion.source.feast.FeastRepositorySource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Feast, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/file-based-lineage/index.html b/docs/generated/ingestion/sources/file-based-lineage/index.html index b7a7b4e07308a..40915908cf04f 100644 --- a/docs/generated/ingestion/sources/file-based-lineage/index.html +++ b/docs/generated/ingestion/sources/file-based-lineage/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ is 1.

    lineage: the top level key of the lineage file containing a list of EntityNodeConfig objects

    EntityNodeConfig:

    • entity: EntityConfig object
    • upstream: (optional) list of child EntityNodeConfig objects
    • fineGrainedLineages: (optional) list of FineGrainedLineageConfig objects

    EntityConfig:

    • name: identifier of the entity. Typically name or guid, as used in constructing entity urn.
    • type: type of the entity (only dataset is supported as of now)
    • env: the environment of this entity. Should match the values in the table here
    • platform: a valid platform like kafka, snowflake, etc..
    • platform_instance: optional string specifying the platform instance of this entity

    For example if dataset URN is urn:li:dataset:(urn:li:dataPlatform:redshift,userdb.public.customer_table,DEV) then EntityConfig will look like:

    name : userdb.public.customer_table
    type: dataset
    env: DEV
    platform: redshift

    FineGrainedLineageConfig:

    • upstreamType: type of upstream entity in a fine-grained lineage; default = "FIELD_SET"
    • upstreams: (optional) list of upstream schema field urns
    • downstreamType: type of downstream entity in a fine-grained lineage; default = "FIELD_SET"
    • downstreams: (optional) list of downstream schema field urns
    • transformOperation: (optional) transform operation applied to the upstream entities to produce the downstream field(s)
    • confidenceScore: (optional) the confidence in this lineage between 0 (low confidence) and 1 (high confidence); default = 1.0

    FineGrainedLineageConfig can be used to display fine grained lineage, also referred to as column-level lineage, for custom sources.

    You can also view an example lineage file checked in here

    Code Coordinates

    • Class Name: datahub.ingestion.source.metadata.lineage.LineageFileSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for File Based Lineage, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/file/index.html b/docs/generated/ingestion/sources/file/index.html index 822baf50733a4..70f86cf10ebb8 100644 --- a/docs/generated/ingestion/sources/file/index.html +++ b/docs/generated/ingestion/sources/file/index.html @@ -8,13 +8,13 @@ - +

    File

    Certified

    This plugin pulls metadata from a previously generated file. The file sink can produce such files, and a number of samples are included in the examples/mce_files directory.

    CLI based Ingestion

    Install the Plugin

    The file source works out of the box with acryl-datahub.

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: file
    config:
    # Coordinates
    filename: ./path/to/mce/file.json

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    path 
    string
    File path to folder or file to ingest, or URL to a remote file. If pointed to a folder, all files with extension {file_extension} (default json) within that folder will be processed.
    aspect
    string
    Set to an aspect to only read this aspect for ingestion.
    count_all_before_starting
    boolean
    When enabled, counts total number of records in the file before starting. Used for accurate estimation of completion time. Turn it off if startup time is too high.
    Default: True
    file_extension
    string
    When providing a folder to use to read files, set this field to control file extensions that you want the source to process. * is a special value that means process every file regardless of extension
    Default: .json
    read_mode
    Enum
    Default: AUTO

    Code Coordinates

    • Class Name: datahub.ingestion.source.file.GenericFileSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for File, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/gcs/index.html b/docs/generated/ingestion/sources/gcs/index.html index 191e8b0c3ae27..de17824854346 100644 --- a/docs/generated/ingestion/sources/gcs/index.html +++ b/docs/generated/ingestion/sources/gcs/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ JSON file schemas are inferred on the basis of the entire file (given the difficulty in extracting only the first few objects of the file), which may impact performance. We are working on using iterator-based JSON parsers to avoid reading in the entire JSON object.

    Prerequisites

    1. Create a service account with "Storage Object Viewer" Role - https://cloud.google.com/iam/docs/service-accounts-create
    2. Make sure you meet the following requirements to generate an HMAC key - https://cloud.google.com/storage/docs/authentication/managing-hmackeys#before-you-begin
    3. Create an HMAC key for service account created above - https://cloud.google.com/storage/docs/authentication/managing-hmackeys#create . Incubating

    Important Capabilities

    CapabilityStatusNotes
    Asset ContainersEnabled by default
    Data ProfilingNot supported
    Schema MetadataEnabled by default

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[gcs]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: gcs
    config:
    path_specs:
    - include: gs://gcs-ingestion-bucket/parquet_example/{table}/year={partition[0]}/*.parquet
    credential:
    hmac_access_id: <hmac access id>
    hmac_access_secret: <hmac access secret>

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    credential 
    HMACKey
    Google cloud storage HMAC keys
    credential.hmac_access_id 
    string
    Access ID
    credential.hmac_access_secret 
    string(password)
    Secret
    max_rows
    integer
    Maximum number of rows to use when inferring schemas for TSV and CSV files.
    Default: 100
    number_of_files_to_sample
    integer
    Number of files to list to sample for schema inference. This will be ignored if sample_files is set to False in the pathspec.
    Default: 100
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    path_specs
    array(object)
    path_specs.include 
    string
    Path to table. Name variable {table} is used to mark the folder with dataset. In absence of {table}, file level dataset will be created. Check below examples for more details.
    path_specs.default_extension
    string
    For files without extension it will assume the specified file type. If it is not set the files without extensions will be skipped.
    path_specs.enable_compression
    boolean
    Enable or disable processing compressed files. Currently .gz and .bz files are supported.
    Default: True
    path_specs.exclude
    array(string)
    path_specs.file_types
    array(string)
    path_specs.sample_files
    boolean
    Not listing all the files but only taking a handful amount of sample file to infer the schema. File count and file size calculation will be disabled. This can affect performance significantly if enabled
    Default: True
    path_specs.table_name
    string
    Display name of the dataset. Combination of named variables from include path and strings
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    Whether or not to enable stateful ingestion.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Path Specs

    Example - Dataset per file

    Bucket structure:

    test-gs-bucket
    ├── employees.csv
    └── food_items.csv

    Path specs config

    path_specs:
    - include: gs://test-gs-bucket/*.csv

    Example - Datasets with partitions

    Bucket structure:

    test-gs-bucket
    ├── orders
    │   └── year=2022
    │   └── month=2
    │   ├── 1.parquet
    │   └── 2.parquet
    └── returns
    └── year=2021
    └── month=2
    └── 1.parquet

    Path specs config:

    path_specs:
    - include: gs://test-gs-bucket/{table}/{partition_key[0]}={partition[0]}/{partition_key[1]}={partition[1]}/*.parquet

    Example - Datasets with partition and exclude

    Bucket structure:

    test-gs-bucket
    ├── orders
    │   └── year=2022
    │   └── month=2
    │   ├── 1.parquet
    │   └── 2.parquet
    └── tmp_orders
    └── year=2021
    └── month=2
    └── 1.parquet


    Path specs config:

    path_specs:
    - include: gs://test-gs-bucket/{table}/{partition_key[0]}={partition[0]}/{partition_key[1]}={partition[1]}/*.parquet
    exclude:
    - **/tmp_orders/**

    Example - Datasets of mixed nature

    Bucket structure:

    test-gs-bucket
    ├── customers
    │   ├── part1.json
    │   ├── part2.json
    │   ├── part3.json
    │   └── part4.json
    ├── employees.csv
    ├── food_items.csv
    ├── tmp_10101000.csv
    └── orders
       └── year=2022
        └── month=2
       ├── 1.parquet
       ├── 2.parquet
       └── 3.parquet

    Path specs config:

    path_specs:
    - include: gs://test-gs-bucket/*.csv
    exclude:
    - **/tmp_10101000.csv
    - include: gs://test-gs-bucket/{table}/*.json
    - include: gs://test-gs-bucket/{table}/{partition_key[0]}={partition[0]}/{partition_key[1]}={partition[1]}/*.parquet

    Valid path_specs.include

    gs://my-bucket/foo/tests/bar.avro # single file table   
    gs://my-bucket/foo/tests/*.* # multiple file level tables
    gs://my-bucket/foo/tests/{table}/*.avro #table without partition
    gs://my-bucket/foo/tests/{table}/*/*.avro #table where partitions are not specified
    gs://my-bucket/foo/tests/{table}/*.* # table where no partitions as well as data type specified
    gs://my-bucket/{dept}/tests/{table}/*.avro # specifying keywords to be used in display name
    gs://my-bucket/{dept}/tests/{table}/{partition_key[0]}={partition[0]}/{partition_key[1]}={partition[1]}/*.avro # specify partition key and value format
    gs://my-bucket/{dept}/tests/{table}/{partition[0]}/{partition[1]}/{partition[2]}/*.avro # specify partition value only format
    gs://my-bucket/{dept}/tests/{table}/{partition[0]}/{partition[1]}/{partition[2]}/*.* # for all extensions
    gs://my-bucket/*/{table}/{partition[0]}/{partition[1]}/{partition[2]}/*.* # table is present at 2 levels down in bucket
    gs://my-bucket/*/*/{table}/{partition[0]}/{partition[1]}/{partition[2]}/*.* # table is present at 3 levels down in bucket

    Valid path_specs.exclude

    • **/tests/**
    • gs://my-bucket/hr/**
    • */tests/.csv
    • gs://my-bucket/foo/*/my_table/**

    Notes

    • {table} represents folder for which dataset will be created.
    • include path must end with (*.* or *.[ext]) to represent leaf level.
    • if *.[ext] is provided then only files with specified type will be scanned.
    • /*/ represents single folder.
    • {partition[i]} represents value of partition.
    • {partition_key[i]} represents name of the partition.
    • While extracting, “i” will be used to match partition_key to partition.
    • all folder levels need to be specified in include. Only exclude path can have ** like matching.
    • exclude path cannot have named variables ( {} ).
    • Folder names should not contain {, }, *, / in their names.
    • {folder} is reserved for internal working. please do not use in named variables.

    If you would like to write a more complicated function for resolving file names, then a {transformer} would be a good fit.

    caution

    Specify as long fixed prefix ( with out /*/ ) as possible in path_specs.include. This will reduce the scanning time and cost, specifically on Google Cloud Storage.

    caution

    If you are ingesting datasets from Google Cloud Storage, we recommend running the ingestion on a server in the same region to avoid high egress costs.

    Code Coordinates

    • Class Name: datahub.ingestion.source.gcs.gcs_source.GCSSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Google Cloud Storage, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/glue/index.html b/docs/generated/ingestion/sources/glue/index.html index 79252245966f1..e29e979e841b2 100644 --- a/docs/generated/ingestion/sources/glue/index.html +++ b/docs/generated/ingestion/sources/glue/index.html @@ -8,13 +8,13 @@ - +

    Glue

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Detect Deleted EntitiesEnabled by default when stateful ingestion is turned on.
    DomainsSupported via the domain config field
    Platform InstanceEnabled by default

    Note: if you also have files in S3 that you'd like to ingest, we recommend you use Glue's built-in data catalog. See here for a quick guide on how to set up a crawler on Glue and ingest the outputs with DataHub.

    This plugin extracts the following:

    • Tables in the Glue catalog
    • Column types associated with each table
    • Table metadata, such as owner, description and parameters
    • Jobs and their component transformations, data sources, and data sinks

    IAM permissions

    For ingesting datasets, the following IAM permissions are required:

    {
    "Effect": "Allow",
    "Action": [
    "glue:GetDatabases",
    "glue:GetTables"
    ],
    "Resource": [
    "arn:aws:glue:$region-id:$account-id:catalog",
    "arn:aws:glue:$region-id:$account-id:database/*",
    "arn:aws:glue:$region-id:$account-id:table/*"
    ]
    }

    For ingesting jobs (extract_transforms: True), the following additional permissions are required:

    {
    "Effect": "Allow",
    "Action": [
    "glue:GetDataflowGraph",
    "glue:GetJobs",
    ],
    "Resource": "*"
    }

    plus s3:GetObject for the job script locations.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[glue]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: glue
    config:
    # Coordinates
    aws_region: "my-aws-region"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    aws_region 
    string
    AWS region code.
    aws_access_key_id
    string
    AWS access key ID. Can be auto-detected, see the AWS boto3 docs for details.
    aws_endpoint_url
    string
    The AWS service endpoint. This is normally constructed automatically, but can be overridden here.
    aws_profile
    string
    Named AWS profile to use. Only used if access key / secret are unset. If not set the default will be used
    aws_proxy
    map(str,string)
    aws_secret_access_key
    string
    AWS secret access key. Can be auto-detected, see the AWS boto3 docs for details.
    aws_session_token
    string
    AWS session token. Can be auto-detected, see the AWS boto3 docs for details.
    catalog_id
    string
    The aws account id where the target glue catalog lives. If None, datahub will ingest glue in aws caller's account.
    emit_s3_lineage
    boolean
    Whether to emit S3-to-Glue lineage.
    Default: False
    extract_owners
    boolean
    When enabled, extracts ownership from Glue directly and overwrites existing owners. When disabled, ownership is left empty for datasets.
    Default: True
    extract_transforms
    boolean
    Whether to extract Glue transform jobs.
    Default: True
    glue_s3_lineage_direction
    string
    If upstream, S3 is upstream to Glue. If downstream, S3 is downstream to Glue.
    Default: upstream
    ignore_resource_links
    boolean
    If set to True, ignore database resource links.
    Default: False
    ignore_unsupported_connectors
    boolean
    Whether to ignore unsupported connectors. If disabled, an error will be raised.
    Default: True
    platform
    string
    The platform to use for the dataset URNs. Must be one of ['glue', 'athena'].
    Default: glue
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    use_s3_bucket_tags
    boolean
    Whether S3 bucket tags should be created for the tables ingested by Glue. Please note that this will not apply tags to any folders ingested, only the files.
    Default: False
    use_s3_object_tags
    boolean
    Whether S3 object tags should be created for the tables ingested by Glue.
    Default: False
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    aws_role
    One of string, union(anyOf), string, AwsAssumeRoleConfig
    AWS roles to assume. If using the string format, the role ARN can be specified directly. If using the object format, the role can be specified in the RoleArn field and additional available arguments are documented at https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sts.html?highlight=assume_role#STS.Client.assume_role
    aws_role.RoleArn 
    string
    ARN of the role to assume.
    aws_role.ExternalId
    string
    External ID to use when assuming the role.
    database_pattern
    AllowDenyPattern
    regex patterns for databases to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    database_pattern.allow
    array(string)
    database_pattern.deny
    array(string)
    database_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    regex patterns for tables to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GlueProfilingConfig
    Configs to ingest data profiles from glue table
    profiling.column_count
    string
    The parameter name for column count in glue table.
    profiling.max
    string
    The parameter name for the max value of a column.
    profiling.mean
    string
    The parameter name for the mean value of a column.
    profiling.median
    string
    The parameter name for the median value of a column.
    profiling.min
    string
    The parameter name for the min value of a column.
    profiling.null_count
    string
    The parameter name for the count of null values in a column.
    profiling.null_proportion
    string
    The parameter name for the proportion of null values in a column.
    profiling.row_count
    string
    The parameter name for row count in glue table.
    profiling.stdev
    string
    The parameter name for the standard deviation of a column.
    profiling.unique_count
    string
    The parameter name for the count of unique value in a column.
    profiling.unique_proportion
    string
    The parameter name for the proportion of unique values in a column.
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling, just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    profiling.partition_patterns
    AllowDenyPattern
    Regex patterns for filtering partitions for profile. The pattern should be a string like: "{'key':'value'}".
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profiling.partition_patterns.allow
    array(string)
    profiling.partition_patterns.deny
    array(string)
    profiling.partition_patterns.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    Whether or not to enable stateful ingestion.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Concept Mapping

    Source ConceptDataHub ConceptNotes
    "glue"Data Platform
    Glue DatabaseContainerSubtype Database
    Glue TableDatasetSubtype Table
    Glue JobData Flow
    Glue Job TransformData Job
    Glue Job Data sourceDataset
    Glue Job Data sinkDataset

    Compatibility

    To capture lineage across Glue jobs and databases, a requirement must be met – otherwise the AWS API is unable to report any lineage. The job must be created in Glue Studio with the "Generate classic script" option turned on (this option can be accessed in the "Script" tab). Any custom scripts that do not have the proper annotations will not have reported lineage.

    Code Coordinates

    • Class Name: datahub.ingestion.source.aws.glue.GlueSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Glue, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/hana/index.html b/docs/generated/ingestion/sources/hana/index.html index a3c6a13474db7..62ddd34bf0891 100644 --- a/docs/generated/ingestion/sources/hana/index.html +++ b/docs/generated/ingestion/sources/hana/index.html @@ -8,13 +8,13 @@ - +

    SAP HANA

    Testing

    Important Capabilities

    CapabilityStatusNotes
    Data ProfilingOptionally enabled via configuration
    Detect Deleted EntitiesEnabled via stateful ingestion
    DomainsSupported via the domain config field
    Platform InstanceEnabled by default

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[hana]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: hana
    config:
    # Coordinates
    host_port: localhost:39041
    database: dbname

    # Credentials
    username: ${HANA_USER}
    password: ${HANA_PASS}

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    database
    string
    database (catalog)
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    host_port
    string
    Default: localhost:39041
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    scheme
    string
    Default: hana+hdbcli
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, no limit on the size of tables to profile. Supported only in snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 to 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 to 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Integration Details

    The implementation uses the SQLAlchemy Dialect for SAP HANA. The SQLAlchemy Dialect for SAP HANA is an open-source project hosted at GitHub that is actively maintained by SAP SE, and is not part of a licensed SAP HANA edition or option. It is provided under the terms of the project license. Please note that sqlalchemy-hana isn't an official SAP product and isn't covered by SAP support.

    Compatibility

    Under the hood, SQLAlchemy Dialect for SAP HANA uses the SAP HANA Python Driver hdbcli. Therefore it is compatible with HANA or HANA express versions since HANA SPS 2.

    Questions

    If you've got any questions on configuring this source, feel free to ping us on our Slack!

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.hana.HanaSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for SAP HANA, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/hive/index.html b/docs/generated/ingestion/sources/hive/index.html index 5a8491fee1c3d..37c8c563afd49 100644 --- a/docs/generated/ingestion/sources/hive/index.html +++ b/docs/generated/ingestion/sources/hive/index.html @@ -8,13 +8,13 @@ - +

    Hive

    Certified

    Important Capabilities

    CapabilityStatusNotes
    DomainsSupported via the domain config field
    Platform InstanceEnabled by default

    This plugin extracts the following:

    • Metadata for databases, schemas, and tables
    • Column types associated with each table
    • Detailed table and storage information
    • Table, row, and column statistics via optional SQL profiling.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[hive]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: hive
    config:
    # Coordinates
    host_port: localhost:10000
    database: DemoDatabase # optional, if not specified, ingests from all databases

    # Credentials
    username: user # optional
    password: pass # optional

    # For more details on authentication, see the PyHive docs:
    # https://github.com/dropbox/PyHive#passing-session-configuration.
    # LDAP, Kerberos, etc. are supported using connect_args, which can be
    # added under the `options` config parameter.
    #options:
    # connect_args:
    # auth: KERBEROS
    # kerberos_service_name: hive
    #scheme: 'hive+http' # set this if Thrift should use the HTTP transport
    #scheme: 'hive+https' # set this if Thrift should use the HTTP with SSL transport
    #scheme: 'sparksql' # set this for Spark Thrift Server

    sink:
    # sink configs

    # ---------------------------------------------------------
    # Recipe (Azure HDInsight)
    # Connecting to Microsoft Azure HDInsight using TLS.
    # ---------------------------------------------------------

    source:
    type: hive
    config:
    # Coordinates
    host_port: <cluster_name>.azurehdinsight.net:443

    # Credentials
    username: admin
    password: password

    # Options
    options:
    connect_args:
    http_path: "/hive2"
    auth: BASIC

    sink:
    # sink configs

    # ---------------------------------------------------------
    # Recipe (Databricks)
    # Ensure that databricks-dbapi is installed. If not, use ```pip install databricks-dbapi``` to install.
    # Use the ```http_path``` from your Databricks cluster in the following recipe.
    # See (https://docs.databricks.com/integrations/bi/jdbc-odbc-bi.html#get-server-hostname-port-http-path-and-jdbc-url) for instructions to find ```http_path```.
    # ---------------------------------------------------------

    source:
    type: hive
    config:
    host_port: <databricks workspace URL>:443
    username: token / username
    password: <api token> / password
    scheme: 'databricks+pyhive'

    options:
    connect_args:
    http_path: 'sql/protocolv1/o/xxxyyyzzzaaasa/1234-567890-hello123'

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    host_port 
    string
    host URL
    database
    string
    database (catalog)
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    database_pattern
    AllowDenyPattern
    Regex patterns for databases to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    database_pattern.allow
    array(string)
    database_pattern.deny
    array(string)
    database_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Deprecated in favour of database_pattern.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, no limit on the size of tables to profile. Supported only in snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 to 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 to 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.hive.HiveSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Hive, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/iceberg/index.html b/docs/generated/ingestion/sources/iceberg/index.html index 1baed6bd456bd..7205fd8f5a6e4 100644 --- a/docs/generated/ingestion/sources/iceberg/index.html +++ b/docs/generated/ingestion/sources/iceberg/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ A new version of the Iceberg Python library is currently in development and should fix this. Because of this limitation, this source plugin will only ingest HadoopCatalog-based tables that have a version-hint.text metadata file.

    Ingestion of tables happens in 2 steps:

    1. Discover Iceberg tables stored in file system.
    2. Load discovered tables using Iceberg python_legacy library

    The current implementation of the Iceberg source plugin will only discover tables stored in a local file system or in ADLS. Support for S3 could be added fairly easily.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[iceberg]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: "iceberg"
    config:
    env: PROD
    adls:
    # Will be translated to https://{account_name}.dfs.core.windows.net
    account_name: my_adls_account
    # Can use sas_token or account_key
    sas_token: "${SAS_TOKEN}"
    # account_key: "${ACCOUNT_KEY}"
    container_name: warehouse
    base_path: iceberg
    platform_instance: my_iceberg_catalog
    table_pattern:
    allow:
    - marketing.*
    profiling:
    enabled: true

    sink:
    # sink configs


    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    group_ownership_property
    string
    Iceberg table property to look for a CorpGroup owner. Can only hold a single group value. If property has no value, no owner information will be emitted.
    localfs
    string
    Local path to crawl for Iceberg tables. This is one filesystem type supported by this source and only one can be configured.
    max_path_depth
    integer
    Maximum folder depth to crawl for Iceberg tables. Folders deeper than this value will be silently ignored.
    Default: 2
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    user_ownership_property
    string
    Iceberg table property to look for a CorpUser owner. Can only hold a single user value. If property has no value, no owner information will be emitted.
    Default: owner
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    adls
    AdlsSourceConfig
    Azure Data Lake Storage to crawl for Iceberg tables. This is one filesystem type supported by this source and only one can be configured.
    adls.account_name 
    string
    Name of the Azure storage account. See Microsoft official documentation on how to create a storage account.
    adls.container_name 
    string
    Azure storage account container name.
    adls.account_key
    string
    Azure storage account access key that can be used as a credential. An account key, a SAS token or a client secret is required for authentication.
    adls.base_path
    string
    Base folder in hierarchical namespaces to start from.
    Default: /
    adls.client_id
    string
    Azure client (Application) ID required when a client_secret is used as a credential.
    adls.client_secret
    string
    Azure client secret that can be used as a credential. An account key, a SAS token or a client secret is required for authentication.
    adls.sas_token
    string
    Azure storage account Shared Access Signature (SAS) token that can be used as a credential. An account key, a SAS token or a client secret is required for authentication.
    adls.tenant_id
    string
    Azure tenant (Directory) ID required when a client_secret is used as a credential.
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    IcebergProfilingConfig
    Default: {'enabled': False, 'include_field_null_count': Tru...
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 to 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 to 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Iceberg Stateful Ingestion Config.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Concept Mapping

    This ingestion source maps the following Source System Concepts to DataHub Concepts:

    Source ConceptDataHub ConceptNotes
    icebergData Platform
    TableDatasetEach Iceberg table maps to a Dataset named using the parent folders. If a table is stored under my/namespace/table, the dataset name will be my.namespace.table. If a Platform Instance is configured, it will be used as a prefix: <platform_instance>.my.namespace.table.
    Table propertyUser (a.k.a CorpUser)The value of a table property can be used as the name of a CorpUser owner. This table property name can be configured with the source option user_ownership_property.
    Table propertyCorpGroupThe value of a table property can be used as the name of a CorpGroup owner. This table property name can be configured with the source option group_ownership_property.
    Table parent folders (excluding warehouse catalog location)ContainerAvailable in a future release
    Table schemaSchemaFieldMaps to the fields defined within the Iceberg table schema definition.

    Troubleshooting

    [Common Issue]

    [Provide description of common issues with this integration and steps to resolve]

    Code Coordinates

    • Class Name: datahub.ingestion.source.iceberg.iceberg.IcebergSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Iceberg, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/json-schema/index.html b/docs/generated/ingestion/sources/json-schema/index.html index 9a31524c35e72..010f680748330 100644 --- a/docs/generated/ingestion/sources/json-schema/index.html +++ b/docs/generated/ingestion/sources/json-schema/index.html @@ -8,14 +8,14 @@ - +

    JSON Schemas

    Incubating

    Important Capabilities

    CapabilityStatusNotes
    DescriptionsExtracts descriptions at top level and field level
    Detect Deleted EntitiesWith stateful ingestion enabled, will remove entities from DataHub if they are no longer present in the source
    Extract OwnershipDoes not currently support extracting ownership
    Extract TagsDoes not currently support extracting tags
    Platform InstanceSupports platform instance via config
    Schema MetadataExtracts schemas, following references

    This source extracts metadata from a single JSON Schema or multiple JSON Schemas rooted at a particular path. It performs reference resolution based on the $ref keyword.

    Metadata mapping:

    • Schemas are mapped to Datasets with sub-type Schema
    • The name of the Schema (Dataset) is inferred from the $id property and if that is missing, the file name.
    • Browse paths are minted based on the path

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[json-schema]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    pipeline_name: json_schema_ingestion
    source:
    type: json-schema
    config:
    path: <path_to_json_file_or_directory or url> # e.g. https://json.schemastore.org/petstore-v1.0.json
    platform: <choose a platform that you want schemas to live under> # e.g. schemaregistry
    # platform_instance: <add a platform_instance if there are multiple schema repositories>
    stateful_ingestion:
    enabled: true # recommended to have this turned on

    # sink configs if needed

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    path 
    One of string(file-path), string(directory-path), string(uri)
    Set this to a single file-path or a directory-path (for recursive traversal) or a remote url. e.g. https://json.schemastore.org/petstore-v1.0.json
    platform 
    string
    Set this to a platform that you want all schemas to live under. e.g. schemaregistry / schemarepo etc.
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    use_id_as_base_uri
    boolean
    When enabled, uses the $id field in the json schema as the base uri for following references.
    Default: False
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    uri_replace_pattern
    URIReplacePattern
    Use this if URI-s need to be modified during reference resolution. Simple string match - replace capabilities are supported.
    uri_replace_pattern.match 
    string
    Pattern to match on uri-s as part of reference resolution. See replace field
    uri_replace_pattern.replace 
    string
    Pattern to replace with as part of reference resolution. See match field
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Configuration Notes

    • You must provide a platform field. Most organizations have custom project names for their schema repositories, so you can pick whatever name makes sense. For example, you might want to call your schema platform schemaregistry. After picking a custom platform, you can use the put platform command to register your custom platform into DataHub.

    Code Coordinates

    • Class Name: datahub.ingestion.source.schema.json_schema.JsonSchemaSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for JSON Schemas, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/kafka-connect/index.html b/docs/generated/ingestion/sources/kafka-connect/index.html index 64378291e7ce3..fe008c4085825 100644 --- a/docs/generated/ingestion/sources/kafka-connect/index.html +++ b/docs/generated/ingestion/sources/kafka-connect/index.html @@ -8,14 +8,14 @@ - +

    Kafka Connect

    Integration Details

    This plugin extracts the following:

    • Source and Sink Connectors in Kafka Connect as Data Pipelines
    • For Source connectors - Data Jobs to represent lineage information between source dataset to Kafka topic per {connector_name}:{source_dataset} combination
    • For Sink connectors - Data Jobs to represent lineage information between Kafka topic to destination dataset per {connector_name}:{topic} combination

    Concept Mapping

    This ingestion source maps the following Source System Concepts to DataHub Concepts:

    Source ConceptDataHub ConceptNotes
    "kafka-connect"Data Platform
    ConnectorDataFlow
    Kafka TopicDataset

    Current limitations

    Works only for

    • Source connectors: JDBC, Debezium, Mongo and Generic connectors with user-defined lineage graph
    • Sink connectors: BigQuery Certified

    Important Capabilities

    CapabilityStatusNotes
    Platform InstanceEnabled by default

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[kafka-connect]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: "kafka-connect"
    config:
    # Coordinates
    connect_uri: "http://localhost:8083"

    # Credentials
    username: admin
    password: password

    # Optional
    # Platform instance mapping to use when constructing URNs.
    # Use if single instance of platform is referred across connectors.
    platform_instance_map:
    mysql: mysql_platform_instance

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    cluster_name
    string
    Cluster to ingest from.
    Default: connect-cluster
    connect_to_platform_map
    map(str,map)
    connect_uri
    string
    URI to connect to.
    Default: http://localhost:8083/
    convert_lineage_urns_to_lowercase
    boolean
    Whether to convert the urns of ingested lineage dataset to lowercase
    Default: False
    password
    string
    Kafka Connect password.
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    platform_instance_map
    map(str,string)
    username
    string
    Kafka Connect username.
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    connector_patterns
    AllowDenyPattern
    regex patterns for connectors to filter for ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    connector_patterns.allow
    array(string)
    connector_patterns.deny
    array(string)
    connector_patterns.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    generic_connectors
    array(object)
    generic_connectors.connector_name 
    string
    generic_connectors.source_dataset 
    string
    generic_connectors.source_platform 
    string
    provided_configs
    array(object)
    provided_configs.path_key 
    string
    provided_configs.provider 
    string
    provided_configs.value 
    string
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Advanced Configurations

    Working with Platform Instances

    If you have multiple instances of kafka OR source/sink systems that are referred to in your kafka-connect setup, you'd need to configure platform instances for these systems in the kafka-connect recipe to generate correct lineage edges. You must have already set platform_instance in the recipes of the original source/sink systems. Refer to the document Working with Platform Instances to understand more about this.

    There are two options available to declare source/sink system's platform_instance in kafka-connect recipe. If single instance of platform is used across all kafka-connect connectors, you can use platform_instance_map to specify platform_instance to use for a platform when constructing URNs for lineage.

    Example:

        # Map of platform name to platform instance
    platform_instance_map:
    snowflake: snowflake_platform_instance
    mysql: mysql_platform_instance

    If multiple instances of platform are used across kafka-connect connectors, you'd need to specify platform_instance to use for platform for every connector.

    Example - Multiple MySQL Source Connectors each reading from different mysql instance

        # Map of platform name to platform instance per connector
    connect_to_platform_map:
    mysql_connector1:
    mysql: mysql_instance1

    mysql_connector2:
    mysql: mysql_instance2

    Here mysql_connector1 and mysql_connector2 are names of MySQL source connectors as defined in kafka-connect connector config.

    Example - Multiple MySQL Source Connectors each reading from a different mysql instance and writing to a different kafka cluster

        connect_to_platform_map:
    mysql_connector1:
    mysql: mysql_instance1
    kafka: kafka_instance1

    mysql_connector2:
    mysql: mysql_instance2
    kafka: kafka_instance2

    You can also use a combination of platform_instance_map and connect_to_platform_map in your recipe. Note that the platform_instance specified for the connector in connect_to_platform_map will always take higher precedence even if platform_instance for the same platform is set in platform_instance_map.

    If you do not use platform_instance in original source/sink recipes, you do not need to specify them in above configurations.

    Note that, you do not need to specify platform_instance for BigQuery.

    Example - Multiple BigQuery Sink Connectors each writing to different kafka cluster

        connect_to_platform_map:
    bigquery_connector1:
    kafka: kafka_instance1

    bigquery_connector2:
    kafka: kafka_instance2

    Provided Configurations from External Sources

    Kafka Connect supports pluggable configuration providers which can load configuration data from external sources at runtime. These values are not available to the DataHub ingestion source through Kafka Connect APIs. If you are using such provided configurations to specify connection urls (database, etc.) in Kafka Connect connector configuration, then you will also need to add these in the provided_configs section in the recipe for DataHub to generate correct lineage.

        # Optional mapping of provider configurations if using
    provided_configs:
    - provider: env
    path_key: MYSQL_CONNECTION_URL
    value: jdbc:mysql://test_mysql:3306/librarydb

    Code Coordinates

    • Class Name: datahub.ingestion.source.kafka_connect.KafkaConnectSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Kafka Connect, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/kafka/index.html b/docs/generated/ingestion/sources/kafka/index.html index f011c81cb2133..1140d44db69f8 100644 --- a/docs/generated/ingestion/sources/kafka/index.html +++ b/docs/generated/ingestion/sources/kafka/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ custom implementation of the KafkaSchemaRegistryBase class, and implement the get_schema_metadata(topic, platform_urn) method that given a topic name would return object of SchemaMetadata containing schema for that topic. Please refer datahub.ingestion.source.confluent_schema_registry::ConfluentSchemaRegistry for sample implementation of this class.

    class KafkaSchemaRegistryBase(ABC):
    @abstractmethod
    def get_schema_metadata(
    self, topic: str, platform_urn: str
    ) -> Optional[SchemaMetadata]:
    pass

    The custom schema registry class can be configured using the schema_registry_class config param of the kafka source as shown below.

    source:
    type: "kafka"
    config:
    # Set the custom schema registry implementation class
    schema_registry_class: "datahub.ingestion.source.confluent_schema_registry.ConfluentSchemaRegistry"
    # Coordinates
    connection:
    bootstrap: "broker:9092"
    schema_registry_url: http://localhost:8081

    # sink configs

    Limitations of PROTOBUF schema types implementation

    The current implementation of the support for PROTOBUF schema type has the following limitations:

    • Recursive types are not supported.
    • If the schemas of different topics define a type in the same package, the source would raise an exception.

    In addition to this, maps are represented as arrays of messages. The following message,

    message MessageWithMap {
    map<int, string> map_1 = 1;
    }

    becomes:

    message Map1Entry {
    int key = 1;
    string value = 2;
    }
    message MessageWithMap {
    repeated Map1Entry map_1 = 1;
    }

    Code Coordinates

    • Class Name: datahub.ingestion.source.kafka.KafkaSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Kafka, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/ldap/index.html b/docs/generated/ingestion/sources/ldap/index.html index 8c933a6bc1cdd..20b27a2bc28ba 100644 --- a/docs/generated/ingestion/sources/ldap/index.html +++ b/docs/generated/ingestion/sources/ldap/index.html @@ -8,13 +8,13 @@ - +

    LDAP

    Certified

    This plugin extracts the following:

    • People
    • Names, emails, titles, and manager information for each person
    • List of groups

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[ldap]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: "ldap"
    config:
    # Coordinates
    ldap_server: ldap://localhost

    # Credentials
    ldap_user: "cn=admin,dc=example,dc=org"
    ldap_password: "admin"

    # Options
    base_dn: "dc=example,dc=org"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    base_dn 
    string
    LDAP DN.
    ldap_password 
    string
    LDAP password.
    ldap_server 
    string
    LDAP server URL.
    ldap_user 
    string
    LDAP user.
    attrs_list
    array(string)
    custom_props_list
    array(string)
    drop_missing_first_last_name
    boolean
    If set to true, any users without first and last names will be dropped.
    Default: True
    filter
    string
    LDAP extractor filter.
    Default: (objectClass=*)
    group_attrs_map
    object
    Default: {}
    manager_filter_enabled
    boolean
    Use LDAP extractor filter to search managers.
    Default: True
    manager_pagination_enabled
    boolean
    [deprecated] Use pagination_enabled
    Default: True
    page_size
    integer
    Size of each page to fetch when extracting metadata.
    Default: 20
    pagination_enabled
    boolean
    Use pagination while doing search queries (enabled by default).
    Default: True
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    use_email_as_username
    boolean
    Use email for users' usernames instead of username (disabled by default). If enabled, the user and group urns will have the email as the id part of the urn.
    Default: False
    user_attrs_map
    object
    Default: {}
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.ldap.LDAPSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for LDAP, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/looker/index.html b/docs/generated/ingestion/sources/looker/index.html index d8ef426972506..0fe36376bcee7 100644 --- a/docs/generated/ingestion/sources/looker/index.html +++ b/docs/generated/ingestion/sources/looker/index.html @@ -8,7 +8,7 @@ - + @@ -25,7 +25,7 @@ As this package doesn't officially support all the SQL dialects that Looker supports, the result might not be correct. You can, however, implement a custom parser and take it into use by setting the sql_parser configuration value. A custom SQL parser must inherit from datahub.utilities.sql_parser.SQLParser and must be made available to Datahub by ,for example, installing it. The configuration then needs to be set to module_name.ClassName of the parser.

    Multi-Project LookML (Advanced)

    Looker projects support organization as multiple git repos, with remote includes that can refer to projects that are stored in a different repo. If your Looker implementation uses multi-project setup, you can configure the LookML source to pull in metadata from your remote projects as well.

    If you are using local or remote dependencies, you will see include directives in your lookml files that look like this:

    include: "//e_flights/views/users.view.lkml"
    include: "//e_commerce/public/orders.view.lkml"

    Also, you will see projects that are being referred to listed in your manifest.lkml file. Something like this:

    project_name: this_project

    local_dependency: {
    project: "my-remote-project"
    }

    remote_dependency: ga_360_block {
    url: "https://github.com/llooker/google_ga360"
    ref: "0bbbef5d8080e88ade2747230b7ed62418437c21"
    }

    To ingest Looker repositories that are including files defined in other projects, you will need to use the project_dependencies directive within the configuration section. Consider the following scenario:

    • Your primary project refers to a remote project called my_remote_project
    • The remote project is homed in the GitHub repo my_org/my_remote_project
    • You have provisioned a GitHub deploy key and stored the credential in the environment variable (or UI secret), ${MY_REMOTE_PROJECT_DEPLOY_KEY}

    In this case, you can add this section to your recipe to activate multi-project LookML ingestion.

    source:
    type: lookml
    config:
    ... other config variables

    project_dependencies:
    my_remote_project:
    repo: my_org/my_remote_project
    deploy_key: ${MY_REMOTE_PROJECT_DEPLOY_KEY}

    Under the hood, DataHub will check out your remote repository using the provisioned deploy key, and use it to navigate includes that you have in the model files from your primary project.

    If you have the remote project checked out locally, and do not need DataHub to clone the project for you, you can provide DataHub directly with the path to the project like the config snippet below:

    source:
    type: lookml
    config:
    ... other config variables

    project_dependencies:
    my_remote_project: /path/to/local_git_clone_of_remote_project
    note

    This is not the same as ingesting the remote project as a primary Looker project because DataHub will not be processing the model files that might live in the remote project. If you want to additionally include the views accessible via the models in the remote project, create a second recipe where your remote project is the primary project.

    Code Coordinates

    • Class Name: datahub.ingestion.source.looker.lookml_source.LookMLSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Looker, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/mariadb/index.html b/docs/generated/ingestion/sources/mariadb/index.html index 4dae5ddc485d2..28961edba220d 100644 --- a/docs/generated/ingestion/sources/mariadb/index.html +++ b/docs/generated/ingestion/sources/mariadb/index.html @@ -8,13 +8,13 @@ - +

    MariaDB

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Data ProfilingOptionally enabled via configuration
    Detect Deleted EntitiesEnabled via stateful ingestion
    DomainsSupported via the domain config field
    Platform InstanceEnabled by default

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[mariadb]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: mariadb
    config:
    # Coordinates
    host_port: localhost:3306
    database: dbname

    # Credentials
    username: root
    password: example

    # If you need to use SSL with MariaDB:
    # options:
    # connect_args:
    # ssl_ca: "path_to/server-ca.pem"
    # ssl_cert: "path_to/client-cert.pem"
    # ssl_key: "path_to/client-key.pem"


    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    database
    string
    database (catalog)
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    host_port
    string
    MySQL host URL.
    Default: localhost:3306
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    scheme
    string
    Default: mysql+pymysql
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    database_pattern
    AllowDenyPattern
    Regex patterns for databases to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    database_pattern.allow
    array(string)
    database_pattern.deny
    array(string)
    database_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Deprecated in favour of database_pattern.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified GBs. If set to null, no limit on the size of tables to profile. Supported only in snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.mariadb.MariaDBSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for MariaDB, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/metabase/index.html b/docs/generated/ingestion/sources/metabase/index.html index 8af5ccc667193..866aeab370d37 100644 --- a/docs/generated/ingestion/sources/metabase/index.html +++ b/docs/generated/ingestion/sources/metabase/index.html @@ -8,7 +8,7 @@ - + @@ -23,7 +23,7 @@ payload. However, the name can be overridden from database_alias_map for a given database connected to Metabase.

    If several platform instances with the same platform (e.g. from several distinct clickhouse clusters) are present in DataHub, the mapping between database id in Metabase and platform instance in DataHub may be configured with the following map:

      database_id_to_instance_map:
    "42": platform_instance_in_datahub

    The key in this map must be a string, not an integer, although the Metabase API provides the id as a number. If database_id_to_instance_map is not specified, platform_instance_map is used for platform instance mapping. If none of the above are specified, platform instance is not used when constructing the urn when searching for dataset relations.

    Compatibility

    Metabase version v0.41.2

    Code Coordinates

    • Class Name: datahub.ingestion.source.metabase.MetabaseSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Metabase, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/mode/index.html b/docs/generated/ingestion/sources/mode/index.html index f5156000f157d..4b5e05783fd7a 100644 --- a/docs/generated/ingestion/sources/mode/index.html +++ b/docs/generated/ingestion/sources/mode/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ on PostgreSQL database.

    Report

    /api/{account}/reports/{report} endpoint is used to retrieve the following report information.

    • Title and description
    • Last edited by
    • Owner
    • Link to the Report in Mode for exploration
    • Associated charts within the report

    Chart

    The /api/{workspace}/reports/{report}/queries/{query}/charts endpoint is used to retrieve the following information.

    • Title and description
    • Last edited by
    • Owner
    • Link to the chart in Mode
    • Datasource and lineage information from Report queries.

    The following properties for a chart are ingested in DataHub.

    Chart Information

    NameDescription
    FiltersFilters applied to the chart
    MetricsFields or columns used for aggregation
    XFields used in X-axis
    X2Fields used in second X-axis
    YFields used in Y-axis
    Y2Fields used in second Y-axis

    Table Information

    NameDescription
    ColumnsColumn names in a table
    FiltersFilters applied to the table

    Pivot Table Information

    NameDescription
    ColumnsColumn names in a table
    FiltersFilters applied to the table
    MetricsFields or columns used for aggregation
    RowsRow names in a table

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[mode]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: mode
    config:
    # Coordinates
    connect_uri: http://app.mode.com

    # Credentials
    token: token
    password: pass

    # Options
    workspace: "datahub"
    default_schema: "public"
    owner_username_instead_of_email: False
    api_options:
    retry_backoff_multiplier: 2
    max_retry_interval: 10
    max_attempts: 5

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    password 
    string(password)
    Mode password for authentication.
    token 
    string
    Mode user token.
    connect_uri
    string
    Mode host URL.
    default_schema
    string
    Default schema to use when schema is not provided in an SQL query
    Default: public
    owner_username_instead_of_email
    boolean
    Use username for owner URN instead of Email
    Default: True
    platform_instance_map
    map(str,string)
    workspace
    string
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    api_options
    ModeAPIConfig
    Retry/Wait settings for Mode API to avoid "Too many Requests" error. See Mode API Options below
    Default: {'retry_backoff_multiplier': 2, 'max_retry_interva...
    api_options.max_attempts
    integer
    Maximum number of attempts to retry before failing
    Default: 5
    api_options.max_retry_interval
    One of integer, number
    Maximum interval to wait when retrying
    Default: 10
    api_options.retry_backoff_multiplier
    One of integer, number
    Multiplier for exponential backoff when waiting to retry
    Default: 2

    See Mode's Authentication documentation on how to generate token and password.

    Code Coordinates

    • Class Name: datahub.ingestion.source.mode.ModeSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Mode, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/mongodb/index.html b/docs/generated/ingestion/sources/mongodb/index.html index 72a23039d45f5..e131c2357210b 100644 --- a/docs/generated/ingestion/sources/mongodb/index.html +++ b/docs/generated/ingestion/sources/mongodb/index.html @@ -8,14 +8,14 @@ - +

    MongoDB

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Schema MetadataEnabled by default

    This plugin extracts the following:

    • Databases and associated metadata
    • Collections in each database and schemas for each collection (via schema inference)

    By default, schema inference samples 1,000 documents from each collection. Setting schemaSamplingSize: null will scan the entire collection. Moreover, setting useRandomSampling: False will sample the first documents found without random selection, which may be faster for large collections.

    Note that schemaSamplingSize has no effect if enableSchemaInference: False is set.

    Really large schemas will be further truncated to a maximum of 300 schema fields. This is configurable using the maxSchemaSize parameter.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[mongodb]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: "mongodb"
    config:
    # Coordinates
    connect_uri: "mongodb://localhost"

    # Credentials
    username: admin
    password: password
    authMechanism: "DEFAULT"

    # Options
    enableSchemaInference: True
    useRandomSampling: True
    maxSchemaSize: 300

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    authMechanism
    string
    MongoDB authentication mechanism.
    connect_uri
    string
    MongoDB connection URI.
    Default: mongodb://localhost
    enableSchemaInference
    boolean
    Whether to infer schemas.
    Default: True
    maxDocumentSize
    integer
    Default: 16793600
    maxSchemaSize
    integer
    Maximum number of fields to include in the schema.
    Default: 300
    options
    object
    Additional options to pass to pymongo.MongoClient().
    Default: {}
    password
    string
    MongoDB password.
    schemaSamplingSize
    integer
    Number of documents to use when inferring schema size. If set to 0, all documents will be scanned.
    Default: 1000
    useRandomSampling
    boolean
    If documents for schema inference should be randomly selected. If False, documents will be selected from start.
    Default: True
    username
    string
    MongoDB username.
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    collection_pattern
    AllowDenyPattern
    regex patterns for collections to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    collection_pattern.allow
    array(string)
    collection_pattern.deny
    array(string)
    collection_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    database_pattern
    AllowDenyPattern
    regex patterns for databases to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    database_pattern.allow
    array(string)
    database_pattern.deny
    array(string)
    database_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.mongodb.MongoDBSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for MongoDB, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/mssql/index.html b/docs/generated/ingestion/sources/mssql/index.html index 1e073a657309b..a4b9e0c300530 100644 --- a/docs/generated/ingestion/sources/mssql/index.html +++ b/docs/generated/ingestion/sources/mssql/index.html @@ -8,14 +8,14 @@ - +

    Microsoft SQL Server

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Data ProfilingOptionally enabled via configuration
    DescriptionsEnabled by default
    Detect Deleted EntitiesEnabled via stateful ingestion
    DomainsSupported via the domain config field
    Platform InstanceEnabled by default

    This plugin extracts the following:

    • Metadata for databases, schemas, views and tables
    • Column types associated with each table/view
    • Table, row, and column statistics via optional SQL profiling We have two options for the underlying library used to connect to SQL Server: (1) python-tds and (2) pyodbc. The TDS library is pure Python and hence easier to install, but only PyODBC supports encrypted connections.

    Prerequisites

    If you want to ingest MSSQL Jobs and stored procedures (with code), the user credentials need the proper privileges.

    Script for granting the privileges:

    USE MSDB
    GRANT SELECT ON OBJECT::msdb.dbo.sysjobsteps TO 'USERNAME'
    GRANT SELECT ON OBJECT::msdb.dbo.sysjobs TO 'USERNAME'

    USE 'DATA_DB_NAME'
    GRANT VIEW DEFINITION TO 'USERNAME'
    GRANT SELECT ON OBJECT::sys.sql_expression_dependencies TO 'USERNAME'

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[mssql]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: mssql
    config:
    # Coordinates
    host_port: localhost:1433
    database: DemoDatabase

    # Credentials
    username: user
    password: pass

    sink:
    # sink configs

    #------------------------------------------------------------------------
    #Example: using ingestion with ODBC and encryption
    #This requires you to have already installed the Microsoft ODBC Driver for SQL Server.
    #See https://docs.microsoft.com/en-us/sql/connect/python/pyodbc/step-1-configure-development-environment-for-pyodbc-python-development?view=sql-server-ver15
    # ------------------------------------------------------------------------

    source:
    type: mssql
    config:
    # Coordinates
    host_port: localhost:1433
    database: DemoDatabase

    # Credentials
    username: admin
    password: password

    # Options
    use_odbc: "True"
    uri_args:
    driver: "ODBC Driver 17 for SQL Server"
    Encrypt: "yes"
    TrustServerCertificate: "Yes"
    ssl: "True"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    convert_urns_to_lowercase
    boolean
    Enable to convert the SQL Server assets urns to lowercase
    Default: False
    database
    string
    database (catalog). If set to Null, all databases will be considered for ingestion.
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    host_port
    string
    MSSQL host URL.
    Default: localhost:1433
    include_descriptions
    boolean
    Include table descriptions information.
    Default: True
    include_jobs
    boolean
    Include ingest of MSSQL Jobs. Requires access to the 'msdb' and 'sys' schema.
    Default: True
    include_stored_procedures
    boolean
    Include ingest of stored procedures. Requires access to the 'sys' schema.
    Default: True
    include_stored_procedures_code
    boolean
    Include information about object code.
    Default: True
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    uri_args
    map(str,string)
    use_odbc
    boolean
    See https://docs.sqlalchemy.org/en/14/dialects/mssql.html#module-sqlalchemy.dialects.mssql.pyodbc.
    Default: False
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    database_pattern
    AllowDenyPattern
    Regex patterns for databases to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    database_pattern.allow
    array(string)
    database_pattern.deny
    array(string)
    database_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in Snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, no limit on the size of tables to profile. Supported only in Snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.mssql.source.SQLServerSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Microsoft SQL Server, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/mysql/index.html b/docs/generated/ingestion/sources/mysql/index.html index 15f9735a880a0..12dfc78b401e4 100644 --- a/docs/generated/ingestion/sources/mysql/index.html +++ b/docs/generated/ingestion/sources/mysql/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    MySQL

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Data ProfilingOptionally enabled via configuration
    Detect Deleted EntitiesEnabled via stateful ingestion
    DomainsSupported via the domain config field
    Platform InstanceEnabled by default

    This plugin extracts the following:

    Metadata for databases, schemas, and tables Column types and schema associated with each table Table, row, and column statistics via optional SQL profiling

    Prerequisites

    In order to execute this source, the user credentials need the following privileges

    • grant select on DATABASE.* to 'USERNAME'@'%'
    • grant show view on DATABASE.* to 'USERNAME'@'%'

    select is required to see the table structure as well as for profiling.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[mysql]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: mysql
    config:
    # Coordinates
    host_port: localhost:3306
    database: dbname

    # Credentials
    username: root
    password: example

    # If you need to use SSL with MySQL:
    # options:
    # connect_args:
    # ssl_ca: "path_to/server-ca.pem"
    # ssl_cert: "path_to/client-cert.pem"
    # ssl_key: "path_to/client-key.pem"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    database
    string
    database (catalog)
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    host_port
    string
    MySQL host URL.
    Default: localhost:3306
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    scheme
    string
    Default: mysql+pymysql
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    database_pattern
    AllowDenyPattern
    Regex patterns for databases to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    database_pattern.allow
    array(string)
    database_pattern.deny
    array(string)
    database_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Deprecated in favour of database_pattern.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in Snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, no limit on the size of tables to profile. Supported only in Snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.mysql.MySQLSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for MySQL, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/nifi/index.html b/docs/generated/ingestion/sources/nifi/index.html index 37efc45720f88..e39ece02042f4 100644 --- a/docs/generated/ingestion/sources/nifi/index.html +++ b/docs/generated/ingestion/sources/nifi/index.html @@ -8,13 +8,13 @@ - +

    NiFi

    Certified

    This plugin extracts the following:

    • NiFi flow as DataFlow entity
    • Ingress, egress processors, remote input and output ports as DataJob entity
    • Input and output ports receiving remote connections as Dataset entity
    • Lineage information between external datasets and ingress/egress processors by analyzing provenance events

    Current limitations:

    • Limited ingress/egress processors are supported
      • S3: ListS3, FetchS3Object, PutS3Object
      • SFTP: ListSFTP, FetchSFTP, GetSFTP, PutSFTP

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[nifi]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: "nifi"
    config:
    # Coordinates
    site_url: "https://localhost:8443/nifi/"

    # Credentials
    auth: SINGLE_USER
    username: admin
    password: password

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    site_url 
    string
    URL for Nifi, ending with /nifi/. e.g. https://mynifi.domain/nifi/
    auth
    Enum
    Nifi authentication. must be one of : NO_AUTH, SINGLE_USER, CLIENT_CERT, KERBEROS
    Default: NO_AUTH
    ca_file
    One of boolean, string
    Path to PEM file containing certs for the root CA(s) for the NiFi
    client_cert_file
    string
    Path to PEM file containing the public certificates for the user/client identity, must be set for auth = "CLIENT_CERT"
    client_key_file
    string
    Path to PEM file containing the client’s secret key
    client_key_password
    string
    The password to decrypt the client_key_file
    password
    string
    Nifi password, must be set for auth = "SINGLE_USER"
    provenance_days
    integer
    time window to analyze provenance events for external datasets
    Default: 7
    site_name
    string
    Site name to identify this site with, useful when using input and output ports receiving remote connections
    Default: default
    site_url_to_site_name
    map(str,string)
    username
    string
    Nifi username, must be set for auth = "SINGLE_USER"
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    process_group_pattern
    AllowDenyPattern
    regex patterns for filtering process groups
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    process_group_pattern.allow
    array(string)
    process_group_pattern.deny
    array(string)
    process_group_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True

    Authentication

    This connector supports following authentication mechanisms

    Single User Authentication (auth: SINGLE_USER)

    Connector will pass this username and password as used on Nifi Login Page over /access/token REST endpoint. This mode also works when Kerberos login identity provider is set up for Nifi.

    Client Certificates Authentication (auth: CLIENT_CERT)

    Connector will use client_cert_file(required) and client_key_file(optional), client_key_password(optional) for mutual TLS authentication.

    Kerberos Authentication via SPNEGO (auth: KERBEROS)

    If nifi has been configured to use Kerberos SPNEGO, connector will pass user’s Kerberos ticket to nifi over /access/kerberos REST endpoint. It is assumed that user's Kerberos ticket is already present on the machine on which ingestion runs. This is usually done by installing krb5-user and then running kinit for user.

    sudo apt install krb5-user
    kinit user@REALM

    Basic Authentication (auth: BASIC_AUTH)

    Connector will use HTTPBasicAuth with username and password.

    No Authentication (auth: NO_AUTH)

    This is useful for testing purposes.

    Access Policies

    This connector requires following access policies to be set in Nifi for ingestion user.

    Global Access Policies

    PolicyPrivilegeResourceAction
    view the UIAllows users to view the UI/flowR
    query provenanceAllows users to submit a Provenance Search and request Event Lineage/provenanceR

    Component level Access Policies (required to be set on root process group)

    PolicyPrivilegeResourceAction
    view the componentAllows users to view component configuration details/<component-type>/<component-UUID>R
    view the dataAllows users to view metadata and content for this component in flowfile queues in outbound connections and through provenance events/data/<component-type>/<component-UUID>R
    view provenanceAllows users to view provenance events generated by this component/provenance-data/<component-type>/<component-UUID>R

    Code Coordinates

    • Class Name: datahub.ingestion.source.nifi.NifiSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for NiFi, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/okta/index.html b/docs/generated/ingestion/sources/okta/index.html index 79efcea939fbf..70674c7cdb138 100644 --- a/docs/generated/ingestion/sources/okta/index.html +++ b/docs/generated/ingestion/sources/okta/index.html @@ -8,7 +8,7 @@ - + @@ -29,7 +29,7 @@ users, set either the okta_users_filter or okta_users_search flag (only one can be set at a time). For groups, set either the okta_groups_filter or okta_groups_search flag. Note that these are not regular expressions. See below for full configuration options.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[okta]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: okta
    config:
    # Coordinates
    okta_domain: "dev-35531955.okta.com"

    # Credentials
    okta_api_token: "11be4R_M2MzDqXawbTHfKGpKee0kuEOfX1RCQSRx99"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    okta_api_token 
    string
    An API token generated for the DataHub application inside your Okta Developer Console. e.g. 00be4R_M2MzDqXawbWgfKGpKee0kuEOfX1RCQSRx00
    okta_domain 
    string
    The location of your Okta Domain, without a protocol. Can be found in Okta Developer console. e.g. dev-33231928.okta.com
    delay_seconds
    One of number, integer
    Number of seconds to wait between calls to Okta's REST APIs. (Okta rate limits). Defaults to 10ms.
    Default: 0.01
    include_deprovisioned_users
    boolean
    Whether to ingest users in the DEPROVISIONED state from Okta.
    Default: False
    include_suspended_users
    boolean
    Whether to ingest users in the SUSPENDED state from Okta.
    Default: False
    ingest_group_membership
    boolean
    Whether group membership should be ingested into DataHub. ingest_groups must be True if this is True.
    Default: True
    ingest_groups
    boolean
    Whether groups should be ingested into DataHub.
    Default: True
    ingest_users
    boolean
    Whether users should be ingested into DataHub.
    Default: True
    mask_group_id
    boolean
    Default: True
    mask_user_id
    boolean
    Default: True
    okta_groups_filter
    string
    Okta filter expression (not regex) for ingesting groups. Only one of okta_groups_filter and okta_groups_search can be set. See (https://developer.okta.com/docs/reference/api/groups/#filters) for more info.
    okta_groups_search
    string
    Okta search expression (not regex) for ingesting groups. Only one of okta_groups_filter and okta_groups_search can be set. See (https://developer.okta.com/docs/reference/api/groups/#list-groups-with-search) for more info.
    okta_profile_to_group_name_attr
    string
    Which Okta Group Profile attribute to use as input to DataHub group name mapping.
    Default: name
    okta_profile_to_group_name_regex
    string
    A regex used to parse the DataHub group name from the attribute specified in okta_profile_to_group_name_attr.
    Default: (.*)
    okta_profile_to_username_attr
    string
    Which Okta User Profile attribute to use as input to DataHub username mapping. Common values used are - login, email.
    Default: email
    okta_profile_to_username_regex
    string
    A regex used to parse the DataHub username from the attribute specified in okta_profile_to_username_attr.
    Default: (.*)
    okta_users_filter
    string
    Okta filter expression (not regex) for ingesting users. Only one of okta_users_filter and okta_users_search can be set. See (https://developer.okta.com/docs/reference/api/users/#list-users-with-a-filter) for more info.
    okta_users_search
    string
    Okta search expression (not regex) for ingesting users. Only one of okta_users_filter and okta_users_search can be set. See (https://developer.okta.com/docs/reference/api/users/#list-users-with-search) for more info.
    page_size
    integer
    The number of entities requested from Okta's REST APIs in one request.
    Default: 100
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Okta Stateful Ingestion Config.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    As a prerequisite, you should create a DataHub Application within the Okta Developer Console with full permissions to read your organization's Users and Groups.

    Compatibility

    Validated against Okta API Versions:

    • 2021.07.2

      Validated against load:

    • User Count: 1000

    • Group Count: 100

    • Group Membership Edges: 1000 (1 per User)

    • Run Time (Wall Clock): 2min 7sec

    Code Coordinates

    • Class Name: datahub.ingestion.source.identity.okta.OktaSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Okta, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/openapi/index.html b/docs/generated/ingestion/sources/openapi/index.html index db96e4ee2fbbc..76b952bbbf5cf 100644 --- a/docs/generated/ingestion/sources/openapi/index.html +++ b/docs/generated/ingestion/sources/openapi/index.html @@ -8,7 +8,7 @@ - + @@ -28,7 +28,7 @@ unable to guess the URL. In such cases you can still manually specify it in the forced_examples part of the configuration file.

    As example, if in your swagger file you have

    paths:
    /accounts/groupname/{name}/:
    get:
    tags: [ "Groups" ]
    operationId: GetGroup
    description: Retrieve group data
    responses:
    '200':
    description: Return details about the group

    and the plugin did not find an example in its previous calls, the tool has no idea about what to substitute for the {name} part.

    By specifying in the configuration file

        forced_examples:  # optionals
    /accounts/groupname/{name}: ['test']

    the plugin is able to build a correct URL, as follows:

    https://test_endpoint.com/accounts/groupname/test

    Code Coordinates

    • Class Name: datahub.ingestion.source.openapi.OpenApiSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for OpenAPI, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/oracle/index.html b/docs/generated/ingestion/sources/oracle/index.html index 2b2be55bb1748..e8069e9d4e9c6 100644 --- a/docs/generated/ingestion/sources/oracle/index.html +++ b/docs/generated/ingestion/sources/oracle/index.html @@ -8,13 +8,13 @@ - +

    Oracle

    Certified

    Important Capabilities

    CapabilityStatusNotes
    DomainsEnabled by default

    This plugin extracts the following:

    • Metadata for databases, schemas, and tables
    • Column types associated with each table
    • Table, row, and column statistics via optional SQL profiling

    Using the Oracle source requires that you've also installed the correct drivers; see the cx_Oracle docs. The easiest one is the Oracle Instant Client.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[oracle]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: oracle
    config:
    # Coordinates
    host_port: localhost:5432
    database: dbname

    # Credentials
    username: user
    password: pass

    # Options
    service_name: svc # omit database if using this option

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    host_port 
    string
    host URL
    add_database_name_to_urn
    boolean
    Add oracle database name to urn, default urn is schema.table
    Default: False
    database
    string
    If using, omit service_name.
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    scheme
    string
    Will be set automatically to default value.
    Default: oracle+cx_oracle
    service_name
    string
    Oracle service name. If using, omit database.
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated within this many days. If set to null, there is no constraint on last modified time for tables to profile. Supported only in Snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in Snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified GBs. If set to null, no limit on the size of tables to profile. Supported only in Snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    As a SQL-based service, the Oracle integration is also supported by our SQL profiler. See here for more details on configuration.

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.oracle.OracleSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Oracle, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/postgres/index.html b/docs/generated/ingestion/sources/postgres/index.html index 24ed9ad1581fe..c203413e254e1 100644 --- a/docs/generated/ingestion/sources/postgres/index.html +++ b/docs/generated/ingestion/sources/postgres/index.html @@ -8,13 +8,13 @@ - +

    Postgres

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Data ProfilingOptionally enabled via configuration
    DomainsEnabled by default
    Platform InstanceEnabled by default
    Table-Level LineageOptionally enabled via configuration

    This plugin extracts the following:

    • Metadata for databases, schemas, views, and tables
    • Column types associated with each table
    • Also supports PostGIS extensions
    • database_alias (optional) can be used to change the name of database to be ingested
    • Table, row, and column statistics via optional SQL profiling

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[postgres]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: postgres
    config:
    # Coordinates
    host_port: localhost:5432
    database: DemoDatabase

    # Credentials
    username: user
    password: pass

    # Options
    database_alias: DatabaseNameToBeIngested

    # Optional: SSL configuration.
    # options:
    # connect_args:
    # sslcert: "<<path to sslcert>>"
    # sslkey: "<<path to sslkey>>"
    # sslrootcert: "<<path to verification ca chain>>"
    # sslmode: "verify-full"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    host_port 
    string
    host URL
    database
    string
    database (catalog). If set to Null, all databases will be considered for ingestion.
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_view_lineage
    boolean
    Include table lineage for views
    Default: False
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    initial_database
    string
    Initial database used to query for the list of databases, when ingesting multiple databases. Note: this is not used if database or sqlalchemy_uri are provided.
    Default: postgres
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    scheme
    string
    database scheme
    Default: postgresql+psycopg2
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    database_pattern
    AllowDenyPattern
    Regex patterns for databases to filter in ingestion. Note: this is not used if database or sqlalchemy_uri are provided.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    database_pattern.allow
    array(string)
    database_pattern.deny
    array(string)
    database_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Default: {'allow': ['.*'], 'deny': ['information_schema'], ...
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, there is no limit on the row count of tables to profile. Supported only in Snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, there is no limit on the size of tables to profile. Supported only in Snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for the date of the month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for the day of the week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.postgres.PostgresSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Postgres, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/powerbi/index.html b/docs/generated/ingestion/sources/powerbi/index.html index 2e5d8d4e61281..b94fb6019814e 100644 --- a/docs/generated/ingestion/sources/powerbi/index.html +++ b/docs/generated/ingestion/sources/powerbi/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ It extracts the following:

    Metadata that can be ingested:

    • report name
    • report description
    • ownership(can add existing users in DataHub as owners)
    • transfer folders structure to DataHub as it is in Report Server
    • webUrl to report in Report Server

    Due to limits of the PBIRS REST API, it's impossible to ingest the following data for now:

    • tiles info
    • datasource of report
    • dataset of report

    The following types of report can be ingested:

    • PowerBI report(.pbix)
    • Paginated report(.rdl)
    • Linked report Read more...

    Module powerbi

    Certified

    Important Capabilities

    CapabilityStatusNotes
    DescriptionsEnabled by default
    Extract OwnershipDisabled by default, configured using extract_ownership
    Platform InstanceEnabled by default

    This plugin extracts the following:

    • Power BI dashboards, tiles and datasets
    • Names, descriptions and URLs of dashboard and tile
    • Owners of dashboards

    Configuration Notes

    1. Refer Microsoft AD App Creation doc to create a Microsoft AD Application. Once Microsoft AD Application is created you can configure client-credential i.e. client_id and client_secret in recipe for ingestion.

    2. Enable admin access only if you want to ingest dataset, lineage and endorsement tags. Refer section Admin Ingestion vs. Basic Ingestion for more detail.

      Login to PowerBI as Admin and from Admin API settings allow below permissions

      • Allow service principals to use read-only admin APIs
      • Enhance admin APIs responses with detailed metadata
      • Enhance admin APIs responses with DAX and mashup expressions

    Concept mapping

    PowerBIDatahub
    DashboardDashboard
    Dataset's TableDataset
    TileChart
    Report.webUrlChart.externalUrl
    WorkspaceContainer
    ReportDashboard
    PageChart

    If Tile is created from report then Chart.externalUrl is set to Report.webUrl.

    Lineage

    This source extracts table lineage for tables present in PowerBI Datasets. Let's consider a PowerBI Dataset SALES_REPORT with a PostgreSQL database configured as the data source in the SALES_REPORT dataset.

    If the SALES_REPORT PowerBI Dataset has a table SALES_ANALYSIS which is backed by SALES_ANALYSIS_VIEW of the PostgreSQL database, then SALES_ANALYSIS_VIEW will appear as an upstream dataset for the SALES_ANALYSIS table.

    You can control table lineage ingestion using extract_lineage configuration parameter, by default it is set to true.

    PowerBI Source extracts the lineage information by parsing PowerBI M-Query expression.

    PowerBI Source supports M-Query expression for below listed PowerBI Data Sources

    1. Snowflake
    2. Oracle
    3. PostgreSQL
    4. Microsoft SQL Server
    5. Google BigQuery

    Native SQL query parsing is supported for Snowflake and Amazon Redshift data sources, and only the first table from the FROM clause will be ingested as an upstream table. Advanced SQL constructs like JOINs and sub-queries in the FROM clause are not supported.

    For example refer below native SQL query. The table OPERATIONS_ANALYTICS.TRANSFORMED_PROD.V_UNIT_TARGET will be ingested as upstream table.

    let
    Source = Value.NativeQuery(
    Snowflake.Databases(
    "sdfsd788.ws-east-2.fakecomputing.com",
    "operations_analytics_prod",
    [Role = "OPERATIONS_ANALYTICS_MEMBER"]
    ){[Name = "OPERATIONS_ANALYTICS"]}[Data],
    "select #(lf)UPPER(REPLACE(AGENT_NAME,\'-\',\'\')) AS Agent,#(lf)TIER,#(lf)UPPER(MANAGER),#(lf)TEAM_TYPE,#(lf)DATE_TARGET,#(lf)MONTHID,#(lf)TARGET_TEAM,#(lf)SELLER_EMAIL,#(lf)concat((UPPER(REPLACE(AGENT_NAME,\'-\',\'\'))), MONTHID) as AGENT_KEY,#(lf)UNIT_TARGET AS SME_Quota,#(lf)AMV_TARGET AS Revenue_Quota,#(lf)SERVICE_QUOTA,#(lf)BL_TARGET,#(lf)SOFTWARE_QUOTA as Software_Quota#(lf)#(lf)from OPERATIONS_ANALYTICS.TRANSFORMED_PROD.V_UNIT_TARGETS#(lf)#(lf)where YEAR_TARGET >= 2020#(lf)and TEAM_TYPE = \'foo\'#(lf)and TARGET_TEAM = \'bar\'",
    null,
    [EnableFolding = true]
    ),
    #"Added Conditional Column" = Table.AddColumn(
    Source,
    "Has PS Software Quota?",
    each
    if [TIER] = "Expansion (Medium)" then
    "Yes"
    else if [TIER] = "Acquisition" then
    "Yes"
    else
    "No"
    )
    in
    #"Added Conditional Column"

    Use full-table-name in from clause. For example dev.public.category

    M-Query Pattern Supported For Lineage Extraction

    Let's consider an M-Query which combines two PostgreSQL tables. Such an M-Query can be written as per the patterns below.

    Pattern-1

    let
    Source = PostgreSQL.Database("localhost", "book_store"),
    book_date = Source{[Schema="public",Item="book"]}[Data],
    issue_history = Source{[Schema="public",Item="issue_history"]}[Data],
    combine_result = Table.Combine({book_date, issue_history})
    in
    combine_result

    Pattern-2

    let
    Source = PostgreSQL.Database("localhost", "book_store"),
    combine_result = Table.Combine({Source{[Schema="public",Item="book"]}[Data], Source{[Schema="public",Item="issue_history"]}[Data]})
    in
    combine_result

    Pattern-2 is not supported for upstream table lineage extraction as it uses a nested item-selector i.e. {Source{[Schema="public",Item="book"]}[Data], Source{[Schema="public",Item="issue_history"]}[Data]} as the argument to the M-Query table function i.e. Table.Combine

    Pattern-1 is supported as it first assigns the table from the schema to a variable, and then the variable is used in the M-Query table function i.e. Table.Combine

    Extract endorsements to tags

    By default, extracting endorsement information to tags is disabled. The feature may be useful if organization uses endorsements to identify content quality.

    Please note that the default implementation overwrites tags for the ingested entities, if you need to preserve existing tags, consider using a transformer with semantics: PATCH tags instead of OVERWRITE.

    Admin Ingestion vs. Basic Ingestion

    PowerBI provides two sets of API i.e. Basic API and Admin API.

    The Basic API returns metadata of PowerBI resources where the service principal has been granted access explicitly, whereas the Admin API returns metadata of all PowerBI resources irrespective of whether the service principal has been granted explicit access to them.

    The Admin Ingestion (explained below) is the recommended way to execute PowerBI ingestion as this ingestion can extract most of the metadata.

    Admin Ingestion: Service Principal As Admin in Tenant Setting and Added as Member In Workspace

    To grant admin access to the service principal, visit your PowerBI tenant Settings.

    If you have added service principal as member in workspace and also allowed below permissions from PowerBI tenant Settings

    • Allow service principal to use read-only PowerBI Admin APIs
    • Enhance admin APIs responses with detailed metadata
    • Enhance admin APIs responses with DAX and mashup expressions

    PowerBI Source would be able to ingest below listed metadata of that particular workspace

    • Lineage
    • PowerBI Dataset
    • Endorsement as tag
    • Dashboards
    • Reports
    • Dashboard's Tiles
    • Report's Pages

    If you don't want to add a service principal as a member in your workspace, then you can enable the admin_apis_only: true in recipe to use PowerBI Admin API only.

    Caveats of setting admin_apis_only to true:

    • Report's pages would not get ingested as page API is not available in PowerBI Admin API
    • PowerBI Parameters would not get resolved to actual values while processing M-Query for table lineage

    Basic Ingestion: Service Principal As Member In Workspace

    If you have added the service principal as a member in a workspace then PowerBI Source would be able to ingest the below metadata of that particular workspace

    • Dashboards
    • Reports
    • Dashboard's Tiles
    • Report's Pages

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[powerbi]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: "powerbi"
    config:
    # Your Power BI tenant identifier
    tenant_id: a949d688-67c0-4bf1-a344-e939411c6c0a

    # Azure AD Application identifier
    client_id: foo
    # Azure AD App client secret
    client_secret: bar

    # Ingest elements of below PowerBi Workspace into Datahub
    workspace_id_pattern:
    allow:
    - 4bd10256-e999-45dd-8e56-571c77153a5f
    deny:

    # Enable / Disable ingestion of ownership information for dashboards
    extract_ownership: true

    # Enable/Disable extracting workspace information to DataHub containers
    extract_workspaces_to_containers: true

    # Enable / Disable ingestion of endorsements.
    # Please notice that this may overwrite any existing tags defined to ingested entities!
    extract_endorsements_to_tags: false

    # Optional -- This mapping is optional and only required to configure platform-instance for upstream tables
    # A mapping of PowerBI datasource's server i.e host[:port] to data platform instance.
    # :port is optional and only needed if your datasource server is running on non-standard port.
    # For Google BigQuery the datasource's server is google bigquery project name
    server_to_platform_instance:
    ap-south-1.snowflakecomputing.com:
    platform_instance: operational_instance
    env: DEV
    oracle-server:1920:
    platform_instance: high_performance_production_unit
    env: PROD
    big-query-sales-project:
    platform_instance: sn-2
    env: QA

    # Need admin_api, only ingest workspace that are modified since...
    modified_since: "2023-02-10T00:00:00.0000000Z"

    ownership:
    # create powerbi user as datahub corpuser, false will still extract ownership of workspace/ dashboards
    create_corp_user: false
    # use email to build user urn instead of powerbi user identifier
    use_powerbi_email: true
    # remove email suffix like @acryl.io
    remove_email_suffix: true
    # only ingest user with certain authority
    owner_criteria: ["ReadWriteReshareExplore","Owner","Admin"]
    # wrap powerbi tables (datahub dataset) under 1 powerbi dataset (datahub container)
    extract_datasets_to_containers: true
    # only ingest dataset that are endorsed, like "Certified"
    filter_dataset_endorsements:
    allow:
    - Certified

    # extract powerbi dashboards and tiles
    extract_dashboards: false
    # extract powerbi dataset table schema
    extract_dataset_schema: true


    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    client_id 
    string
    Azure app client identifier
    client_secret 
    string
    Azure app client secret
    tenant_id 
    string
    PowerBI tenant identifier
    admin_apis_only
    boolean
    Retrieve metadata using PowerBI Admin API only. If this is enabled, then Report Pages will not be extracted. Admin API access is required if this setting is enabled
    Default: False
    convert_lineage_urns_to_lowercase
    boolean
    Whether to convert the urns of ingested lineage dataset to lowercase
    Default: True
    convert_urns_to_lowercase
    boolean
    Whether to convert the PowerBI assets urns to lowercase
    Default: False
    enable_advance_lineage_sql_construct
    boolean
    Whether to enable advanced native SQL constructs for parsing, like joins and sub-queries. Along with this flag, native_query_parsing should be enabled. By default convert_lineage_urns_to_lowercase is enabled; in case you have disabled it in a previous ingestion execution then it may break lineage, as this option generates the upstream dataset URNs in lowercase.
    Default: False
    extract_dashboards
    boolean
    Whether to ingest PBI Dashboard and Tiles as Datahub Dashboard and Chart
    Default: True
    extract_dataset_schema
    boolean
    Whether to ingest PBI Dataset Table columns and measures
    Default: False
    extract_datasets_to_containers
    boolean
    PBI tables will be grouped under a Datahub Container, the container reflect a PBI Dataset
    Default: False
    extract_endorsements_to_tags
    boolean
    Whether to extract endorsements to tags; note that this may overwrite existing tags. Admin API access is required if this setting is enabled
    Default: False
    extract_independent_datasets
    boolean
    Whether to extract datasets not used in any PowerBI visualization
    Default: False
    extract_lineage
    boolean
    Whether lineage should be ingested between X and Y. Admin API access is required if this setting is enabled
    Default: True
    extract_ownership
    boolean
    Whether ownership should be ingested. Admin API access is required if this setting is enabled. Note that enabling this may overwrite owners that you've added inside DataHub's web application.
    Default: False
    extract_reports
    boolean
    Whether reports should be ingested
    Default: True
    extract_workspaces_to_containers
    boolean
    Extract workspaces to DataHub containers
    Default: True
    modified_since
    string
    Get only recently modified workspaces based on modified_since datetime '2023-02-10T00:00:00.0000000Z', excludePersonalWorkspaces and excludeInActiveWorkspaces limit to last 30 days
    native_query_parsing
    boolean
    Whether PowerBI native query should be parsed to extract lineage
    Default: True
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    scan_batch_size
    integer
    batch size for sending workspace_ids to PBI, 100 is the limit
    Default: 1
    scan_timeout
    integer
    timeout for PowerBI metadata scanning
    Default: 60
    workspace_id_as_urn_part
    boolean
    Highly recommended to change this to True, as you can have the same workspace name. To maintain backward compatibility, this is set to False, which uses the workspace name
    Default: False
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    filter_dataset_endorsements
    AllowDenyPattern
    Filter and ingest datasets which have a 'Certified' or 'Promoted' endorsement. If both are added, datasets which are 'Certified' or 'Promoted' will be ingested. The default setting allows all datasets to be ingested
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    filter_dataset_endorsements.allow
    array(string)
    filter_dataset_endorsements.deny
    array(string)
    filter_dataset_endorsements.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    ownership
    OwnershipMapping
    Configure how is ownership ingested
    Default: {'create_corp_user': True, 'use_powerbi_email': Fa...
    ownership.create_corp_user
    boolean
    Whether ingest PowerBI user as Datahub Corpuser
    Default: True
    ownership.dataset_configured_by_as_owner
    boolean
    Take PBI dataset configuredBy as dataset owner if exist
    Default: False
    ownership.owner_criteria
    array(string)
    ownership.remove_email_suffix
    boolean
    Remove PowerBI User email suffix for example, @acryl.io
    Default: False
    ownership.use_powerbi_email
    boolean
    Use PowerBI User email to ingest as corpuser, default is powerbi user identifier
    Default: False
    server_to_platform_instance
    map(str,PlatformDetail)
    server_to_platform_instance.key.platform_instance
    string
    DataHub platform instance name. To generate correct urn for upstream dataset, this should match with platform instance name used in ingestion recipe of other datahub sources.
    server_to_platform_instance.key.env
    string
    The environment that all assets produced by DataHub platform ingestion source belong to
    Default: PROD
    workspace_id_pattern
    AllowDenyPattern
    Regex patterns to filter PowerBI workspaces in ingestion
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    workspace_id_pattern.allow
    array(string)
    workspace_id_pattern.deny
    array(string)
    workspace_id_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    PowerBI Stateful Ingestion Config.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.powerbi.powerbi.PowerBiDashboardSource
    • Browse on GitHub

    Module powerbi-report-server

    Incubating

    Important Capabilities

    CapabilityStatusNotes
    Extract OwnershipEnabled by default

    Use this plugin to connect to PowerBI Report Server. It extracts the following:

    Metadata that can be ingested:

    • report name
    • report description
    • ownership(can add existing users in DataHub as owners)
    • transfer folders structure to DataHub as it is in Report Server
    • webUrl to report in Report Server

    Due to limits of the PBIRS REST API, it's impossible to ingest the following data for now:

    • tiles info
    • datasource of report
    • dataset of report

    The following types of report can be ingested:

    • PowerBI report(.pbix)
    • Paginated report(.rdl)
    • Linked report

    Configuration Notes

    See the

    1. Microsoft Grant user access to a Report Server doc
    2. Use your user credentials from previous step in yaml file

    Concept mapping

    Power BI Report ServerDatahub
    Paginated ReportDashboard
    Power BI ReportDashboard
    Mobile ReportDashboard
    Linked ReportDashboard
    Dataset, DatasourceN/A

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[powerbi-report-server]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: powerbi-report-server
    config:
    # Your Power BI Report Server Windows username
    username: username
    # Your Power BI Report Server Windows password
    password: password
    # Your Workstation name
    workstation_name: workstation_name
    # Your Power BI Report Server host URL, example: localhost:80
    host_port: host_port
    # Your alias for Power BI Report Server host URL, example: local_powerbi_report_server
    server_alias: server_alias
    # Workspace's dataset environments, example: (PROD, DEV, QA, STAGE)
    env: DEV
    # Your Power BI Report Server base virtual directory name for reports
    report_virtual_directory_name: Reports
    # Your Power BI Report Server base virtual directory name for report server
    report_server_virtual_directory_name: ReportServer
    # Enable/Disable extracting ownership information of Dashboard
    extract_ownership: True
    # Set ownership type
    ownership_type: TECHNICAL_OWNER


    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    host_port 
    string
    Power BI Report Server host URL
    password 
    string
    Windows account password
    report_server_virtual_directory_name 
    string
    Report Server Virtual Directory URL name
    report_virtual_directory_name 
    string
    Report Virtual Directory URL name
    username 
    string
    Windows account username
    extract_ownership
    boolean
    Whether ownership should be ingested
    Default: True
    graphql_url
    string
    [deprecated] Not used
    ownership_type
    string
    Ownership type of owner
    Default: NONE
    platform_name
    string
    Default: powerbi
    platform_urn
    string
    Default: urn:li:dataPlatform:powerbi
    server_alias
    string
    Alias for Power BI Report Server host URL
    Default:
    workstation_name
    string
    Workstation name
    Default: localhost
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    chart_pattern
    AllowDenyPattern
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    chart_pattern.allow
    array(string)
    chart_pattern.deny
    array(string)
    chart_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    report_pattern
    AllowDenyPattern
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    report_pattern.allow
    array(string)
    report_pattern.deny
    array(string)
    report_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.powerbi_report_server.report_server.PowerBiReportServerDashboardSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for PowerBI, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/presto-on-hive/index.html b/docs/generated/ingestion/sources/presto-on-hive/index.html index 84a9cef3dffc2..762f18f3ec1a8 100644 --- a/docs/generated/ingestion/sources/presto-on-hive/index.html +++ b/docs/generated/ingestion/sources/presto-on-hive/index.html @@ -8,13 +8,13 @@ - +

    Presto on Hive

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Data ProfilingOptionally enabled via configuration
    Detect Deleted EntitiesEnabled via stateful ingestion

    This plugin extracts the following:

    • Metadata for Presto views and Hive tables (external / managed)
    • Column types associated with each table / view
    • Detailed table / view property info

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[presto-on-hive]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: presto-on-hive
    config:
    # Hive metastore DB connection
    host_port: localhost:5432
    database: metastore

    # specify the schema where metastore tables reside
    schema_pattern:
    allow:
    - "^public"

    # credentials
    username: user # optional
    password: pass # optional

    #scheme: 'postgresql+psycopg2' # set this if metastore db is using postgres
    #scheme: 'mysql+pymysql' # set this if metastore db is using mysql, default if unset

    # set this to have advanced filters on what to ingest
    #views_where_clause_suffix: AND d."name" in ('db1')
    #tables_where_clause_suffix: AND d."name" in ('db1')

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    database
    string
    database (catalog)
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    enable_properties_merge
    boolean
    By default, the connector overwrites properties every time. Set this to True to enable merging of properties with what exists on the server.
    Default: False
    host_port
    string
    Host URL and port to connect to. Example: localhost:3306
    Default: localhost:3306
    include_catalog_name_in_ids
    boolean
    Add the Presto catalog name (e.g. hive) to the generated dataset urns. urn:li:dataset:(urn:li:dataPlatform:hive,hive.user.logging_events,PROD) versus urn:li:dataset:(urn:li:dataPlatform:hive,user.logging_events,PROD)
    Default: False
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    ingestion_job_id
    string
    Default:
    metastore_db_name
    string
    Name of the Hive metastore's database (usually: metastore). For backward compatibility, if this field is not provided, the database field will be used. If both the 'database' and 'metastore_db_name' fields are set then the 'database' field will be used to filter the hive/presto/trino database
    mode
    Enum
    The ingested data will be stored under this platform. Valid options: ['hive', 'presto', 'presto-on-hive', 'trino']
    Default: presto-on-hive
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    schemas_where_clause_suffix
    string
    Where clause to specify what Hive schemas should be ingested.
    Default:
    simplify_nested_field_paths
    boolean
    Simplify v2 field paths to v1 by default. If the schema has Union or Array types, still falls back to v2
    Default: False
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    tables_where_clause_suffix
    string
    Where clause to specify what Hive tables should be ingested.
    Default:
    use_catalog_subtype
    boolean
    Container Subtype name to be 'Database' or 'Catalog' Valid options: ['True', 'False']
    Default: True
    use_dataset_pascalcase_subtype
    boolean
    Dataset Subtype name to be 'Table' or 'View' Valid options: ['True', 'False']
    Default: False
    username
    string
    username
    views_where_clause_suffix
    string
    Where clause to specify what Presto views should be ingested.
    Default:
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    database_pattern
    AllowDenyPattern
    Regex patterns for hive/presto database to filter in ingestion. Specify regex to only match the database name. e.g. to match all tables in database analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    database_pattern.allow
    array(string)
    database_pattern.deny
    array(string)
    database_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, no limit on the size of tables to profile. Supported only in snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 to 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 to 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.presto_on_hive.PrestoOnHiveSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Presto on Hive, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/presto/index.html b/docs/generated/ingestion/sources/presto/index.html index c140e16c2e5cd..0a8dfcf6abd36 100644 --- a/docs/generated/ingestion/sources/presto/index.html +++ b/docs/generated/ingestion/sources/presto/index.html @@ -8,13 +8,13 @@ - +

    Presto

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Data ProfilingOptionally enabled via configuration
    DomainsSupported via the domain config field

    This plugin extracts the following:

    • Metadata for databases, schemas, and tables
    • Column types and schema associated with each table
    • Table, row, and column statistics via optional SQL profiling

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[presto]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: presto
    config:
    # Coordinates
    host_port: localhost:5300
    database: dbname

    # Credentials
    username: foo
    password: datahub

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    host_port 
    string
    host URL
    database
    string
    database (catalog)
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, no limit on the size of tables to profile. Supported only in snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 to 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 to 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.presto.PrestoSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Presto, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/pulsar/index.html b/docs/generated/ingestion/sources/pulsar/index.html index e03252b67152a..3687f37275c78 100644 --- a/docs/generated/ingestion/sources/pulsar/index.html +++ b/docs/generated/ingestion/sources/pulsar/index.html @@ -8,14 +8,14 @@ - +

    Pulsar

    Integration Details

    The Datahub Pulsar source plugin extracts topic and schema metadata from an Apache Pulsar instance and ingest the information into Datahub. The plugin uses the Pulsar admin Rest API interface to interact with the Pulsar instance. The following APIs are used in order to:

    The data is extracted on tenant and namespace basis, topics with corresponding schema (if available) are ingested as Dataset into Datahub. Some additional values like schema description, schema_version, schema_type and partitioned are included as DatasetProperties.

    Concept Mapping

    This ingestion source maps the following Source System Concepts to DataHub Concepts:

    Source ConceptDataHub ConceptNotes
    pulsarData Platform
    Pulsar TopicDatasetsubType: topic
    Pulsar SchemaSchemaFieldMaps to the fields defined within the Avro or JSON schema definition.

    Metadata Ingestion Quickstart

    For context on getting started with ingestion, check out our metadata ingestion guide. Incubating

    Important Capabilities

    CapabilityStatusNotes
    DomainsSupported via the domain config field
    Platform InstanceEnabled by default

    PulsarSource(config: datahub.ingestion.source_config.pulsar.PulsarSourceConfig, ctx: datahub.ingestion.api.common.PipelineContext)

    NOTE: Always use TLS encryption in a production environment and use variable substitution for sensitive information (e.g. ${CLIENT_ID} and ${CLIENT_SECRET}).

    Prerequisites

    In order to ingest metadata from Apache Pulsar, you will need:

    • Access to a Pulsar Instance, if authentication is enabled a valid access token.
    • Pulsar version >= 2.7.0

    NOTE: A superUser role is required for listing all existing tenants within a Pulsar instance.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[pulsar]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: "pulsar"
    config:
    env: "TEST"
    platform_instance: "local"
    ## Pulsar client connection config ##
    web_service_url: "https://localhost:8443"
    verify_ssl: "/opt/certs/ca.cert.pem"
    # Issuer url for auth document, for example "http://localhost:8083/realms/pulsar"
    issuer_url: <issuer_url>
    client_id: ${CLIENT_ID}
    client_secret: ${CLIENT_SECRET}
    # Tenant list to scrape
    tenants:
    - tenant_1
    - tenant_2
    # Topic filter pattern
    topic_patterns:
    allow:
    - ".*sales.*"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    client_id
    string
    The application's client ID
    client_secret
    string
    The application's client secret
    exclude_individual_partitions
    boolean
    Extract each individual partitioned topic. e.g. when turned off a topic with 100 partitions will result in 100 Datasets.
    Default: True
    issuer_url
    string
    The complete URL for a Custom Authorization Server. Mandatory for OAuth based authentication.
    oid_config
    object
    Placeholder for OpenId discovery document
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    tenants
    array(string)
    timeout
    integer
    Timeout setting: how long to wait for the Pulsar REST API to send data before giving up
    Default: 5
    token
    string
    The access token for the application. Mandatory for token based authentication.
    verify_ssl
    One of boolean, string
    Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use.
    Default: True
    web_service_url
    string
    The web URL for the cluster.
    Default: http://localhost:8080
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    namespace_patterns
    AllowDenyPattern
    List of regex patterns for namespaces to include/exclude from ingestion. By default the functions namespace is denied.
    Default: {'allow': ['.*'], 'deny': ['public/functions'], 'i...
    namespace_patterns.allow
    array(string)
    namespace_patterns.deny
    array(string)
    namespace_patterns.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    tenant_patterns
    AllowDenyPattern
    List of regex patterns for tenants to include/exclude from ingestion. By default all tenants are allowed.
    Default: {'allow': ['.*'], 'deny': ['pulsar'], 'ignoreCase'...
    tenant_patterns.allow
    array(string)
    tenant_patterns.deny
    array(string)
    tenant_patterns.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    topic_patterns
    AllowDenyPattern
    List of regex patterns for topics to include/exclude from ingestion. By default the Pulsar system topics are denied.
    Default: {'allow': ['.*'], 'deny': ['/__.*$'], 'ignoreCase'...
    topic_patterns.allow
    array(string)
    topic_patterns.deny
    array(string)
    topic_patterns.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    see Stateful Ingestion
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.pulsar.PulsarSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Pulsar, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/redash/index.html b/docs/generated/ingestion/sources/redash/index.html index 54d47941abe3f..e10c7e1b0612d 100644 --- a/docs/generated/ingestion/sources/redash/index.html +++ b/docs/generated/ingestion/sources/redash/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ As this package doesn't officially support all the SQL dialects that Redash supports, the result might not be correct. You can, however, implement a custom parser and take it into use by setting the sql_parser configuration value. A custom SQL parser must inherit from datahub.utilities.sql_parser.SQLParser and must be made available to Datahub by ,for example, installing it. The configuration then needs to be set to module_name.ClassName of the parser.

    Code Coordinates

    • Class Name: datahub.ingestion.source.redash.RedashSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Redash, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/redshift/index.html b/docs/generated/ingestion/sources/redshift/index.html index 8cd0f2d018cdd..b3710953833ec 100644 --- a/docs/generated/ingestion/sources/redshift/index.html +++ b/docs/generated/ingestion/sources/redshift/index.html @@ -8,7 +8,7 @@ - + @@ -30,7 +30,7 @@ discover lineage between tables. Pros:

    • Fast
    • Reliable

    Cons:

    • Does not work with Spectrum/external tables because those scans do not show up in stl_scan table.
    • If a table is depending on a view then the view won't be listed as dependency. Instead the table will be connected with the view's dependencies.

    sql_based

    The sql_based collector uses Redshift's stl_insert to discover all the insert queries and uses SQL parsing to discover the dependencies.

    Pros:

    • Works with Spectrum tables
    • Views are connected properly if a table depends on it

    Cons:

    • Slow.
    • Less reliable as the query parser can fail on certain queries

    mixed

    Using both collector above and first applying the sql based and then the stl_scan based one.

    Pros:

    • Works with Spectrum tables
    • Views are connected properly if a table depends on it
    • A bit more reliable than the sql_based one only

    Cons:

    • Slow
    • May be incorrect at times as the query parser can fail on certain queries
    note

    The Redshift stl tables which are used for getting data lineage only retain approximately two to five days of log history. This means you cannot extract lineage from queries issued outside that window.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[redshift-legacy]'

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    host_port 
    string
    host URL
    bucket_duration
    Enum
    Size of the time window to aggregate usage stats.
    Default: DAY
    capture_lineage_query_parser_failures
    boolean
    Whether to capture lineage query parser errors with dataset properties for debugging
    Default: False
    database
    string
    database (catalog)
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    default_schema
    string
    The default schema to use if the sql parser fails to parse the schema with sql_based lineage collector
    Default: public
    end_time
    string(date-time)
    Latest date of lineage/usage to consider. Default: Current time in UTC
    include_copy_lineage
    boolean
    Whether lineage should be collected from copy commands
    Default: True
    include_table_lineage
    boolean
    Whether table lineage should be ingested.
    Default: True
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_unload_lineage
    boolean
    Whether lineage should be collected from unload commands
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    platform_instance_map
    map(str,string)
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    start_time
    string(date-time)
    Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on bucket_duration). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.
    table_lineage_mode
    Enum
    Which table lineage collector mode to use. Available modes are: [stl_scan_based, sql_based, mixed]
    Default: stl_scan_based
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    s3_lineage_config
    S3LineageProviderConfig
    Common config for S3 lineage generation
    s3_lineage_config.path_specs
    array(object)
    s3_lineage_config.path_specs.include 
    string
    Path to table. Name variable {table} is used to mark the folder with dataset. In absence of {table}, file level dataset will be created. Check below examples for more details.
    s3_lineage_config.path_specs.default_extension
    string
    For files without extension it will assume the specified file type. If it is not set the files without extensions will be skipped.
    s3_lineage_config.path_specs.enable_compression
    boolean
    Enable or disable processing compressed files. Currently .gz and .bz files are supported.
    Default: True
    s3_lineage_config.path_specs.exclude
    array(string)
    s3_lineage_config.path_specs.file_types
    array(string)
    s3_lineage_config.path_specs.sample_files
    boolean
    Instead of listing all the files, only take a small sample of files to infer the schema. File count and file size calculation will be disabled. This can affect performance significantly if enabled
    Default: True
    s3_lineage_config.path_specs.table_name
    string
    Display name of the dataset. Combination of named variables from include path and strings
    schema_pattern
    AllowDenyPattern
    Default: {'allow': ['.*'], 'deny': ['information_schema'], ...
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified GBs. If set to null, no limit on the size of tables to profile. Supported only in snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 to 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 to 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.redshift.RedshiftSource
    • Browse on GitHub

    Module redshift-usage-legacy

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Platform InstanceEnabled by default

    This plugin extracts usage statistics for datasets in Amazon Redshift.

    Note: Usage information is computed by querying the following system tables -

    1. stl_scan
    2. svv_table_info
    3. stl_query
    4. svl_user_info

    To grant this plugin access to all system tables, please alter your DataHub Redshift user as follows:

    ALTER USER datahub_user WITH SYSLOG ACCESS UNRESTRICTED;

    This plugin has the below functionalities -

    1. For a specific dataset this plugin ingests the following statistics -
      1. top n queries.
      2. top users.
    2. Aggregation of these statistics into buckets, by day or hour granularity.
    note

    This source only does usage statistics. To get the tables, views, and schemas in your Redshift warehouse, ingest using the redshift source described above.

    note

    Redshift system tables have some latency in getting data from queries. In addition, these tables only maintain logs for 2-5 days. You can find more information from the official documentation here.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[redshift-usage-legacy]'

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    email_domain 
    string
    Email domain of your organisation so users can be displayed on UI appropriately.
    host_port 
    string
    host URL
    bucket_duration
    Enum
    Size of the time window to aggregate usage stats.
    Default: DAY
    capture_lineage_query_parser_failures
    boolean
    Whether to capture lineage query parser errors with dataset properties for debugging
    Default: False
    database
    string
    database (catalog)
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    default_schema
    string
    The default schema to use if the sql parser fails to parse the schema with sql_based lineage collector
    Default: public
    end_time
    string(date-time)
    Latest date of lineage/usage to consider. Default: Current time in UTC
    format_sql_queries
    boolean
    Whether to format sql queries
    Default: False
    include_copy_lineage
    boolean
    Whether lineage should be collected from copy commands
    Default: True
    include_operational_stats
    boolean
    Whether to display operational stats.
    Default: True
    include_read_operational_stats
    boolean
    Whether to report read operational stats. Experimental.
    Default: False
    include_table_lineage
    boolean
    Whether table lineage should be ingested.
    Default: True
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_top_n_queries
    boolean
    Whether to ingest the top_n_queries.
    Default: True
    include_unload_lineage
    boolean
    Whether lineage should be collected from unload commands
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Any options specified here will be passed to SQLAlchemy's create_engine as kwargs.See https://docs.sqlalchemy.org/en/14/core/engines.html#sqlalchemy.create_engine for details.
    Default: {}
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    platform_instance_map
    map(str,string)
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    start_time
    string(date-time)
    Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on bucket_duration). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.
    table_lineage_mode
    Enum
    Which table lineage collector mode to use. Available modes are: [stl_scan_based, sql_based, mixed]
    Default: stl_scan_based
    top_n_queries
    integer
    Number of top queries to save to each table.
    Default: 10
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    s3_lineage_config
    S3LineageProviderConfig
    Common config for S3 lineage generation
    s3_lineage_config.path_specs
    array(object)
    s3_lineage_config.path_specs.include 
    string
    Path to table. Name variable {table} is used to mark the folder with dataset. In absence of {table}, file level dataset will be created. Check below examples for more details.
    s3_lineage_config.path_specs.default_extension
    string
    For files without extension it will assume the specified file type. If it is not set the files without extensions will be skipped.
    s3_lineage_config.path_specs.enable_compression
    boolean
    Enable or disable processing compressed files. Currently .gz and .bz files are supported.
    Default: True
    s3_lineage_config.path_specs.exclude
    array(string)
    s3_lineage_config.path_specs.file_types
    array(string)
    s3_lineage_config.path_specs.sample_files
    boolean
    Instead of listing all the files, only take a small sample of files to infer the schema. File count and file size calculation will be disabled. This can affect performance significantly if enabled
    Default: True
    s3_lineage_config.path_specs.table_name
    string
    Display name of the dataset. Combination of named variables from include path and strings
    schema_pattern
    AllowDenyPattern
    Default: {'allow': ['.*'], 'deny': ['information_schema'], ...
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    user_email_pattern
    AllowDenyPattern
    regex patterns for user emails to filter in usage.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    user_email_pattern.allow
    array(string)
    user_email_pattern.deny
    array(string)
    user_email_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified GBs. If set to null, no limit on the size of tables to profile. Supported only in snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 to 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 to 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.usage.redshift_usage.RedshiftUsageSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Redshift, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/s3/index.html b/docs/generated/ingestion/sources/s3/index.html index 43185623954ca..fe211431fdac5 100644 --- a/docs/generated/ingestion/sources/s3/index.html +++ b/docs/generated/ingestion/sources/s3/index.html @@ -8,7 +8,7 @@ - + @@ -23,7 +23,7 @@ While we've done our best to limit the expensiveness of the queries the profiler runs, you should be prudent about the set of tables profiling is enabled on or the frequency of the profiling runs.

    caution

    If you are ingesting datasets from AWS S3, we recommend running the ingestion on a server in the same region to avoid high egress costs.

    Compatibility

    Profiles are computed with PyDeequ, which relies on PySpark. Therefore, for computing profiles, we currently require Spark 3.0.3 with Hadoop 3.2 to be installed and the SPARK_HOME and SPARK_VERSION environment variables to be set. The Spark+Hadoop binary can be downloaded here.

    For an example guide on setting up PyDeequ on AWS, see this guide.

    Code Coordinates

    • Class Name: datahub.ingestion.source.s3.source.S3Source
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for S3 Data Lake, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/sagemaker/index.html b/docs/generated/ingestion/sources/sagemaker/index.html index d8b9c89194f0a..edd6168cc9094 100644 --- a/docs/generated/ingestion/sources/sagemaker/index.html +++ b/docs/generated/ingestion/sources/sagemaker/index.html @@ -8,13 +8,13 @@ - +

    SageMaker

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Table-Level LineageEnabled by default

    This plugin extracts the following:

    • Feature groups
    • Models, jobs, and lineage between the two (e.g. when jobs output a model or a model is used by a job)

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[sagemaker]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: sagemaker
    config:
    # Coordinates
    aws_region: "my-aws-region"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    aws_region 
    string
    AWS region code.
    aws_access_key_id
    string
    AWS access key ID. Can be auto-detected, see the AWS boto3 docs for details.
    aws_endpoint_url
    string
    The AWS service endpoint. This is normally constructed automatically, but can be overridden here.
    aws_profile
    string
    Named AWS profile to use. Only used if access key / secret are unset. If not set the default will be used
    aws_proxy
    map(str,string)
    aws_secret_access_key
    string
    AWS secret access key. Can be auto-detected, see the AWS boto3 docs for details.
    aws_session_token
    string
    AWS session token. Can be auto-detected, see the AWS boto3 docs for details.
    extract_feature_groups
    boolean
    Whether to extract feature groups.
    Default: True
    extract_jobs
    One of string, boolean
    Whether to extract AutoML jobs.
    Default: True
    extract_models
    boolean
    Whether to extract models.
    Default: True
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    aws_role
    One of string, union(anyOf), string, AwsAssumeRoleConfig
    AWS roles to assume. If using the string format, the role ARN can be specified directly. If using the object format, the role can be specified in the RoleArn field and additional available arguments are documented at https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/sts.html?highlight=assume_role#STS.Client.assume_role
    aws_role.RoleArn 
    string
    ARN of the role to assume.
    aws_role.ExternalId
    string
    External ID to use when assuming the role.
    database_pattern
    AllowDenyPattern
    regex patterns for databases to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    database_pattern.allow
    array(string)
    database_pattern.deny
    array(string)
    database_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    regex patterns for tables to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.aws.sagemaker.SagemakerSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for SageMaker, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/salesforce/index.html b/docs/generated/ingestion/sources/salesforce/index.html index a29df68a28481..391f8f0adc9e3 100644 --- a/docs/generated/ingestion/sources/salesforce/index.html +++ b/docs/generated/ingestion/sources/salesforce/index.html @@ -8,14 +8,14 @@ - +

    Salesforce

    Incubating

    Important Capabilities

    CapabilityStatusNotes
    Data ProfilingOnly table level profiling is supported via profiling.enabled config field
    Detect Deleted EntitiesNot supported yet
    DomainsSupported via the domain config field
    Platform InstanceCan be equivalent to Salesforce organization

    Prerequisites

    In order to ingest metadata from Salesforce, you will need one of:

    • Salesforce username, password, security token
    • Salesforce username, consumer key and private key for JSON web token access
    • Salesforce instance url and access token/session id (suitable for one-shot ingestion only, as access token typically expires after 2 hours of inactivity)

    The account used to access Salesforce requires the following permissions for this integration to work:

    • View Setup and Configuration
    • View All Data

    Integration Details

    This plugin extracts Salesforce Standard and Custom Objects and their details (fields, record count, etc) from a Salesforce instance. The Python library simple-salesforce is used for authenticating and calling the Salesforce REST API to retrieve details from the Salesforce instance.

    REST API Resources used in this integration

    Concept Mapping

    This ingestion source maps the following Source System Concepts to DataHub Concepts:

    Source ConceptDataHub ConceptNotes
    SalesforceData Platform
    Standard ObjectDatasetsubtype "Standard Object"
    Custom ObjectDatasetsubtype "Custom Object"

    Caveats

    • This connector has only been tested with Salesforce Developer Edition.
    • This connector only supports table level profiling (Row and Column counts) as of now. Row counts are approximate as returned by Salesforce RecordCount REST API.
    • This integration does not support ingesting Salesforce External Objects

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[salesforce]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    pipeline_name: my_salesforce_pipeline
    source:
    type: "salesforce"
    config:
    instance_url: "https://mydomain.my.salesforce.com/"
    username: user@company
    password: password_for_user
    security_token: security_token_for_user
    platform_instance: mydomain-dev-ed
    domain:
    sales:
    allow:
    - "Opportunity$"
    - "Lead$"

    object_pattern:
    allow:
    - "Account$"
    - "Opportunity$"
    - "Lead$"

    sink:
    type: "datahub-rest"
    config:
    server: "http://localhost:8080"

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    access_token
    string
    Access token for instance url
    auth
    Enum
    Default: USERNAME_PASSWORD
    consumer_key
    string
    Consumer key for Salesforce JSON web token access
    ingest_tags
    boolean
    Ingest Tags from source. This will override Tags entered from UI
    Default: False
    instance_url
    string
    Salesforce instance url. e.g. https://MyDomainName.my.salesforce.com
    is_sandbox
    boolean
    Connect to Sandbox instance of your Salesforce
    Default: False
    password
    string
    Password for Salesforce user
    platform
    string
    Default: salesforce
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    private_key
    string
    Private key as a string for Salesforce JSON web token access
    security_token
    string
    Security token for Salesforce username
    username
    string
    Salesforce username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    object_pattern
    AllowDenyPattern
    Regex patterns for Salesforce objects to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    object_pattern.allow
    array(string)
    object_pattern.deny
    array(string)
    object_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns for profiles to filter in ingestion, allowed by the object_pattern.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    SalesforceProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.enabled
    boolean
    Whether profiling should be done. Supports only table-level profiling at this stage
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling; it just adds additional checks for when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.

    Code Coordinates

    • Class Name: datahub.ingestion.source.salesforce.SalesforceSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Salesforce, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/snowflake/index.html b/docs/generated/ingestion/sources/snowflake/index.html index e7cdbc6e6c99d..c2b8593466915 100644 --- a/docs/generated/ingestion/sources/snowflake/index.html +++ b/docs/generated/ingestion/sources/snowflake/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ this permission is not required.
  • usage is required for us to run queries using the warehouse
  • usage on database and schema are required because without it tables and views inside them are not accessible. If an admin does the required grants on table but misses the grants on schema or the database in which the table/view exists then we will not be able to get metadata for the table/view.
  • If metadata is required only on some schemas then you can grant the usage privileges only on a particular schema like
  • grant usage on schema "<your-database>"."<your-schema>" to role datahub_role;

    This represents the bare minimum privileges required to extract databases, schemas, views, tables from Snowflake.

    If you plan to enable extraction of table lineage, via the include_table_lineage config flag, extraction of usage statistics, via the include_usage_stats config, or extraction of tags (without lineage), via the extract_tags config, you'll also need to grant access to the Account Usage system tables, using which the DataHub source extracts information. This can be done by granting access to the snowflake database.

    grant imported privileges on database snowflake to role datahub_role;

    Authentication

    Authentication is most simply done via a Snowflake user and password.

    Alternatively, other authentication methods are supported via the authentication_type config option.

    Okta OAuth

    To set up Okta OAuth authentication, roughly follow the four steps in this guide.

    Pass in the following values, as described in the article, for your recipe's oauth_config:

    • provider: okta
    • client_id: <OAUTH_CLIENT_ID>
    • client_secret: <OAUTH_CLIENT_SECRET>
    • authority_url: <OKTA_OAUTH_TOKEN_ENDPOINT>
    • scopes: The list of your Okta scopes, i.e. with the session:role: prefix

    Datahub only supports two OAuth grant types: client_credentials and password. The steps slightly differ based on which you decide to use.

    Client Credentials Grant Type (Simpler)
    • When creating an Okta App Integration, choose type API Services
      • Ensure client authentication method is Client secret
      • Note your Client ID
    • Create a Snowflake user to correspond to your newly created Okta client credentials
      • Ensure the user's Login Name matches your Okta application's Client ID
      • Ensure the user has been granted your datahub role
    Password Grant Type
    • When creating an Okta App Integration, choose type OIDC -> Native Application
      • Add Grant Type Resource Owner Password
      • Ensure client authentication method is Client secret
    • Create an Okta user to sign into, noting the Username and Password
    • Create a Snowflake user to correspond to your newly created Okta client credentials
      • Ensure the user's Login Name matches your Okta user's Username (likely an email)
      • Ensure the user has been granted your datahub role
    • When running ingestion, provide the required oauth_config fields, including client_id and client_secret, plus your Okta user's Username and Password
      • Note: the username and password config options are not nested under oauth_config

    Snowflake Shares

    If you are using Snowflake Shares to share data across different snowflake accounts, and you have set up DataHub recipes for ingesting metadata from all these accounts, you may end up having multiple similar dataset entities corresponding to virtual versions of same table in different snowflake accounts. DataHub Snowflake connector can automatically link such tables together through Siblings and Lineage relationship if user provides information necessary to establish the relationship using configuration shares in recipe.

    Example

    • Snowflake account account1 (ingested as platform_instance instance1) owns a database db1. A share X is created in account1 that includes database db1 along with schemas and tables inside it.
    • Now, X is shared with snowflake account account2 (ingested as platform_instance instance2). A database db1_from_X is created from inbound share X in account2. In this case, all tables and views included in share X will also be present in instance2.db1_from_X.
    • This can be represented in shares configuration section as
      shares:
      X: # name of the share
      database_name: db1
      platform_instance: instance1
      consumers: # list of all databases created from share X
      - database_name: db1_from_X
      platform_instance: instance2

    • If share X is shared with more Snowflake accounts and a database is created from share X in those accounts, then additional entries need to be added to the consumers list for share X, one per Snowflake account. The same shares config can then be copied across recipes of all accounts.

    Caveats

    • Some of the features are only available in the Snowflake Enterprise Edition. This doc has notes mentioning where this applies.
    • The underlying Snowflake views that we use to get metadata have a latency of 45 minutes to 3 hours. So we would not be able to get very recent metadata in some cases like queries you ran within that time period etc. This is applicable particularly for lineage, usage and tags (without lineage) extraction.
    • If there is any incident going on for Snowflake we will not be able to get the metadata until that incident is resolved.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[snowflake]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: snowflake
    config:
    # This option is recommended to be used to ingest all lineage
    ignore_start_time_lineage: true

    # Coordinates
    account_id: "abc48144"
    warehouse: "COMPUTE_WH"

    # Credentials
    username: "${SNOWFLAKE_USER}"
    password: "${SNOWFLAKE_PASS}"
    role: "datahub_role"

    # (Optional) Uncomment and update this section to filter ingested datasets
    # database_pattern:
    # allow:
    # - "^ACCOUNTING_DB$"
    # - "^MARKETING_DB$"

    profiling:
    # Change to false to disable profiling
    enabled: true
    # This option is recommended to reduce profiling time and costs.
    turn_off_expensive_profiling_metrics: true

    # (Optional) Uncomment and update this section to filter profiled tables
    # profile_pattern:
    # allow:
    # - "ACCOUNTING_DB.*.*"
    # - "MARKETING_DB.*.*"

    # Default sink is datahub-rest and doesn't need to be configured
    # See https://datahubproject.io/docs/metadata-ingestion/sink_docs/datahub for customization options

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    account_id 
    string
    Snowflake account identifier. e.g. xy12345, xy12345.us-east-2.aws, xy12345.us-central1.gcp, xy12345.central-us.azure, xy12345.us-west-2.privatelink. Refer Account Identifiers for more details.
    apply_view_usage_to_tables
    boolean
    Whether to apply view's usage to its base tables. If set to True, usage is applied to base tables only.
    Default: False
    authentication_type
    string
    The type of authenticator to use when connecting to Snowflake. Supports "DEFAULT_AUTHENTICATOR", "OAUTH_AUTHENTICATOR", "EXTERNAL_BROWSER_AUTHENTICATOR" and "KEY_PAIR_AUTHENTICATOR".
    Default: DEFAULT_AUTHENTICATOR
    bucket_duration
    Enum
    Size of the time window to aggregate usage stats.
    Default: DAY
    connect_args
    object
    Connect args to pass to Snowflake SqlAlchemy driver
    convert_urns_to_lowercase
    boolean
    Default: True
    email_as_user_identifier
    boolean
    Format user urns as an email, if the snowflake user's email is set. If email_domain is provided, generates email addresses for snowflake users with unset emails, based on their username.
    Default: True
    email_domain
    string
    Email domain of your organisation so users can be displayed on UI appropriately.
    end_time
    string(date-time)
    Latest date of lineage/usage to consider. Default: Current time in UTC
    extract_tags
    Enum
    Optional. Allowed values are without_lineage, with_lineage, and skip (default). without_lineage only extracts tags that have been applied directly to the given entity. with_lineage extracts both directly applied and propagated tags, but will be significantly slower. See the Snowflake documentation for information about tag lineage/propagation.
    Default: skip
    format_sql_queries
    boolean
    Whether to format sql queries
    Default: False
    ignore_start_time_lineage
    boolean
    Default: False
    include_column_lineage
    boolean
    Populates table->table and view->table column lineage. Requires appropriate grants given to the role and the Snowflake Enterprise Edition or above.
    Default: True
    include_external_url
    boolean
    Whether to populate Snowsight url for Snowflake Objects
    Default: True
    include_operational_stats
    boolean
    Whether to display operational stats.
    Default: True
    include_read_operational_stats
    boolean
    Whether to report read operational stats. Experimental.
    Default: False
    include_table_lineage
    boolean
    If enabled, populates the snowflake table-to-table and s3-to-snowflake table lineage. Requires appropriate grants given to the role and Snowflake Enterprise Edition or above.
    Default: True
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_technical_schema
    boolean
    If enabled, populates the snowflake technical schema and descriptions.
    Default: True
    include_top_n_queries
    boolean
    Whether to ingest the top_n_queries.
    Default: True
    include_usage_stats
    boolean
    If enabled, populates the snowflake usage statistics. Requires appropriate grants given to the role.
    Default: True
    include_view_column_lineage
    boolean
    Populates view->view and table->view column lineage.
    Default: False
    include_view_lineage
    boolean
    If enabled, populates the snowflake view->table and table->view lineages. Requires appropriate grants given to the role, and include_table_lineage to be True. view->table lineage requires Snowflake Enterprise Edition or above.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    match_fully_qualified_names
    boolean
    Whether schema_pattern is matched against fully qualified schema name <catalog>.<schema>.
    Default: False
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    Snowflake password.
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    private_key
    string
    Private key in a form of '-----BEGIN PRIVATE KEY-----\nprivate-key\n-----END PRIVATE KEY-----\n' if using key pair authentication. Encrypted version of private key will be in a form of '-----BEGIN ENCRYPTED PRIVATE KEY-----\nencrypted-private-key\n-----END ENCRYPTED PRIVATE KEY-----\n' See: https://docs.snowflake.com/en/user-guide/key-pair-auth.html
    private_key_password
    string(password)
    Password for your private key. Required if using key pair authentication with encrypted private key.
    private_key_path
    string
    The path to the private key if using key pair authentication. Ignored if private_key is set. See: https://docs.snowflake.com/en/user-guide/key-pair-auth.html
    role
    string
    Snowflake role.
    scheme
    string
    Default: snowflake
    start_time
    string(date-time)
    Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on bucket_duration). You can also specify relative time with respect to end_time such as '-7 days' Or '-7d'.
    store_last_profiling_timestamps
    boolean
    Enable storing last profile timestamp in store.
    Default: False
    store_last_usage_extraction_timestamp
    boolean
    Enable checking last usage timestamp in store.
    Default: True
    temporary_tables_pattern
    array(string)
    top_n_queries
    integer
    Number of top queries to save to each table.
    Default: 10
    upstream_lineage_in_report
    boolean
    Default: False
    username
    string
    Snowflake username.
    validate_upstreams_against_patterns
    boolean
    Whether to validate upstream snowflake tables against allow-deny patterns
    Default: True
    warehouse
    string
    Snowflake warehouse.
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    classification
    ClassificationConfig
    For details, refer Classification.
    Default: {'enabled': False, 'sample_size': 100, 'max_worker...
    classification.enabled
    boolean
    Whether classification should be used to auto-detect glossary terms
    Default: False
    classification.info_type_to_term
    map(str,string)
    classification.max_workers
    integer
    Number of worker threads to use for classification. Set to 1 to disable.
    Default: 2
    classification.sample_size
    integer
    Number of sample values used for classification.
    Default: 100
    classification.classifiers
    array(object)
    classification.classifiers.type 
    string
    The type of the classifier to use. For DataHub, use datahub
    classification.classifiers.config
    object
    The configuration required for initializing the classifier. If not specified, uses defaults for classifier type.
    classification.column_pattern
    AllowDenyPattern
    Regex patterns to filter columns for classification. This is used in combination with other patterns in parent config. Specify regex to match the column name in database.schema.table.column format.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    classification.column_pattern.allow
    array(string)
    classification.column_pattern.deny
    array(string)
    classification.column_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    classification.table_pattern
    AllowDenyPattern
    Regex patterns to filter tables for classification. This is used in combination with other patterns in parent config. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    classification.table_pattern.allow
    array(string)
    classification.table_pattern.deny
    array(string)
    classification.table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    database_pattern
    AllowDenyPattern
    Default: {'allow': ['.*'], 'deny': ['^UTIL_DB$', '^SNOWFLAK...
    database_pattern.allow
    array(string)
    database_pattern.deny
    array(string)
    database_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    oauth_config
    OAuthConfiguration
    oauth configuration - https://docs.snowflake.com/en/user-guide/python-connector-example.html#connecting-with-oauth
    oauth_config.authority_url 
    string
    Authority url of your identity provider
    oauth_config.client_id 
    string
    client id of your registered application
    oauth_config.provider 
    Enum
    Identity provider for OAuth. Supported providers are microsoft and okta.
    oauth_config.client_secret
    string(password)
    client secret of the application if use_certificate = false
    oauth_config.encoded_oauth_private_key
    string
    base64 encoded private key content if use_certificate = true
    oauth_config.encoded_oauth_public_key
    string
    base64 encoded certificate content if use_certificate = true
    oauth_config.scopes
    array(string)
    oauth_config.use_certificate
    boolean
    Do you want to use certificate and private key to authenticate using oauth
    Default: False
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    shares
    map(str,SnowflakeShareConfig)
    shares.key.database 
    string
    Database from which share is created.
    shares.key.platform_instance 
    string
    Platform instance for snowflake account in which share is created.
    shares.key.consumers
    array(object)
    shares.key.consumers.database 
    string
    Database created from share in consumer account.
    shares.key.consumers.platform_instance 
    string
    Platform instance of consumer snowflake account.
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    tag_pattern
    AllowDenyPattern
    List of regex patterns for tags to include in ingestion. Only used if extract_tags is enabled.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    tag_pattern.allow
    array(string)
    tag_pattern.deny
    array(string)
    tag_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    user_email_pattern
    AllowDenyPattern
    regex patterns for user emails to filter in usage.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    user_email_pattern.allow
    array(string)
    user_email_pattern.deny
    array(string)
    user_email_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in snowflake and BigQuery
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified GBs. If set to null, no limit on the size of tables to profile. Supported only in snowflake and BigQuery
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling; it just adds additional checks for when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    The type of the ingestion state provider registered with datahub.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.snowflake.snowflake_v2.SnowflakeV2Source
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Snowflake, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/sql-queries/index.html b/docs/generated/ingestion/sources/sql-queries/index.html index 7570095d0fbbc..39c9ac9588a15 100644 --- a/docs/generated/ingestion/sources/sql-queries/index.html +++ b/docs/generated/ingestion/sources/sql-queries/index.html @@ -8,13 +8,13 @@ - +
    -

    SQL Queries

    Testing

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[sql-queries]'

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    platform 
    string
    The platform for which to generate data, e.g. snowflake
    query_file 
    string
    Path to file to ingest
    default_db
    string
    The default database to use for unqualified table names
    default_schema
    string
    The default schema to use for unqualified table names
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    usage
    BaseUsageConfig
    The usage config to use when generating usage statistics
    Default: {'bucket_duration': 'DAY', 'end_time': '2023-08-24...
    usage.bucket_duration
    Enum
    Size of the time window to aggregate usage stats.
    Default: DAY
    usage.end_time
    string(date-time)
    Latest date of lineage/usage to consider. Default: Current time in UTC
    usage.format_sql_queries
    boolean
    Whether to format sql queries
    Default: False
    usage.include_operational_stats
    boolean
    Whether to display operational stats.
    Default: True
    usage.include_read_operational_stats
    boolean
    Whether to report read operational stats. Experimental.
    Default: False
    usage.include_top_n_queries
    boolean
    Whether to ingest the top_n_queries.
    Default: True
    usage.start_time
    string(date-time)
    Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on bucket_duration). You can also specify a relative time with respect to end_time, such as '-7 days' or '-7d'.
    usage.top_n_queries
    integer
    Number of top queries to save to each table.
    Default: 10
    usage.user_email_pattern
    AllowDenyPattern
    regex patterns for user emails to filter in usage.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    usage.user_email_pattern.allow
    array(string)
    usage.user_email_pattern.deny
    array(string)
    usage.user_email_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql_queries.SqlQueriesSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for SQL Queries, feel free to ping us on our Slack.

    - +

    SQL Queries

    Testing

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[sql-queries]'

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    platform 
    string
    The platform for which to generate data, e.g. snowflake
    query_file 
    string
    Path to file to ingest
    default_db
    string
    The default database to use for unqualified table names
    default_schema
    string
    The default schema to use for unqualified table names
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    usage
    BaseUsageConfig
    The usage config to use when generating usage statistics
    Default: {'bucket_duration': 'DAY', 'end_time': '2023-08-24...
    usage.bucket_duration
    Enum
    Size of the time window to aggregate usage stats.
    Default: DAY
    usage.end_time
    string(date-time)
    Latest date of lineage/usage to consider. Default: Current time in UTC
    usage.format_sql_queries
    boolean
    Whether to format sql queries
    Default: False
    usage.include_operational_stats
    boolean
    Whether to display operational stats.
    Default: True
    usage.include_read_operational_stats
    boolean
    Whether to report read operational stats. Experimental.
    Default: False
    usage.include_top_n_queries
    boolean
    Whether to ingest the top_n_queries.
    Default: True
    usage.start_time
    string(date-time)
    Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on bucket_duration). You can also specify a relative time with respect to end_time, such as '-7 days' or '-7d'.
    usage.top_n_queries
    integer
    Number of top queries to save to each table.
    Default: 10
    usage.user_email_pattern
    AllowDenyPattern
    regex patterns for user emails to filter in usage.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    usage.user_email_pattern.allow
    array(string)
    usage.user_email_pattern.deny
    array(string)
    usage.user_email_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql_queries.SqlQueriesSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for SQL Queries, feel free to ping us on our Slack.

    + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/sqlalchemy/index.html b/docs/generated/ingestion/sources/sqlalchemy/index.html index 76e46ed1f5a97..57d2dec324d38 100644 --- a/docs/generated/ingestion/sources/sqlalchemy/index.html +++ b/docs/generated/ingestion/sources/sqlalchemy/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    SQLAlchemy

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Data ProfilingOptionally enabled via configuration
    DomainsSupported via the domain config field

    The sqlalchemy source is useful if we don't have a pre-built source for your chosen database system, but there is an SQLAlchemy dialect defined elsewhere. In order to use this, you must pip install the required dialect packages yourself.

    This plugin extracts the following:

    • Metadata for databases, schemas, views, and tables
    • Column types associated with each table
    • Table, row, and column statistics via optional SQL profiling.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[sqlalchemy]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: sqlalchemy
    config:
    # Coordinates
    connect_uri: "dialect+driver://username:password@host:port/database"

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    connect_uri 
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls
    platform 
    string
    Name of platform being ingested, used in constructing URNs.
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile a table only if it has been updated within the specified number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in Snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, no limit on the size of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for the date of the month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for the day of the week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    Whether or not to enable stateful ingestion.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.sql_generic.SQLAlchemyGenericSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for SQLAlchemy, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/superset/index.html b/docs/generated/ingestion/sources/superset/index.html index 89853e069a15f..2a4143954963e 100644 --- a/docs/generated/ingestion/sources/superset/index.html +++ b/docs/generated/ingestion/sources/superset/index.html @@ -8,13 +8,13 @@ - +

    Superset

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Detect Deleted EntitiesOptionally enabled via stateful_ingestion

    This plugin extracts the following:

    • Charts, dashboards, and associated metadata

    See the documentation for Superset's /security/login endpoint at https://superset.apache.org/docs/rest-api for more details on Superset's login API.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[superset]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: superset
    config:
    # Coordinates
    connect_uri: http://localhost:8088

    # Credentials
    username: user
    password: pass
    provider: ldap

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    connect_uri
    string
    Superset host URL.
    Default: http://localhost:8088
    database_alias
    map(str,string)
    display_uri
    string
    optional URL to use in links (if connect_uri is only for ingestion)
    options
    object
    Default: {}
    password
    string
    Superset password.
    provider
    string
    Superset provider.
    Default: db
    username
    string
    Superset username.
    env
    string
    Environment to use in namespace when constructing URNs
    Default: PROD
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Superset Stateful Ingestion Config.
    stateful_ingestion.enabled
    boolean
    Whether or not to enable stateful ingestion.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    If you were using database_alias in one of your other ingestions to rename your databases to something else based on business needs you can rename them in superset also

    source:
    type: superset
    config:
    # Coordinates
    connect_uri: http://localhost:8088

    # Credentials
    username: user
    password: pass
    provider: ldap
    database_alias:
    example_name_1: business_name_1
    example_name_2: business_name_2

    sink:
    # sink configs

    Code Coordinates

    • Class Name: datahub.ingestion.source.superset.SupersetSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Superset, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/tableau/index.html b/docs/generated/ingestion/sources/tableau/index.html index 7c2c43b04bcc3..6d825d711adfb 100644 --- a/docs/generated/ingestion/sources/tableau/index.html +++ b/docs/generated/ingestion/sources/tableau/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    Tableau

    Certified

    Important Capabilities

    CapabilityStatusNotes
    Dataset UsageDashboard/Chart view counts, enabled using extract_usage_stats config
    DescriptionsEnabled by default
    Detect Deleted EntitiesEnabled by default when stateful ingestion is turned on.
    DomainsRequires transformer
    Extract OwnershipRequires recipe configuration
    Extract TagsRequires recipe configuration
    Platform InstanceEnabled by default
    Table-Level LineageEnabled by default

    Prerequisites

    In order to ingest metadata from Tableau, you will need:

    • Tableau Server Version 2021.1.10 and above. It may also work for older versions.
    • Enable the Tableau Metadata API for Tableau Server, if it's not already enabled.
    • Tableau Credentials (Username/Password or Personal Access Token)
    • The user or token must have Site Administrator Explorer permissions.

    Ingestion through UI

    The following video shows you how to get started with ingesting Tableau metadata through the UI.

    Integration Details

    This plugin extracts Sheets, Dashboards, Embedded and Published Data sources metadata within Workbooks in a given project on a Tableau site. Tableau's GraphQL interface is used to extract metadata information. Queries used to extract metadata are located in metadata-ingestion/src/datahub/ingestion/source/tableau_common.py

    Concept Mapping

    This ingestion source maps the following Source System Concepts to DataHub Concepts:

    Source ConceptDataHub ConceptNotes
    "Tableau"Data Platform
    ProjectContainerSubType "Project"
    Embedded DataSourceDatasetSubType "Embedded Data Source"
    Published DataSourceDatasetSubType "Published Data Source"
    Custom SQL TableDatasetSubTypes "View", "Custom SQL"
    Embedded or External TablesDataset
    SheetChart
    DashboardDashboard
    UserUser (a.k.a CorpUser)Optionally Extracted
    WorkbookContainerSubType "Workbook"
    TagTagOptionally Extracted

    Lineage

    Lineage is emitted as received from Tableau's metadata API for

    • Sheets contained within a Dashboard
    • Embedded or Published Data Sources depended on by a Sheet
    • Published Data Sources upstream to Embedded datasource
    • Tables upstream to Embedded or Published Data Source
    • Custom SQL datasources upstream to Embedded or Published Data Source
    • Tables upstream to Custom SQL Data Source

    Caveats

    • Tableau metadata API might return incorrect schema name for tables for some databases, leading to incorrect metadata in DataHub. This source attempts to extract correct schema from databaseTable's fully qualified name, wherever possible. Read Using the databaseTable object in query for caveats in using schema attribute.

    Troubleshooting

    Why are only some workbooks/custom SQLs/published datasources ingested from the specified project?

    This may happen when the Tableau API returns NODE_LIMIT_EXCEEDED error in response to metadata query and returns partial results with message "Showing partial results. , The request exceeded the ‘n’ node limit. Use pagination, additional filtering, or both in the query to adjust results." To resolve this, consider

    • reducing the page size using the page_size config param in datahub recipe (Defaults to 10).
    • increasing tableau configuration metadata query node limit to higher value.

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[tableau]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: tableau
    config:
    # Coordinates
    connect_uri: https://prod-ca-a.online.tableau.com
    site: acryl
    platform_instance: acryl_instance
    project_pattern: ["^default$", "^Project 2$", "^/Project A/Nested Project B$"]

    # Credentials
    username: "${TABLEAU_USER}"
    password: "${TABLEAU_PASSWORD}"

    # Options
    ingest_tags: True
    ingest_owner: True
    default_schema_map:
    mydatabase: public
    anotherdatabase: anotherschema

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    connect_uri 
    string
    Tableau host URL.
    default_schema_map
    object
    Default schema to use when schema is not found.
    Default: {}
    extract_column_level_lineage
    boolean
    When enabled, extracts column-level lineage from Tableau Datasources
    Default: True
    extract_lineage_from_unsupported_custom_sql_queries
    boolean
    [Experimental] Whether to extract lineage from unsupported custom sql queries using SQL parsing
    Default: False
    extract_project_hierarchy
    boolean
    Whether to extract entire project hierarchy for nested projects.
    Default: True
    extract_usage_stats
    boolean
    [experimental] Extract usage statistics for dashboards and charts.
    Default: False
    ingest_embed_url
    boolean
    Ingest a URL to render an embedded Preview of assets within Tableau.
    Default: False
    ingest_external_links_for_charts
    boolean
    Ingest a URL to link out to from charts.
    Default: True
    ingest_external_links_for_dashboards
    boolean
    Ingest a URL to link out to from dashboards.
    Default: True
    ingest_owner
    boolean
    Ingest Owner from source. This will override Owner info entered from UI
    Default: False
    ingest_tables_external
    boolean
    Ingest details for tables external to (not embedded in) tableau as entities.
    Default: False
    ingest_tags
    boolean
    Ingest Tags from source. This will override Tags entered from UI
    Default: False
    page_size
    integer
    [advanced] Number of metadata objects (e.g. CustomSQLTable, PublishedDatasource, etc) to query at a time using the Tableau API.
    Default: 10
    password
    string
    Tableau password, must be set if authenticating using username/password.
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    platform_instance_map
    map(str,string)
    project_path_separator
    string
    The separator used for the project_pattern field between project names. By default, we use a slash. You can change this if your Tableau projects contain slashes in their names, and you'd like to filter by project.
    Default: /
    projects
    array(string)
    site
    string
    Tableau Site. Always required for Tableau Online. Use an empty string to connect to the Default site on Tableau Server.
    Default:
    ssl_verify
    One of boolean, string
    Whether to verify SSL certificates. If using self-signed certificates, set to false or provide the path to the .pem certificate bundle.
    Default: True
    token_name
    string
    Tableau token name, must be set if authenticating using a personal access token.
    token_value
    string
    Tableau token value, must be set if authenticating using a personal access token.
    username
    string
    Tableau username, must be set if authenticating using username/password.
    workbook_page_size
    integer
    [advanced] Number of workbooks to query at a time using the Tableau API.
    Default: 1
    env
    string
    Environment to use in namespace when constructing URNs.
    Default: PROD
    lineage_overrides
    TableauLineageOverrides
    Mappings to change generated dataset urns. Use only if you really know what you are doing.
    lineage_overrides.database_override_map
    map(str,string)
    lineage_overrides.platform_override_map
    map(str,string)
    project_pattern
    AllowDenyPattern
    Filter for specific Tableau projects. For example, use 'My Project' to ingest a root-level Project with name 'My Project', or 'My Project/Nested Project' to ingest a nested Project with name 'Nested Project'. By default, all Projects nested inside a matching Project will be included in ingestion. You can both allow and deny projects based on their name using their name, or a Regex pattern. Deny patterns always take precedence over allow patterns. By default, all projects will be ingested.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    project_pattern.allow
    array(string)
    project_pattern.deny
    array(string)
    project_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    Whether or not to enable stateful ingestion.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.tableau.TableauSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Tableau, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/trino/index.html b/docs/generated/ingestion/sources/trino/index.html index e1235fad68da9..38ebecd0adfa9 100644 --- a/docs/generated/ingestion/sources/trino/index.html +++ b/docs/generated/ingestion/sources/trino/index.html @@ -8,7 +8,7 @@ - + @@ -22,7 +22,7 @@ https://docs.starburst.io/354-e/security/event-logger.html#security-event-logger--page-root https://docs.starburst.io/354-e/security/event-logger.html#analyzing-the-event-log

  • Install starbust-trino-usage plugin Run pip install 'acryl-datahub[starburst-trino-usage]'.

  • CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[starburst-trino-usage]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: starburst-trino-usage
    config:
    # Coordinates
    host_port: yourtrinohost:port
    # The name of the catalog from getting the usage
    database: hive
    # Credentials
    username: trino_username
    password: trino_password
    email_domain: test.com
    audit_catalog: audit
    audit_schema: audit_schema

    sink:
    type: "datahub-rest"
    config:
    server: "http://localhost:8080"

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    audit_catalog 
    string
    The catalog name where the audit table can be found
    audit_schema 
    string
    The schema name where the audit table can be found
    database 
    string
    The name of the catalog from getting the usage
    email_domain 
    string
    The email domain which will be appended to the users
    host_port 
    string
    host URL
    bucket_duration
    Enum
    Size of the time window to aggregate usage stats.
    Default: DAY
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    end_time
    string(date-time)
    Latest date of lineage/usage to consider. Default: Current time in UTC
    format_sql_queries
    boolean
    Whether to format sql queries
    Default: False
    include_operational_stats
    boolean
    Whether to display operational stats.
    Default: True
    include_read_operational_stats
    boolean
    Whether to report read operational stats. Experimental.
    Default: False
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_top_n_queries
    boolean
    Whether to ingest the top_n_queries.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Default: {}
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    start_time
    string(date-time)
    Earliest date of lineage/usage to consider. Default: Last full day in UTC (or hour, depending on bucket_duration). You can also specify a relative time with respect to end_time, such as '-7 days' or '-7d'.
    top_n_queries
    integer
    Number of top queries to save to each table.
    Default: 10
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    user_email_pattern
    AllowDenyPattern
    Regex patterns for user emails to filter in usage.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    user_email_pattern.allow
    array(string)
    user_email_pattern.deny
    array(string)
    user_email_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, no limit on the size of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    Whether stateful ingestion is enabled.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.usage.starburst_trino_usage.TrinoUsageSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Trino, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/ingestion/sources/vertica/index.html b/docs/generated/ingestion/sources/vertica/index.html index b0d68076c5fa5..dd41378561cf4 100644 --- a/docs/generated/ingestion/sources/vertica/index.html +++ b/docs/generated/ingestion/sources/vertica/index.html @@ -8,14 +8,14 @@ - +

    Vertica

    Integration Details

    The DataHub Vertica Plugin extracts the following:

    • Metadata for databases, schemas, views, tables, and projections
    • Table level lineage
    • Metadata for ML Models

    Concept Mapping

    This ingestion source maps the following Source System Concepts to DataHub Concepts:

    Source ConceptDataHub ConceptNotes
    VerticaData Platform
    TableDataset
    ViewDataset
    ProjectionsDataset

    Metadata Ingestion Quickstart

    For context on getting started with ingestion, check out our metadata ingestion guide. Certified

    Important Capabilities

    CapabilityStatusNotes
    Data ProfilingOptionally enabled via configuration
    Detect Deleted EntitiesOptionally enabled via stateful_ingestion.remove_stale_metadata
    DomainsSupported via the domain config field
    Platform InstanceEnabled by default
    Table-Level LineageEnabled by default, can be disabled via configuration include_view_lineage and include_projection_lineage

    Prerequisites

    In order to ingest metadata from Vertica, you will need:

    • Vertica Server Version 10.1.1-0 and above. It may also work with, but has not been tested with, older versions.
    • Vertica Credentials (Username/Password)

    CLI based Ingestion

    Install the Plugin

    pip install 'acryl-datahub[vertica]'

    Starter Recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    type: vertica
    config:
    # Coordinates
    host_port: localhost:5433
    database: DATABASE_NAME

    # Credentials
    username: "${VERTICA_USER}"
    password: "${VERTICA_PASSWORD}"

    include_tables: true
    include_views: true
    include_projections: true
    include_models: true
    include_view_lineage: true
    include_projection_lineage: true

    sink:
    # sink configs

    Config Details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldDescription
    host_port 
    string
    host URL
    database
    string
    database (catalog)
    database_alias
    string
    [Deprecated] Alias to apply to database when ingesting.
    include_models
    boolean
    Whether Models should be ingested.
    Default: True
    include_projection_lineage
    boolean
    If the source supports it, include lineage from projections to their underlying tables.
    Default: True
    include_projections
    boolean
    Whether projections should be ingested.
    Default: True
    include_table_location_lineage
    boolean
    If the source supports it, include table lineage to the underlying storage location.
    Default: True
    include_tables
    boolean
    Whether tables should be ingested.
    Default: True
    include_view_lineage
    boolean
    If the source supports it, include view lineage to the underlying storage location.
    Default: True
    include_views
    boolean
    Whether views should be ingested.
    Default: True
    options
    object
    Any options specified here will be passed to SQLAlchemy.create_engine as kwargs.
    password
    string(password)
    password
    platform_instance
    string
    The instance of the platform that all assets produced by this recipe belong to
    scheme
    string
    Default: vertica+vertica_python
    sqlalchemy_uri
    string
    URI of database to connect to. See https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls. Takes precedence over other connection parameters.
    username
    string
    username
    env
    string
    The environment that all assets produced by this connector belong to
    Default: PROD
    domain
    map(str,AllowDenyPattern)
    A class to store allow deny regexes
    domain.key.allow
    array(string)
    domain.key.deny
    array(string)
    domain.key.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    models_pattern
    AllowDenyPattern
    Regex patterns for ml models to filter in ingestion.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    models_pattern.allow
    array(string)
    models_pattern.deny
    array(string)
    models_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profile_pattern
    AllowDenyPattern
    Regex patterns to filter tables (or specific columns) for profiling during ingestion. Note that only tables allowed by the table_pattern will be considered.
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    profile_pattern.allow
    array(string)
    profile_pattern.deny
    array(string)
    profile_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    schema_pattern
    AllowDenyPattern
    Regex patterns for schemas to filter in ingestion. Specify regex to only match the schema name. e.g. to match all tables in schema analytics, use the regex 'analytics'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    schema_pattern.allow
    array(string)
    schema_pattern.deny
    array(string)
    schema_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    table_pattern
    AllowDenyPattern
    Regex patterns for tables to filter in ingestion. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allow
    array(string)
    table_pattern.deny
    array(string)
    table_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    view_pattern
    AllowDenyPattern
    Regex patterns for views to filter in ingestion. Note: Defaults to table_pattern if not specified. Specify regex to match the entire view name in database.schema.view format. e.g. to match all views starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'
    Default: {'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    view_pattern.allow
    array(string)
    view_pattern.deny
    array(string)
    view_pattern.ignoreCase
    boolean
    Whether to ignore case sensitivity during pattern matching.
    Default: True
    profiling
    GEProfilingConfig
    Default: {'enabled': False, 'operation_config': {'lower_fre...
    profiling.catch_exceptions
    boolean
    Default: True
    profiling.enabled
    boolean
    Whether profiling should be done.
    Default: False
    profiling.field_sample_values_limit
    integer
    Upper limit for number of sample values to collect for all columns.
    Default: 20
    profiling.include_field_distinct_count
    boolean
    Whether to profile for the number of distinct values for each column.
    Default: True
    profiling.include_field_distinct_value_frequencies
    boolean
    Whether to profile for distinct value frequencies.
    Default: False
    profiling.include_field_histogram
    boolean
    Whether to profile for the histogram for numeric fields.
    Default: False
    profiling.include_field_max_value
    boolean
    Whether to profile for the max value of numeric columns.
    Default: True
    profiling.include_field_mean_value
    boolean
    Whether to profile for the mean value of numeric columns.
    Default: True
    profiling.include_field_median_value
    boolean
    Whether to profile for the median value of numeric columns.
    Default: True
    profiling.include_field_min_value
    boolean
    Whether to profile for the min value of numeric columns.
    Default: True
    profiling.include_field_null_count
    boolean
    Whether to profile for the number of nulls for each column.
    Default: True
    profiling.include_field_quantiles
    boolean
    Whether to profile for the quantiles of numeric columns.
    Default: False
    profiling.include_field_sample_values
    boolean
    Whether to profile for the sample values for all columns.
    Default: True
    profiling.include_field_stddev_value
    boolean
    Whether to profile for the standard deviation of numeric columns.
    Default: True
    profiling.limit
    integer
    Max number of documents to profile. By default, profiles all documents.
    profiling.max_number_of_fields_to_profile
    integer
    A positive integer that specifies the maximum number of columns to profile for any table. None implies all columns. The cost of profiling goes up significantly as the number of columns to profile goes up.
    profiling.max_workers
    integer
    Number of worker threads to use for profiling. Set to 1 to disable.
    Default: 10
    profiling.offset
    integer
    Offset in documents to profile. By default, uses no offset.
    profiling.partition_datetime
    string(date-time)
    For partitioned datasets profile only the partition which matches the datetime or profile the latest one if not set. Only Bigquery supports this.
    profiling.partition_profiling_enabled
    boolean
    Default: True
    profiling.profile_if_updated_since_days
    number
    Profile table only if it has been updated since these many number of days. If set to null, no constraint of last modified time for tables to profile. Supported only in snowflake and BigQuery.
    profiling.profile_table_level_only
    boolean
    Whether to perform profiling at table-level only, or include column-level profiling as well.
    Default: False
    profiling.profile_table_row_count_estimate_only
    boolean
    Use an approximate query for row count. This will be much faster but slightly less accurate. Only supported for Postgres and MySQL.
    Default: False
    profiling.profile_table_row_limit
    integer
    Profile tables only if their row count is less than the specified count. If set to null, no limit on the row count of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5000000
    profiling.profile_table_size_limit
    integer
    Profile tables only if their size is less than the specified number of GBs. If set to null, no limit on the size of tables to profile. Supported only in Snowflake and BigQuery.
    Default: 5
    profiling.query_combiner_enabled
    boolean
    This feature is still experimental and can be disabled if it causes issues. Reduces the total number of queries issued and speeds up profiling by dynamically combining SQL queries where possible.
    Default: True
    profiling.report_dropped_profiles
    boolean
    Whether to report datasets or dataset columns which were not profiled. Set to True for debugging purposes.
    Default: False
    profiling.turn_off_expensive_profiling_metrics
    boolean
    Whether to turn off expensive profiling or not. This turns off profiling for quantiles, distinct_value_frequencies, histogram & sample_values. This also limits maximum number of fields being profiled to 10.
    Default: False
    profiling.operation_config
    OperationConfig
    Experimental feature. To specify operation configs.
    profiling.operation_config.lower_freq_profile_enabled
    boolean
    Whether to do profiling at lower freq or not. This does not do any scheduling just adds additional checks to when not to run profiling.
    Default: False
    profiling.operation_config.profile_date_of_month
    integer
    Number between 1 and 31 for date of month (both inclusive). If not specified, defaults to Nothing and this field does not take effect.
    profiling.operation_config.profile_day_of_week
    integer
    Number between 0 and 6 for day of week (both inclusive). 0 is Monday and 6 is Sunday. If not specified, defaults to Nothing and this field does not take effect.
    stateful_ingestion
    StatefulStaleMetadataRemovalConfig
    Base specialized config for Stateful Ingestion with stale metadata removal capability.
    stateful_ingestion.enabled
    boolean
    Whether stateful ingestion is enabled.
    Default: False
    stateful_ingestion.remove_stale_metadata
    boolean
    Soft-deletes the entities present in the last successful run but missing in the current run with stateful_ingestion enabled.
    Default: True

    Code Coordinates

    • Class Name: datahub.ingestion.source.sql.vertica.VerticaSource
    • Browse on GitHub

    Questions

    If you've got any questions on configuring ingestion for Vertica, feel free to ping us on our Slack.

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/assertion/index.html b/docs/generated/metamodel/entities/assertion/index.html index 8945f8d680562..502d5db1f8941 100644 --- a/docs/generated/metamodel/entities/assertion/index.html +++ b/docs/generated/metamodel/entities/assertion/index.html @@ -8,7 +8,7 @@ - + @@ -21,7 +21,7 @@ Parameters: Value, Min Value, Max Value

    Assertion Run Events

    Evaluation status and results for an assertion tracked over time.

    Python SDK: Emit assertion info and results for dataset
    # Inlined from /metadata-ingestion/examples/library/data_quality_mcpw_rest.py
    import json
    import time

    import datahub.emitter.mce_builder as builder
    from datahub.emitter.mcp import MetadataChangeProposalWrapper
    from datahub.emitter.rest_emitter import DatahubRestEmitter
    from datahub.metadata.com.linkedin.pegasus2avro.assertion import (
    AssertionInfo,
    AssertionResult,
    AssertionResultType,
    AssertionRunEvent,
    AssertionRunStatus,
    AssertionStdAggregation,
    AssertionStdOperator,
    AssertionStdParameter,
    AssertionStdParameters,
    AssertionStdParameterType,
    AssertionType,
    DatasetAssertionInfo,
    DatasetAssertionScope,
    )
    from datahub.metadata.com.linkedin.pegasus2avro.common import DataPlatformInstance
    from datahub.metadata.com.linkedin.pegasus2avro.dataset import DatasetProperties
    from datahub.metadata.com.linkedin.pegasus2avro.timeseries import PartitionSpec


    def datasetUrn(tbl: str) -> str:
    return builder.make_dataset_urn("postgres", tbl)


    def fldUrn(tbl: str, fld: str) -> str:
    return f"urn:li:schemaField:({datasetUrn(tbl)}, {fld})"


    def assertionUrn(info: AssertionInfo) -> str:
    return "urn:li:assertion:432475190cc846f2894b5b3aa4d55af2"


    def emitAssertionResult(assertionResult: AssertionRunEvent) -> None:
    dataset_assertionRunEvent_mcp = MetadataChangeProposalWrapper(
    entityUrn=assertionResult.assertionUrn,
    aspect=assertionResult,
    )

    # Emit BatchAssertion Result! (timeseries aspect)
    emitter.emit_mcp(dataset_assertionRunEvent_mcp)


    # Create an emitter to the GMS REST API.
    emitter = DatahubRestEmitter("http://localhost:8080")

    datasetProperties = DatasetProperties(
    name="bazTable",
    )
    # Construct a MetadataChangeProposalWrapper object for dataset
    dataset_mcp = MetadataChangeProposalWrapper(
    entityUrn=datasetUrn("bazTable"),
    aspect=datasetProperties,
    )

    # Emit Dataset entity properties aspect! (Skip if dataset is already present)
    emitter.emit_mcp(dataset_mcp)

    # Construct an assertion object.
    assertion_maxVal = AssertionInfo(
    type=AssertionType.DATASET,
    datasetAssertion=DatasetAssertionInfo(
    scope=DatasetAssertionScope.DATASET_COLUMN,
    operator=AssertionStdOperator.BETWEEN,
    nativeType="expect_column_max_to_be_between",
    aggregation=AssertionStdAggregation.MAX,
    fields=[fldUrn("bazTable", "col1")],
    dataset=datasetUrn("bazTable"),
    nativeParameters={"max_value": "99", "min_value": "89"},
    parameters=AssertionStdParameters(
    minValue=AssertionStdParameter(
    type=AssertionStdParameterType.NUMBER, value="89"
    ),
    maxValue=AssertionStdParameter(
    type=AssertionStdParameterType.NUMBER, value="99"
    ),
    ),
    ),
    customProperties={"suite_name": "demo_suite"},
    )

    # Construct a MetadataChangeProposalWrapper object.
    assertion_maxVal_mcp = MetadataChangeProposalWrapper(
    entityUrn=assertionUrn(assertion_maxVal),
    aspect=assertion_maxVal,
    )

    # Emit Assertion entity info aspect!
    emitter.emit_mcp(assertion_maxVal_mcp)

    # Construct an assertion platform object.
    assertion_dataPlatformInstance = DataPlatformInstance(
    platform=builder.make_data_platform_urn("great-expectations")
    )

    # Construct a MetadataChangeProposalWrapper object for assertion platform
    assertion_dataPlatformInstance_mcp = MetadataChangeProposalWrapper(
    entityUrn=assertionUrn(assertion_maxVal),
    aspect=assertion_dataPlatformInstance,
    )
    # Emit Assertion entity platform aspect!
    emitter.emit(assertion_dataPlatformInstance_mcp)


    # Construct batch assertion result object for partition 1 batch
    assertionResult_maxVal_batch_partition1 = AssertionRunEvent(
    timestampMillis=int(time.time() * 1000),
    assertionUrn=assertionUrn(assertion_maxVal),
    asserteeUrn=datasetUrn("bazTable"),
    partitionSpec=PartitionSpec(partition=json.dumps([{"country": "IN"}])),
    runId="uuid1",
    status=AssertionRunStatus.COMPLETE,
    result=AssertionResult(
    type=AssertionResultType.SUCCESS,
    externalUrl="http://example.com/uuid1",
    actualAggValue=90,
    ),
    )

    emitAssertionResult(
    assertionResult_maxVal_batch_partition1,
    )

    # Construct batch assertion result object for partition 2 batch
    assertionResult_maxVal_batch_partition2 = AssertionRunEvent(
    timestampMillis=int(time.time() * 1000),
    assertionUrn=assertionUrn(assertion_maxVal),
    asserteeUrn=datasetUrn("bazTable"),
    partitionSpec=PartitionSpec(partition=json.dumps([{"country": "US"}])),
    runId="uuid1",
    status=AssertionRunStatus.COMPLETE,
    result=AssertionResult(
    type=AssertionResultType.FAILURE,
    externalUrl="http://example.com/uuid1",
    actualAggValue=101,
    ),
    )

    emitAssertionResult(
    assertionResult_maxVal_batch_partition2,
    )

    # Construct batch assertion result object for full table batch.
    assertionResult_maxVal_batch_fulltable = AssertionRunEvent(
    timestampMillis=int(time.time() * 1000),
    assertionUrn=assertionUrn(assertion_maxVal),
    asserteeUrn=datasetUrn("bazTable"),
    runId="uuid1",
    status=AssertionRunStatus.COMPLETE,
    result=AssertionResult(
    type=AssertionResultType.SUCCESS,
    externalUrl="http://example.com/uuid1",
    actualAggValue=93,
    ),
    )

    emitAssertionResult(
    assertionResult_maxVal_batch_fulltable,
    )

    Aspects

    assertionInfo

    Information about an assertion

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "assertionInfo"
    },
    "name": "AssertionInfo",
    "namespace": "com.linkedin.assertion",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "externalUrl",
    "default": null,
    "doc": "URL where the reference exists"
    },
    {
    "type": {
    "type": "enum",
    "name": "AssertionType",
    "namespace": "com.linkedin.assertion",
    "symbols": [
    "DATASET"
    ]
    },
    "name": "type",
    "doc": "Type of assertion. Assertion types can evolve to span Datasets, Flows (Pipelines), Models, Features etc."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "DatasetAssertionInfo",
    "namespace": "com.linkedin.assertion",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "dataset"
    ],
    "name": "Asserts"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "dataset",
    "doc": "The dataset targeted by this assertion."
    },
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "DATASET_COLUMN": "This assertion applies to dataset columns",
    "DATASET_ROWS": "This assertion applies to entire rows of the dataset",
    "DATASET_SCHEMA": "This assertion applies to the schema of the dataset",
    "UNKNOWN": "The scope of the assertion is unknown"
    },
    "name": "DatasetAssertionScope",
    "namespace": "com.linkedin.assertion",
    "symbols": [
    "DATASET_COLUMN",
    "DATASET_ROWS",
    "DATASET_SCHEMA",
    "UNKNOWN"
    ]
    },
    "name": "scope",
    "doc": "Scope of the Assertion. What part of the dataset does this assertion apply to?"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "schemaField"
    ],
    "name": "Asserts"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "fields",
    "default": null,
    "doc": "One or more dataset schema fields that are targeted by this assertion"
    },
    {
    "type": [
    "null",
    {
    "type": "enum",
    "symbolDocs": {
    "COLUMNS": "Assertion is applied on all columns.",
    "COLUMN_COUNT": "Assertion is applied on number of columns.",
    "IDENTITY": "Assertion is applied on individual column value.",
    "MAX": "Assertion is applied on column max",
    "MEAN": "Assertion is applied on column mean",
    "MEDIAN": "Assertion is applied on column median",
    "MIN": "Assertion is applied on column min",
    "NULL_COUNT": "Assertion is applied on number of null values in column",
    "NULL_PROPORTION": "Assertion is applied on proportion of null values in column",
    "ROW_COUNT": "Assertion is applied on number of rows.",
    "STDDEV": "Assertion is applied on column std deviation",
    "SUM": "Assertion is applied on column sum",
    "UNIQUE_COUNT": "Assertion is applied on number of distinct values in column",
    "UNIQUE_PROPOTION": "Assertion is applied on proportion of distinct values in column",
    "_NATIVE_": "Other"
    },
    "name": "AssertionStdAggregation",
    "namespace": "com.linkedin.assertion",
    "symbols": [
    "ROW_COUNT",
    "COLUMNS",
    "COLUMN_COUNT",
    "IDENTITY",
    "MEAN",
    "MEDIAN",
    "UNIQUE_COUNT",
    "UNIQUE_PROPOTION",
    "NULL_COUNT",
    "NULL_PROPORTION",
    "STDDEV",
    "MIN",
    "MAX",
    "SUM",
    "_NATIVE_"
    ],
    "doc": "The function that is applied to the aggregation input (schema, rows, column values) before evaluating an operator."
    }
    ],
    "name": "aggregation",
    "default": null,
    "doc": "Standardized assertion aggregation"
    },
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BETWEEN": "Value being asserted is between min_value and max_value. Requires 'minValue' & 'maxValue' parameters.",
    "CONTAIN": "Value being asserted contains value. Requires 'value' parameter.",
    "END_WITH": "Value being asserted ends with value. Requires 'value' parameter.",
    "EQUAL_TO": "Value being asserted is equal to value. Requires 'value' parameter.",
    "GREATER_THAN": "Value being asserted is greater than some value. Requires 'value' parameter.",
    "GREATER_THAN_OR_EQUAL_TO": "Value being asserted is greater than or equal to some value. Requires 'value' parameter.",
    "IN": "Value being asserted is one of the array values. Requires 'value' parameter.",
    "LESS_THAN": "Value being asserted is less than a max value. Requires 'value' parameter.",
    "LESS_THAN_OR_EQUAL_TO": "Value being asserted is less than or equal to some value. Requires 'value' parameter.",
    "NOT_IN": "Value being asserted is not in one of the array values. Requires 'value' parameter.",
    "NOT_NULL": "Value being asserted is not null. Requires no parameters.",
    "REGEX_MATCH": "Value being asserted matches the regex value. Requires 'value' parameter.",
    "START_WITH": "Value being asserted starts with value. Requires 'value' parameter.",
    "_NATIVE_": "Other"
    },
    "name": "AssertionStdOperator",
    "namespace": "com.linkedin.assertion",
    "symbols": [
    "BETWEEN",
    "LESS_THAN",
    "LESS_THAN_OR_EQUAL_TO",
    "GREATER_THAN",
    "GREATER_THAN_OR_EQUAL_TO",
    "EQUAL_TO",
    "NOT_NULL",
    "CONTAIN",
    "END_WITH",
    "START_WITH",
    "REGEX_MATCH",
    "IN",
    "NOT_IN",
    "_NATIVE_"
    ],
    "doc": "A boolean operator that is applied on the input to an assertion, after an aggregation function has been applied."
    },
    "name": "operator",
    "doc": "Standardized assertion operator"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "AssertionStdParameters",
    "namespace": "com.linkedin.assertion",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "AssertionStdParameter",
    "namespace": "com.linkedin.assertion",
    "fields": [
    {
    "type": "string",
    "name": "value",
    "doc": "The parameter value"
    },
    {
    "type": {
    "type": "enum",
    "name": "AssertionStdParameterType",
    "namespace": "com.linkedin.assertion",
    "symbols": [
    "STRING",
    "NUMBER",
    "LIST",
    "SET",
    "UNKNOWN"
    ]
    },
    "name": "type",
    "doc": "The type of the parameter"
    }
    ],
    "doc": "Single parameter for AssertionStdOperators."
    }
    ],
    "name": "value",
    "default": null,
    "doc": "The value parameter of an assertion"
    },
    {
    "type": [
    "null",
    "com.linkedin.assertion.AssertionStdParameter"
    ],
    "name": "maxValue",
    "default": null,
    "doc": "The maxValue parameter of an assertion"
    },
    {
    "type": [
    "null",
    "com.linkedin.assertion.AssertionStdParameter"
    ],
    "name": "minValue",
    "default": null,
    "doc": "The minValue parameter of an assertion"
    }
    ],
    "doc": "Parameters for AssertionStdOperators."
    }
    ],
    "name": "parameters",
    "default": null,
    "doc": "Standard parameters required for the assertion. e.g. min_value, max_value, value, columns"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "nativeType",
    "default": null,
    "doc": "Native assertion type"
    },
    {
    "type": [
    "null",
    {
    "type": "map",
    "values": "string"
    }
    ],
    "name": "nativeParameters",
    "default": null,
    "doc": "Native parameters required for the assertion."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "logic",
    "default": null
    }
    ],
    "doc": "Attributes that are applicable to single-Dataset Assertions"
    }
    ],
    "name": "datasetAssertion",
    "default": null,
    "doc": "Dataset Assertion information when type is DATASET"
    }
    ],
    "doc": "Information about an assertion"
    }

    dataPlatformInstance

    The specific instance of the data platform that this entity belongs to

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstance"
    },
    "name": "DataPlatformInstance",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "URN",
    "filterNameOverride": "Platform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data Platform"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "platformInstance",
    "fieldType": "URN",
    "filterNameOverride": "Platform Instance"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "instance",
    "default": null,
    "doc": "Instance of the data platform (e.g. db instance)"
    }
    ],
    "doc": "The specific instance of the data platform that this entity belongs to"
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    assertionRunEvent (Timeseries)

    An event representing the current status of evaluating an assertion on a batch. AssertionRunEvent should be used for reporting the status of a run as an assertion evaluation progresses.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "assertionRunEvent",
    "type": "timeseries"
    },
    "name": "AssertionRunEvent",
    "namespace": "com.linkedin.assertion",
    "fields": [
    {
    "type": "long",
    "name": "timestampMillis",
    "doc": "The event timestamp field as epoch at UTC in milli seconds."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindowSize",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "CalendarInterval",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "SECOND",
    "MINUTE",
    "HOUR",
    "DAY",
    "WEEK",
    "MONTH",
    "QUARTER",
    "YEAR"
    ]
    },
    "name": "unit",
    "doc": "Interval unit such as minute/hour/day etc."
    },
    {
    "type": "int",
    "name": "multiple",
    "default": 1,
    "doc": "How many units. Defaults to 1."
    }
    ],
    "doc": "Defines the size of a time window."
    }
    ],
    "name": "eventGranularity",
    "default": null,
    "doc": "Granularity of the event if applicable"
    },
    {
    "type": [
    {
    "type": "record",
    "name": "PartitionSpec",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "PartitionType",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "FULL_TABLE",
    "QUERY",
    "PARTITION"
    ]
    },
    "name": "type",
    "default": "PARTITION"
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "partition",
    "doc": "String representation of the partition"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindow",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": "long",
    "name": "startTimeMillis",
    "doc": "Start time as epoch at UTC."
    },
    {
    "type": "com.linkedin.timeseries.TimeWindowSize",
    "name": "length",
    "doc": "The length of the window."
    }
    ]
    }
    ],
    "name": "timePartition",
    "default": null,
    "doc": "Time window of the partition if applicable"
    }
    ],
    "doc": "Defines how the data is partitioned"
    },
    "null"
    ],
    "name": "partitionSpec",
    "default": {
    "partition": "FULL_TABLE_SNAPSHOT",
    "type": "FULL_TABLE",
    "timePartition": null
    },
    "doc": "The optional partition specification."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "messageId",
    "default": null,
    "doc": "The optional messageId, if provided serves as a custom user-defined unique identifier for an aspect value."
    },
    {
    "type": "string",
    "name": "runId",
    "doc": " Native (platform-specific) identifier for this run"
    },
    {
    "TimeseriesField": {},
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "assertionUrn"
    },
    {
    "TimeseriesField": {},
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "asserteeUrn"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "BatchSpec",
    "namespace": "com.linkedin.assertion",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "nativeBatchId",
    "default": null,
    "doc": "The native identifier as specified by the system operating on the batch."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "query",
    "default": null,
    "doc": "A query that identifies a batch of data"
    },
    {
    "type": [
    "null",
    "int"
    ],
    "name": "limit",
    "default": null,
    "doc": "Any limit to the number of rows in the batch, if applied"
    }
    ],
    "doc": "A batch on which certain operations, e.g. data quality evaluation, is done."
    }
    ],
    "name": "batchSpec",
    "default": null,
    "doc": "Specification of the batch which this run is evaluating"
    },
    {
    "TimeseriesField": {},
    "type": {
    "type": "enum",
    "symbolDocs": {
    "COMPLETE": "The Assertion Run has completed"
    },
    "name": "AssertionRunStatus",
    "namespace": "com.linkedin.assertion",
    "symbols": [
    "COMPLETE"
    ]
    },
    "name": "status",
    "doc": "The status of the assertion run as per this timeseries event."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "AssertionResult",
    "namespace": "com.linkedin.assertion",
    "fields": [
    {
    "TimeseriesField": {},
    "type": {
    "type": "enum",
    "symbolDocs": {
    "FAILURE": " The Assertion Failed",
    "SUCCESS": " The Assertion Succeeded"
    },
    "name": "AssertionResultType",
    "namespace": "com.linkedin.assertion",
    "symbols": [
    "SUCCESS",
    "FAILURE"
    ]
    },
    "name": "type",
    "doc": " The final result, e.g. either SUCCESS or FAILURE."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "rowCount",
    "default": null,
    "doc": "Number of rows for evaluated batch"
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "missingCount",
    "default": null,
    "doc": "Number of rows with missing value for evaluated batch"
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "unexpectedCount",
    "default": null,
    "doc": "Number of rows with unexpected value for evaluated batch"
    },
    {
    "type": [
    "null",
    "float"
    ],
    "name": "actualAggValue",
    "default": null,
    "doc": "Observed aggregate value for evaluated batch"
    },
    {
    "type": [
    "null",
    {
    "type": "map",
    "values": "string"
    }
    ],
    "name": "nativeResults",
    "default": null,
    "doc": "Other results of evaluation"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "externalUrl",
    "default": null,
    "doc": "URL where full results are available"
    }
    ],
    "doc": "The result of running an assertion"
    }
    ],
    "name": "result",
    "default": null,
    "doc": "Results of assertion, present if the status is COMPLETE"
    },
    {
    "type": [
    "null",
    {
    "type": "map",
    "values": "string"
    }
    ],
    "name": "runtimeContext",
    "default": null,
    "doc": "Runtime parameters of evaluation"
    }
    ],
    "doc": "An event representing the current status of evaluating an assertion on a batch.\nAssertionRunEvent should be used for reporting the status of a run as an assertion evaluation progresses."
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • Asserts

      • Dataset via assertionInfo.datasetAssertion.dataset
      • SchemaField via assertionInfo.datasetAssertion.fields

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/chart/index.html b/docs/generated/metamodel/entities/chart/index.html index b240735975708..f645e2427a2be 100644 --- a/docs/generated/metamodel/entities/chart/index.html +++ b/docs/generated/metamodel/entities/chart/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ ingestion pipelines and edits in the UI to avoid accidental overwrites of user-provided data by ingestion pipelines

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "editableChartProperties"
    },
    "name": "EditableChartProperties",
    "namespace": "com.linkedin.chart",
    "fields": [
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "created",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "An AuditStamp corresponding to the creation of this resource/association/sub-resource. A value of 0 for time indicates missing data."
    },
    {
    "type": "com.linkedin.common.AuditStamp",
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "An AuditStamp corresponding to the last modification of this resource/association/sub-resource. If no modification has happened since creation, lastModified should be the same as created. A value of 0 for time indicates missing data."
    },
    {
    "type": [
    "null",
    "com.linkedin.common.AuditStamp"
    ],
    "name": "deleted",
    "default": null,
    "doc": "An AuditStamp corresponding to the deletion of this resource/association/sub-resource. Logically, deleted MUST have a later timestamp than creation. It may or may not have the same time as lastModified depending upon the resource/association/sub-resource semantics."
    },
    {
    "Searchable": {
    "fieldName": "editedDescription",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Edited documentation of the chart "
    }
    ],
    "doc": "Stores editable changes made to properties. This separates changes made from\ningestion pipelines and edits in the UI to avoid accidental overwrites of user-provided data by ingestion pipelines"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that overseas the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    browsePaths

    Shared aspect containing Browse Paths to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePaths"
    },
    "name": "BrowsePaths",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "fieldName": "browsePaths",
    "fieldType": "BROWSE_PATH"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "paths",
    "doc": "A list of valid browse paths for the entity.\n\nBrowse paths are expected to be forward slash-separated strings. For example: 'prod/snowflake/datasetName'"
    }
    ],
    "doc": "Shared aspect containing Browse Paths to be indexed for an entity."
    }

    glossaryTerms

    Related business terms information

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    dataPlatformInstance

    The specific instance of the data platform that this entity belongs to

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstance"
    },
    "name": "DataPlatformInstance",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "URN",
    "filterNameOverride": "Platform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data Platform"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "platformInstance",
    "fieldType": "URN",
    "filterNameOverride": "Platform Instance"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "instance",
    "default": null,
    "doc": "Instance of the data platform (e.g. db instance)"
    }
    ],
    "doc": "The specific instance of the data platform that this entity belongs to"
    }

    browsePathsV2

    Shared aspect containing a Browse Path to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePathsV2"
    },
    "name": "BrowsePathsV2",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*/id": {
    "fieldName": "browsePathV2",
    "fieldType": "BROWSE_PATH_V2"
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "BrowsePathEntry",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "string",
    "name": "id",
    "doc": "The ID of the browse path entry. This is what gets stored in the index.\nIf there's an urn associated with this entry, id and urn will be the same"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "urn",
    "default": null,
    "doc": "Optional urn pointing to some entity in DataHub"
    }
    ],
    "doc": "Represents a single level in an entity's browsePathV2"
    }
    },
    "name": "path",
    "doc": "A valid browse path for the entity. This field is provided by DataHub by default.\nThis aspect is a newer version of browsePaths where we can encode more information in the path.\nThis path is also based on containers for a given entity if it has containers.\n\nThis is stored in elasticsearch as unit-separator delimited strings and only includes platform specific folders or containers.\nThese paths should not include high level info captured elsewhere ie. Platform and Environment."
    }
    ],
    "doc": "Shared aspect containing a Browse Path to be indexed for an entity."
    }

    inputFields

    Information about the fields a chart or dashboard references

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "inputFields"
    },
    "name": "InputFields",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InputField",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "schemaField"
    ],
    "name": "consumesField"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "schemaFieldUrn",
    "doc": "Urn of the schema being referenced for lineage purposes"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "SchemaField",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "Searchable": {
    "boostScore": 5.0,
    "fieldName": "fieldPaths",
    "fieldType": "TEXT",
    "queryByDefault": "true"
    },
    "type": "string",
    "name": "fieldPath",
    "doc": "Flattened name of the field. Field is computed from jsonPath field."
    },
    {
    "Deprecated": true,
    "type": [
    "null",
    "string"
    ],
    "name": "jsonPath",
    "default": null,
    "doc": "Flattened name of a field in JSON Path notation."
    },
    {
    "type": "boolean",
    "name": "nullable",
    "default": false,
    "doc": "Indicates if this field is optional or nullable"
    },
    {
    "Searchable": {
    "boostScore": 0.1,
    "fieldName": "fieldDescriptions",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Description"
    },
    {
    "Searchable": {
    "boostScore": 0.2,
    "fieldName": "fieldLabels",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "label",
    "default": null,
    "doc": "Label of the field. Provides a more human-readable name for the field than field path. Some sources will\nprovide this metadata but not all sources have the concept of a label. If just one string is associated with\na field in a source, that is most likely a description."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    }
    ],
    "name": "created",
    "default": null,
    "doc": "An AuditStamp corresponding to the creation of this schema field."
    },
    {
    "type": [
    "null",
    "com.linkedin.common.AuditStamp"
    ],
    "name": "lastModified",
    "default": null,
    "doc": "An AuditStamp corresponding to the last modification of this schema field."
    },
    {
    "type": {
    "type": "record",
    "name": "SchemaFieldDataType",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": [
    {
    "type": "record",
    "name": "BooleanType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Boolean field type."
    },
    {
    "type": "record",
    "name": "FixedType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Fixed field type."
    },
    {
    "type": "record",
    "name": "StringType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "String field type."
    },
    {
    "type": "record",
    "name": "BytesType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Bytes field type."
    },
    {
    "type": "record",
    "name": "NumberType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Number data type: long, integer, short, etc.."
    },
    {
    "type": "record",
    "name": "DateType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Date field type."
    },
    {
    "type": "record",
    "name": "TimeType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Time field type. This should also be used for datetimes."
    },
    {
    "type": "record",
    "name": "EnumType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Enum field type."
    },
    {
    "type": "record",
    "name": "NullType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Null field type."
    },
    {
    "type": "record",
    "name": "MapType",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "keyType",
    "default": null,
    "doc": "Key type in a map"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "valueType",
    "default": null,
    "doc": "Type of the value in a map"
    }
    ],
    "doc": "Map field type."
    },
    {
    "type": "record",
    "name": "ArrayType",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "nestedType",
    "default": null,
    "doc": "List of types this array holds."
    }
    ],
    "doc": "Array field type."
    },
    {
    "type": "record",
    "name": "UnionType",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "nestedTypes",
    "default": null,
    "doc": "List of types in union type."
    }
    ],
    "doc": "Union field type."
    },
    {
    "type": "record",
    "name": "RecordType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Record field type."
    }
    ],
    "name": "type",
    "doc": "Data platform specific types"
    }
    ],
    "doc": "Schema field data types"
    },
    "name": "type",
    "doc": "Platform independent field type of the field."
    },
    {
    "type": "string",
    "name": "nativeDataType",
    "doc": "The native type of the field in the dataset's platform as declared by platform schema."
    },
    {
    "type": "boolean",
    "name": "recursive",
    "default": false,
    "doc": "There are use cases when a field in type B references type A. A field in A references field of type B. In such cases, we will mark the first field as recursive."
    },
    {
    "Relationship": {
    "/tags/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "SchemaFieldTaggedWith"
    }
    },
    "Searchable": {
    "/tags/*/tag": {
    "boostScore": 0.5,
    "fieldName": "fieldTags",
    "fieldType": "URN"
    }
    },
    "type": [
    "null",
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }
    ],
    "name": "globalTags",
    "default": null,
    "doc": "Tags associated with the field"
    },
    {
    "Relationship": {
    "/terms/*/urn": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "SchemaFieldWithGlossaryTerm"
    }
    },
    "Searchable": {
    "/terms/*/urn": {
    "boostScore": 0.5,
    "fieldName": "fieldGlossaryTerms",
    "fieldType": "URN"
    }
    },
    "type": [
    "null",
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": "com.linkedin.common.AuditStamp",
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }
    ],
    "name": "glossaryTerms",
    "default": null,
    "doc": "Glossary terms associated with the field"
    },
    {
    "type": "boolean",
    "name": "isPartOfKey",
    "default": false,
    "doc": "For schema fields that are part of complex keys, set this field to true\nWe do this to easily distinguish between value and key fields"
    },
    {
    "type": [
    "null",
    "boolean"
    ],
    "name": "isPartitioningKey",
    "default": null,
    "doc": "For Datasets which are partitioned, this determines the partitioning key."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "jsonProps",
    "default": null,
    "doc": "For schema fields that have other properties that are not modeled explicitly,\nuse this field to serialize those properties into a JSON string"
    }
    ],
    "doc": "SchemaField to describe metadata related to dataset schema."
    }
    ],
    "name": "schemaField",
    "default": null,
    "doc": "Copied version of the referenced schema field object for indexing purposes"
    }
    ],
    "doc": "Information about a field a chart or dashboard references"
    }
    },
    "name": "fields",
    "doc": "List of fields being referenced"
    }
    ],
    "doc": "Information about the fields a chart or dashboard references"
    }

    embed

    Information regarding rendering an embed for an asset.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "embed"
    },
    "name": "Embed",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "renderUrl",
    "default": null,
    "doc": "An embed URL to be rendered inside of an iframe."
    }
    ],
    "doc": "Information regarding rendering an embed for an asset."
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    container

    Link from an asset to its parent container

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "container"
    },
    "name": "Container",
    "namespace": "com.linkedin.container",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "container"
    ],
    "name": "IsPartOf"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "container",
    "fieldType": "URN",
    "filterNameOverride": "Container",
    "hasValuesFieldName": "hasContainer"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "container",
    "doc": "The parent container of an asset"
    }
    ],
    "doc": "Link from an asset to its parent container"
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    chartUsageStatistics (Timeseries)

    Experimental (Subject to breaking change) -- Stats corresponding to chart's usage.

    If this aspect represents the latest snapshot of the statistics about a Chart, the eventGranularity field should be null. If this aspect represents a bucketed window of usage statistics (e.g. over a day), then the eventGranularity field should be set accordingly.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "chartUsageStatistics",
    "type": "timeseries"
    },
    "name": "ChartUsageStatistics",
    "namespace": "com.linkedin.chart",
    "fields": [
    {
    "type": "long",
    "name": "timestampMillis",
    "doc": "The event timestamp field as epoch at UTC in milli seconds."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindowSize",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "CalendarInterval",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "SECOND",
    "MINUTE",
    "HOUR",
    "DAY",
    "WEEK",
    "MONTH",
    "QUARTER",
    "YEAR"
    ]
    },
    "name": "unit",
    "doc": "Interval unit such as minute/hour/day etc."
    },
    {
    "type": "int",
    "name": "multiple",
    "default": 1,
    "doc": "How many units. Defaults to 1."
    }
    ],
    "doc": "Defines the size of a time window."
    }
    ],
    "name": "eventGranularity",
    "default": null,
    "doc": "Granularity of the event if applicable"
    },
    {
    "type": [
    {
    "type": "record",
    "name": "PartitionSpec",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "PartitionType",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "FULL_TABLE",
    "QUERY",
    "PARTITION"
    ]
    },
    "name": "type",
    "default": "PARTITION"
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "partition",
    "doc": "String representation of the partition"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindow",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": "long",
    "name": "startTimeMillis",
    "doc": "Start time as epoch at UTC."
    },
    {
    "type": "com.linkedin.timeseries.TimeWindowSize",
    "name": "length",
    "doc": "The length of the window."
    }
    ]
    }
    ],
    "name": "timePartition",
    "default": null,
    "doc": "Time window of the partition if applicable"
    }
    ],
    "doc": "Defines how the data is partitioned"
    },
    "null"
    ],
    "name": "partitionSpec",
    "default": {
    "partition": "FULL_TABLE_SNAPSHOT",
    "type": "FULL_TABLE",
    "timePartition": null
    },
    "doc": "The optional partition specification."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "messageId",
    "default": null,
    "doc": "The optional messageId, if provided serves as a custom user-defined unique identifier for an aspect value."
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "int"
    ],
    "name": "viewsCount",
    "default": null,
    "doc": "The total number of times chart has been viewed"
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "int"
    ],
    "name": "uniqueUserCount",
    "default": null,
    "doc": "Unique user count"
    },
    {
    "TimeseriesFieldCollection": {
    "key": "user"
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "ChartUserUsageCounts",
    "namespace": "com.linkedin.chart",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "user",
    "doc": "The unique id of the user."
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "int"
    ],
    "name": "viewsCount",
    "default": null,
    "doc": "The number of times the user has viewed the chart"
    }
    ],
    "doc": "Records a single user's usage counts for a given resource"
    }
    }
    ],
    "name": "userCounts",
    "default": null,
    "doc": "Users within this bucket, with frequency counts"
    }
    ],
    "doc": "Experimental (Subject to breaking change) -- Stats corresponding to chart's usage.\n\nIf this aspect represents the latest snapshot of the statistics about a Chart, the eventGranularity field should be null.\nIf this aspect represents a bucketed window of usage statistics (e.g. over a day), then the eventGranularity field should be set accordingly."
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • Consumes

      • Dataset via chartInfo.inputs
      • Dataset via chartInfo.inputEdges
    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • TaggedWith

      • Tag via globalTags.tags
      • Tag via inputFields.fields.schemaField.globalTags.tags
    • TermedWith

      • GlossaryTerm via glossaryTerms.terms.urn
    • consumesField

      • SchemaField via inputFields.fields.schemaFieldUrn
    • SchemaFieldTaggedWith

      • Tag via inputFields.fields.schemaField.globalTags
    • SchemaFieldWithGlossaryTerm

      • GlossaryTerm via inputFields.fields.schemaField.glossaryTerms
    • AssociatedWith

      • Domain via domains.domains
    • IsPartOf

      • Container via container.container

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/container/index.html b/docs/generated/metamodel/entities/container/index.html index fd1aa2b98270c..cfd7773281fa7 100644 --- a/docs/generated/metamodel/entities/container/index.html +++ b/docs/generated/metamodel/entities/container/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    Container

    A container of related data assets.

    Aspects

    containerProperties

    Information about an Asset Container as received from a 3rd party source system

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "containerProperties"
    },
    "name": "ContainerProperties",
    "namespace": "com.linkedin.container",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "externalUrl",
    "default": null,
    "doc": "URL where the reference exist"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Display name of the Asset Container"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldType": "WORD_GRAM"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "qualifiedName",
    "default": null,
    "doc": "Fully-qualified name of the Container"
    },
    {
    "Searchable": {
    "fieldType": "TEXT",
    "hasValuesFieldName": "hasDescription"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Description of the Asset Container as it exists inside a source system"
    },
    {
    "Searchable": {
    "/time": {
    "fieldName": "createdAt",
    "fieldType": "DATETIME"
    }
    },
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the event occur"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "actor",
    "default": null,
    "doc": "Optional: The actor urn involved in the event."
    }
    ],
    "doc": "A standard event timestamp"
    }
    ],
    "name": "created",
    "default": null,
    "doc": "A timestamp documenting when the asset was created in the source Data Platform (not on DataHub)"
    },
    {
    "Searchable": {
    "/time": {
    "fieldName": "lastModifiedAt",
    "fieldType": "DATETIME"
    }
    },
    "type": [
    "null",
    "com.linkedin.common.TimeStamp"
    ],
    "name": "lastModified",
    "default": null,
    "doc": "A timestamp documenting when the asset was last modified in the source Data Platform (not on DataHub)"
    }
    ],
    "doc": "Information about an Asset Container as received from a 3rd party source system"
    }

    editableContainerProperties

    Editable information about an Asset Container as defined on the DataHub Platform

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "editableContainerProperties"
    },
    "name": "EditableContainerProperties",
    "namespace": "com.linkedin.container",
    "fields": [
    {
    "Searchable": {
    "fieldName": "editedDescription",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Description of the Asset Container as it is received on the DataHub Platform"
    }
    ],
    "doc": "Editable information about an Asset Container as defined on the DataHub Platform"
    }

    dataPlatformInstance

    The specific instance of the data platform that this entity belongs to

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstance"
    },
    "name": "DataPlatformInstance",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "URN",
    "filterNameOverride": "Platform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data Platform"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "platformInstance",
    "fieldType": "URN",
    "filterNameOverride": "Platform Instance"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "instance",
    "default": null,
    "doc": "Instance of the data platform (e.g. db instance)"
    }
    ],
    "doc": "The specific instance of the data platform that this entity belongs to"
    }

    subTypes

    Sub Types. Use this aspect to specialize a generic Entity e.g. Making a Dataset also be a View or also be a LookerExplore

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "subTypes"
    },
    "name": "SubTypes",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldType": "KEYWORD",
    "filterNameOverride": "Sub Type",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "typeNames",
    "doc": "The names of the specific types."
    }
    ],
    "doc": "Sub Types. Use this aspect to specialize a generic Entity\ne.g. Making a Dataset also be a View or also be a LookerExplore"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that oversees the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    container

    Link from an asset to its parent container

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "container"
    },
    "name": "Container",
    "namespace": "com.linkedin.container",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "container"
    ],
    "name": "IsPartOf"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "container",
    "fieldType": "URN",
    "filterNameOverride": "Container",
    "hasValuesFieldName": "hasContainer"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "container",
    "doc": "The parent container of an asset"
    }
    ],
    "doc": "Link from an asset to its parent container"
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    glossaryTerms

    Related business terms information

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    browsePaths

    Shared aspect containing Browse Paths to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePaths"
    },
    "name": "BrowsePaths",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "fieldName": "browsePaths",
    "fieldType": "BROWSE_PATH"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "paths",
    "doc": "A list of valid browse paths for the entity.\n\nBrowse paths are expected to be forward slash-separated strings. For example: 'prod/snowflake/datasetName'"
    }
    ],
    "doc": "Shared aspect containing Browse Paths to be indexed for an entity."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    browsePathsV2

    Shared aspect containing a Browse Path to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePathsV2"
    },
    "name": "BrowsePathsV2",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*/id": {
    "fieldName": "browsePathV2",
    "fieldType": "BROWSE_PATH_V2"
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "BrowsePathEntry",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "string",
    "name": "id",
    "doc": "The ID of the browse path entry. This is what gets stored in the index.\nIf there's an urn associated with this entry, id and urn will be the same"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "urn",
    "default": null,
    "doc": "Optional urn pointing to some entity in DataHub"
    }
    ],
    "doc": "Represents a single level in an entity's browsePathV2"
    }
    },
    "name": "path",
    "doc": "A valid browse path for the entity. This field is provided by DataHub by default.\nThis aspect is a newer version of browsePaths where we can encode more information in the path.\nThis path is also based on containers for a given entity if it has containers.\n\nThis is stored in elasticsearch as unit-separator delimited strings and only includes platform specific folders or containers.\nThese paths should not include high level info captured elsewhere ie. Platform and Environment."
    }
    ],
    "doc": "Shared aspect containing a Browse Path to be indexed for an entity."
    }

    Relationships

    Self

    These are the relationships to itself, stored in this entity's aspects

    • IsPartOf (via container.container)

    Outgoing

    These are the relationships stored in this entity's aspects

    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • TaggedWith

      • Tag via globalTags.tags
    • TermedWith

      • GlossaryTerm via glossaryTerms.terms.urn
    • AssociatedWith

      • Domain via domains.domains

    Incoming

    These are the relationships stored in other entities' aspects

    • IsPartOf

      • Dataset via container.container
      • Chart via container.container
      • Dashboard via container.container

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/corpgroup/index.html b/docs/generated/metamodel/entities/corpgroup/index.html index 0a9df52c44b64..2edcaea2a43d9 100644 --- a/docs/generated/metamodel/entities/corpgroup/index.html +++ b/docs/generated/metamodel/entities/corpgroup/index.html @@ -8,14 +8,14 @@ - +

    CorpGroup

    CorpGroup represents an identity of a group of users in the enterprise.

    Aspects

    corpGroupKey

    Key for a CorpGroup

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "corpGroupKey"
    },
    "name": "CorpGroupKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldType": "WORD_GRAM",
    "queryByDefault": true
    },
    "type": "string",
    "name": "name",
    "doc": "The URL-encoded name of the AD/LDAP group. Serves as a globally unique identifier within DataHub."
    }
    ],
    "doc": "Key for a CorpGroup"
    }

    corpGroupInfo

    Information about a Corp Group ingested from a third party source

    Schema
    {
    "type": "record",
    "Aspect": {
    "EntityUrns": [
    "com.linkedin.common.CorpGroupUrn"
    ],
    "name": "corpGroupInfo"
    },
    "name": "CorpGroupInfo",
    "namespace": "com.linkedin.identity",
    "fields": [
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "TEXT_PARTIAL",
    "queryByDefault": true
    },
    "type": [
    "null",
    "string"
    ],
    "name": "displayName",
    "default": null,
    "doc": "The name of the group."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "email",
    "default": null,
    "doc": "email of this group"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "corpuser"
    ],
    "name": "OwnedBy"
    }
    },
    "deprecated": true,
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "admins",
    "doc": "owners of this group\nDeprecated! Replaced by Ownership aspect."
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "corpuser"
    ],
    "name": "IsPartOf"
    }
    },
    "deprecated": true,
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "members",
    "doc": "List of ldap urn in this group.\nDeprecated! Replaced by GroupMembership aspect."
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "corpGroup"
    ],
    "name": "IsPartOf"
    }
    },
    "deprecated": true,
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "groups",
    "doc": "List of groups in this group.\nDeprecated! This field is unused."
    },
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "A description of the group."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "slack",
    "default": null,
    "doc": "Slack channel for the group"
    },
    {
    "Searchable": {
    "/time": {
    "fieldName": "createdTime",
    "fieldType": "DATETIME"
    }
    },
    "type": [
    "null",
    {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    }
    ],
    "name": "created",
    "default": null,
    "doc": "Created Audit stamp"
    }
    ],
    "doc": "Information about a Corp Group ingested from a third party source"
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    corpGroupEditableInfo

    Group information that can be edited from UI

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "corpGroupEditableInfo"
    },
    "name": "CorpGroupEditableInfo",
    "namespace": "com.linkedin.identity",
    "fields": [
    {
    "Searchable": {
    "fieldName": "editedDescription",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "A description of the group"
    },
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "pictureLink",
    "default": "https://raw.githubusercontent.com/datahub-project/datahub/master/datahub-web-react/src/images/default_avatar.png",
    "doc": "A URL which points to a picture which user wants to set as the photo for the group"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "slack",
    "default": null,
    "doc": "Slack channel for the group"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "email",
    "default": null,
    "doc": "Email address to contact the group"
    }
    ],
    "doc": "Group information that can be edited from UI"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that overseas the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    origin

    Carries information about where an entity originated from.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "origin"
    },
    "name": "Origin",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "EXTERNAL": "The entity is external to DataHub.",
    "NATIVE": "The entity is native to DataHub."
    },
    "name": "OriginType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "NATIVE",
    "EXTERNAL"
    ],
    "doc": "Enum to define where an entity originated from."
    },
    "name": "type",
    "doc": "Where an entity originated from. Either NATIVE or EXTERNAL."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "externalType",
    "default": null,
    "doc": "Only populated if type is EXTERNAL. The externalType of the entity, such as the name of the identity provider."
    }
    ],
    "doc": "Carries information about where an entity originated from."
    }

    Relationships

    Self

    These are the relationships to itself, stored in this entity's aspects

    • IsPartOf (via corpGroupInfo.groups)
    • OwnedBy (via ownership.owners.owner)

    Outgoing

    These are the relationships stored in this entity's aspects

    • OwnedBy

      • Corpuser via corpGroupInfo.admins
      • Corpuser via ownership.owners.owner
    • IsPartOf

      • Corpuser via corpGroupInfo.members
    • TaggedWith

      • Tag via globalTags.tags
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn

    Incoming

    These are the relationships stored in other entity's aspects

    • OwnedBy

      • Dataset via ownership.owners.owner
      • DataJob via ownership.owners.owner
      • DataFlow via ownership.owners.owner
      • DataProcess via ownership.owners.owner
      • Chart via ownership.owners.owner
      • Dashboard via ownership.owners.owner
      • Notebook via ownership.owners.owner
    • IsMemberOfGroup

      • Corpuser via groupMembership.groups
    • IsMemberOfNativeGroup

      • Corpuser via nativeGroupMembership.nativeGroups

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/corpuser/index.html b/docs/generated/metamodel/entities/corpuser/index.html index 23ddeb837b889..ddfa75e18cce4 100644 --- a/docs/generated/metamodel/entities/corpuser/index.html +++ b/docs/generated/metamodel/entities/corpuser/index.html @@ -8,14 +8,14 @@ - +

    Corpuser

    CorpUser represents an identity of a person (or an account) in the enterprise.

    Aspects

    corpUserKey

    Key for a CorpUser

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "corpUserKey"
    },
    "name": "CorpUserKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "Searchable": {
    "boostScore": 2.0,
    "enableAutocomplete": true,
    "fieldName": "ldap",
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "username",
    "doc": "The name of the AD/LDAP user."
    }
    ],
    "doc": "Key for a CorpUser"
    }

    corpUserInfo

    Linkedin corp user information

    Schema
    {
    "type": "record",
    "Aspect": {
    "EntityUrns": [
    "com.linkedin.common.CorpuserUrn"
    ],
    "name": "corpUserInfo"
    },
    "name": "CorpUserInfo",
    "namespace": "com.linkedin.identity",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 2.0
    }
    },
    "type": "boolean",
    "name": "active",
    "doc": "Deprecated! Use CorpUserStatus instead. Whether the corpUser is active, ref: https://iwww.corp.linkedin.com/wiki/cf/display/GTSD/Accessing+Active+Directory+via+LDAP+tools"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM",
    "queryByDefault": true
    },
    "type": [
    "null",
    "string"
    ],
    "name": "displayName",
    "default": null,
    "doc": "displayName of this user , e.g. Hang Zhang(DataHQ)"
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD",
    "queryByDefault": true
    },
    "type": [
    "null",
    "string"
    ],
    "name": "email",
    "default": null,
    "doc": "email address of this user"
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD",
    "queryByDefault": true
    },
    "type": [
    "null",
    "string"
    ],
    "name": "title",
    "default": null,
    "doc": "title of this user"
    },
    {
    "Relationship": {
    "entityTypes": [
    "corpuser"
    ],
    "name": "ReportsTo"
    },
    "Searchable": {
    "fieldName": "managerLdap",
    "fieldType": "URN",
    "queryByDefault": true
    },
    "java": {
    "class": "com.linkedin.common.urn.CorpuserUrn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "managerUrn",
    "default": null,
    "doc": "direct manager of this user"
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "departmentId",
    "default": null,
    "doc": "department id this user belong to"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "departmentName",
    "default": null,
    "doc": "department name this user belong to"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "firstName",
    "default": null,
    "doc": "first name of this user"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "lastName",
    "default": null,
    "doc": "last name of this user"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldType": "WORD_GRAM",
    "queryByDefault": true
    },
    "type": [
    "null",
    "string"
    ],
    "name": "fullName",
    "default": null,
    "doc": "Common name of this user, format is firstName + lastName (split by a whitespace)"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "countryCode",
    "default": null,
    "doc": "two uppercase letters country code. e.g. US"
    }
    ],
    "doc": "Linkedin corp user information"
    }

    corpUserEditableInfo

    Linkedin corp user information that can be edited from UI

    Schema
    {
    "type": "record",
    "Aspect": {
    "EntityUrns": [
    "com.linkedin.common.CorpuserUrn"
    ],
    "name": "corpUserEditableInfo"
    },
    "name": "CorpUserEditableInfo",
    "namespace": "com.linkedin.identity",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "aboutMe",
    "default": null,
    "doc": "About me section of the user"
    },
    {
    "Searchable": {
    "/*": {
    "fieldType": "TEXT"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "teams",
    "default": [],
    "doc": "Teams that the user belongs to e.g. Metadata"
    },
    {
    "Searchable": {
    "/*": {
    "fieldType": "TEXT"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "skills",
    "default": [],
    "doc": "Skills that the user possesses e.g. Machine Learning"
    },
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "pictureLink",
    "default": "https://raw.githubusercontent.com/datahub-project/datahub/master/datahub-web-react/src/images/default_avatar.png",
    "doc": "A URL which points to a picture which user wants to set as a profile photo"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "fieldType": "WORD_GRAM",
    "queryByDefault": true
    },
    "type": [
    "null",
    "string"
    ],
    "name": "displayName",
    "default": null,
    "doc": "DataHub-native display name"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "title",
    "default": null,
    "doc": "DataHub-native Title, e.g. 'Software Engineer'"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "slack",
    "default": null,
    "doc": "Slack handle for the user"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "phone",
    "default": null,
    "doc": "Phone number to contact the user"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "email",
    "default": null,
    "doc": "Email address to contact the user"
    }
    ],
    "doc": "Linkedin corp user information that can be edited from UI"
    }

    corpUserStatus

    The status of the user, e.g. provisioned, active, suspended, etc.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "corpUserStatus"
    },
    "name": "CorpUserStatus",
    "namespace": "com.linkedin.identity",
    "fields": [
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "type": "string",
    "name": "status",
    "doc": "Status of the user, e.g. PROVISIONED / ACTIVE / SUSPENDED"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "doc": "Audit stamp containing who last modified the status and when."
    }
    ],
    "doc": "The status of the user, e.g. provisioned, active, suspended, etc."
    }

    groupMembership

    Carries information about the CorpGroups a user is in.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "groupMembership"
    },
    "name": "GroupMembership",
    "namespace": "com.linkedin.identity",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "corpGroup"
    ],
    "name": "IsMemberOfGroup"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "groups"
    }
    ],
    "doc": "Carries information about the CorpGroups a user is in."
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    corpUserCredentials

    Corp user credentials

    Schema
    {
    "type": "record",
    "Aspect": {
    "EntityUrns": [
    "com.linkedin.common.CorpuserUrn"
    ],
    "name": "corpUserCredentials"
    },
    "name": "CorpUserCredentials",
    "namespace": "com.linkedin.identity",
    "fields": [
    {
    "type": "string",
    "name": "salt",
    "doc": "Salt used to hash password"
    },
    {
    "type": "string",
    "name": "hashedPassword",
    "doc": "Hashed password generated by concatenating salt and password, then hashing"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "passwordResetToken",
    "default": null,
    "doc": "Optional token needed to reset a user's password. Can only be set by the admin."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "passwordResetTokenExpirationTimeMillis",
    "default": null,
    "doc": "When the password reset token expires."
    }
    ],
    "doc": "Corp user credentials"
    }

    nativeGroupMembership

    Carries information about the native CorpGroups a user is in.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "nativeGroupMembership"
    },
    "name": "NativeGroupMembership",
    "namespace": "com.linkedin.identity",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "corpGroup"
    ],
    "name": "IsMemberOfNativeGroup"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "nativeGroups"
    }
    ],
    "doc": "Carries information about the native CorpGroups a user is in."
    }

    corpUserSettings

    Settings that a user can customize through the datahub ui

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "corpUserSettings"
    },
    "name": "CorpUserSettings",
    "namespace": "com.linkedin.identity",
    "fields": [
    {
    "type": {
    "type": "record",
    "name": "CorpUserAppearanceSettings",
    "namespace": "com.linkedin.identity",
    "fields": [
    {
    "type": [
    "null",
    "boolean"
    ],
    "name": "showSimplifiedHomepage",
    "default": null,
    "doc": "Flag whether the user should see a homepage with only datasets, charts and dashboards. Intended for users\nwho have less operational use cases for the datahub tool."
    }
    ],
    "doc": "Settings for a user around the appearance of their DataHub UI"
    },
    "name": "appearance",
    "doc": "Settings for a user around the appearance of their DataHub U"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "CorpUserViewsSettings",
    "namespace": "com.linkedin.identity",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "defaultView",
    "default": null,
    "doc": "The default View which is selected for the user.\nIf none is chosen, then this value will be left blank."
    }
    ],
    "doc": "Settings related to the 'Views' feature."
    }
    ],
    "name": "views",
    "default": null,
    "doc": "User preferences for the Views feature."
    }
    ],
    "doc": "Settings that a user can customize through the datahub ui"
    }

    origin

    Carries information about where an entity originated from.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "origin"
    },
    "name": "Origin",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "EXTERNAL": "The entity is external to DataHub.",
    "NATIVE": "The entity is native to DataHub."
    },
    "name": "OriginType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "NATIVE",
    "EXTERNAL"
    ],
    "doc": "Enum to define where an entity originated from."
    },
    "name": "type",
    "doc": "Where an entity originated from. Either NATIVE or EXTERNAL."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "externalType",
    "default": null,
    "doc": "Only populated if type is EXTERNAL. The externalType of the entity, such as the name of the identity provider."
    }
    ],
    "doc": "Carries information about where an entity originated from."
    }

    roleMembership

    Carries information about which roles a user is assigned to.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "roleMembership"
    },
    "name": "RoleMembership",
    "namespace": "com.linkedin.identity",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataHubRole"
    ],
    "name": "IsMemberOfRole"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "roles"
    }
    ],
    "doc": "Carries information about which roles a user is assigned to."
    }

    Relationships

    Self

    These are the relationships to itself, stored in this entity's aspects

    • ReportsTo (via corpUserInfo.managerUrn)

    Outgoing

    These are the relationships stored in this entity's aspects

    • IsMemberOfGroup

      • CorpGroup via groupMembership.groups
    • TaggedWith

      • Tag via globalTags.tags
    • IsMemberOfNativeGroup

      • CorpGroup via nativeGroupMembership.nativeGroups
    • IsMemberOfRole

      • DataHubRole via roleMembership.roles

    Incoming

    These are the relationships stored in other entity's aspects

    • Has

      • Role via actors.users.user
    • OwnedBy

      • Dataset via ownership.owners.owner
      • DataJob via ownership.owners.owner
      • DataFlow via ownership.owners.owner
      • DataProcess via ownership.owners.owner
      • Chart via ownership.owners.owner
      • Dashboard via ownership.owners.owner
      • Notebook via ownership.owners.owner

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/dashboard/index.html b/docs/generated/metamodel/entities/dashboard/index.html index 5159577cc8081..b07061b8755ae 100644 --- a/docs/generated/metamodel/entities/dashboard/index.html +++ b/docs/generated/metamodel/entities/dashboard/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    browsePaths

    Shared aspect containing Browse Paths to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePaths"
    },
    "name": "BrowsePaths",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "fieldName": "browsePaths",
    "fieldType": "BROWSE_PATH"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "paths",
    "doc": "A list of valid browse paths for the entity.\n\nBrowse paths are expected to be forward slash-separated strings. For example: 'prod/snowflake/datasetName'"
    }
    ],
    "doc": "Shared aspect containing Browse Paths to be indexed for an entity."
    }

    glossaryTerms

    Related business terms information

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    dataPlatformInstance

    The specific instance of the data platform that this entity belongs to

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstance"
    },
    "name": "DataPlatformInstance",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "URN",
    "filterNameOverride": "Platform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data Platform"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "platformInstance",
    "fieldType": "URN",
    "filterNameOverride": "Platform Instance"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "instance",
    "default": null,
    "doc": "Instance of the data platform (e.g. db instance)"
    }
    ],
    "doc": "The specific instance of the data platform that this entity belongs to"
    }

    browsePathsV2

    Shared aspect containing a Browse Path to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePathsV2"
    },
    "name": "BrowsePathsV2",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*/id": {
    "fieldName": "browsePathV2",
    "fieldType": "BROWSE_PATH_V2"
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "BrowsePathEntry",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "string",
    "name": "id",
    "doc": "The ID of the browse path entry. This is what gets stored in the index.\nIf there's an urn associated with this entry, id and urn will be the same"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "urn",
    "default": null,
    "doc": "Optional urn pointing to some entity in DataHub"
    }
    ],
    "doc": "Represents a single level in an entity's browsePathV2"
    }
    },
    "name": "path",
    "doc": "A valid browse path for the entity. This field is provided by DataHub by default.\nThis aspect is a newer version of browsePaths where we can encode more information in the path.\nThis path is also based on containers for a given entity if it has containers.\n\nThis is stored in elasticsearch as unit-separator delimited strings and only includes platform specific folders or containers.\nThese paths should not include high level info captured elsewhere ie. Platform and Environment."
    }
    ],
    "doc": "Shared aspect containing a Browse Path to be indexed for an entity."
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    container

    Link from an asset to its parent container

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "container"
    },
    "name": "Container",
    "namespace": "com.linkedin.container",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "container"
    ],
    "name": "IsPartOf"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "container",
    "fieldType": "URN",
    "filterNameOverride": "Container",
    "hasValuesFieldName": "hasContainer"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "container",
    "doc": "The parent container of an asset"
    }
    ],
    "doc": "Link from an asset to its parent container"
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    inputFields

    Information about the fields a chart or dashboard references

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "inputFields"
    },
    "name": "InputFields",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InputField",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "schemaField"
    ],
    "name": "consumesField"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "schemaFieldUrn",
    "doc": "Urn of the schema being referenced for lineage purposes"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "SchemaField",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "Searchable": {
    "boostScore": 5.0,
    "fieldName": "fieldPaths",
    "fieldType": "TEXT",
    "queryByDefault": "true"
    },
    "type": "string",
    "name": "fieldPath",
    "doc": "Flattened name of the field. Field is computed from jsonPath field."
    },
    {
    "Deprecated": true,
    "type": [
    "null",
    "string"
    ],
    "name": "jsonPath",
    "default": null,
    "doc": "Flattened name of a field in JSON Path notation."
    },
    {
    "type": "boolean",
    "name": "nullable",
    "default": false,
    "doc": "Indicates if this field is optional or nullable"
    },
    {
    "Searchable": {
    "boostScore": 0.1,
    "fieldName": "fieldDescriptions",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Description"
    },
    {
    "Searchable": {
    "boostScore": 0.2,
    "fieldName": "fieldLabels",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "label",
    "default": null,
    "doc": "Label of the field. Provides a more human-readable name for the field than field path. Some sources will\nprovide this metadata but not all sources have the concept of a label. If just one string is associated with\na field in a source, that is most likely a description."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    }
    ],
    "name": "created",
    "default": null,
    "doc": "An AuditStamp corresponding to the creation of this schema field."
    },
    {
    "type": [
    "null",
    "com.linkedin.common.AuditStamp"
    ],
    "name": "lastModified",
    "default": null,
    "doc": "An AuditStamp corresponding to the last modification of this schema field."
    },
    {
    "type": {
    "type": "record",
    "name": "SchemaFieldDataType",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": [
    {
    "type": "record",
    "name": "BooleanType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Boolean field type."
    },
    {
    "type": "record",
    "name": "FixedType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Fixed field type."
    },
    {
    "type": "record",
    "name": "StringType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "String field type."
    },
    {
    "type": "record",
    "name": "BytesType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Bytes field type."
    },
    {
    "type": "record",
    "name": "NumberType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Number data type: long, integer, short, etc.."
    },
    {
    "type": "record",
    "name": "DateType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Date field type."
    },
    {
    "type": "record",
    "name": "TimeType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Time field type. This should also be used for datetimes."
    },
    {
    "type": "record",
    "name": "EnumType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Enum field type."
    },
    {
    "type": "record",
    "name": "NullType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Null field type."
    },
    {
    "type": "record",
    "name": "MapType",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "keyType",
    "default": null,
    "doc": "Key type in a map"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "valueType",
    "default": null,
    "doc": "Type of the value in a map"
    }
    ],
    "doc": "Map field type."
    },
    {
    "type": "record",
    "name": "ArrayType",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "nestedType",
    "default": null,
    "doc": "List of types this array holds."
    }
    ],
    "doc": "Array field type."
    },
    {
    "type": "record",
    "name": "UnionType",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "nestedTypes",
    "default": null,
    "doc": "List of types in union type."
    }
    ],
    "doc": "Union field type."
    },
    {
    "type": "record",
    "name": "RecordType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Record field type."
    }
    ],
    "name": "type",
    "doc": "Data platform specific types"
    }
    ],
    "doc": "Schema field data types"
    },
    "name": "type",
    "doc": "Platform independent field type of the field."
    },
    {
    "type": "string",
    "name": "nativeDataType",
    "doc": "The native type of the field in the dataset's platform as declared by platform schema."
    },
    {
    "type": "boolean",
    "name": "recursive",
    "default": false,
    "doc": "There are use cases when a field in type B references type A. A field in A references field of type B. In such cases, we will mark the first field as recursive."
    },
    {
    "Relationship": {
    "/tags/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "SchemaFieldTaggedWith"
    }
    },
    "Searchable": {
    "/tags/*/tag": {
    "boostScore": 0.5,
    "fieldName": "fieldTags",
    "fieldType": "URN"
    }
    },
    "type": [
    "null",
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }
    ],
    "name": "globalTags",
    "default": null,
    "doc": "Tags associated with the field"
    },
    {
    "Relationship": {
    "/terms/*/urn": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "SchemaFieldWithGlossaryTerm"
    }
    },
    "Searchable": {
    "/terms/*/urn": {
    "boostScore": 0.5,
    "fieldName": "fieldGlossaryTerms",
    "fieldType": "URN"
    }
    },
    "type": [
    "null",
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": "com.linkedin.common.AuditStamp",
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }
    ],
    "name": "glossaryTerms",
    "default": null,
    "doc": "Glossary terms associated with the field"
    },
    {
    "type": "boolean",
    "name": "isPartOfKey",
    "default": false,
    "doc": "For schema fields that are part of complex keys, set this field to true\nWe do this to easily distinguish between value and key fields"
    },
    {
    "type": [
    "null",
    "boolean"
    ],
    "name": "isPartitioningKey",
    "default": null,
    "doc": "For Datasets which are partitioned, this determines the partitioning key."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "jsonProps",
    "default": null,
    "doc": "For schema fields that have other properties that are not modeled explicitly,\nuse this field to serialize those properties into a JSON string"
    }
    ],
    "doc": "SchemaField to describe metadata related to dataset schema."
    }
    ],
    "name": "schemaField",
    "default": null,
    "doc": "Copied version of the referenced schema field object for indexing purposes"
    }
    ],
    "doc": "Information about a field a chart or dashboard references"
    }
    },
    "name": "fields",
    "doc": "List of fields being referenced"
    }
    ],
    "doc": "Information about the fields a chart or dashboard references"
    }

    subTypes

    Sub Types. Use this aspect to specialize a generic Entity e.g. Making a Dataset also be a View or also be a LookerExplore

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "subTypes"
    },
    "name": "SubTypes",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldType": "KEYWORD",
    "filterNameOverride": "Sub Type",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "typeNames",
    "doc": "The names of the specific types."
    }
    ],
    "doc": "Sub Types. Use this aspect to specialize a generic Entity\ne.g. Making a Dataset also be a View or also be a LookerExplore"
    }

    embed

    Information regarding rendering an embed for an asset.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "embed"
    },
    "name": "Embed",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "renderUrl",
    "default": null,
    "doc": "An embed URL to be rendered inside of an iframe."
    }
    ],
    "doc": "Information regarding rendering an embed for an asset."
    }

    dashboardUsageStatistics (Timeseries)

    Experimental (Subject to breaking change) -- Stats corresponding to dashboard's usage.

    If this aspect represents the latest snapshot of the statistics about a Dashboard, the eventGranularity field should be null. If this aspect represents a bucketed window of usage statistics (e.g. over a day), then the eventGranularity field should be set accordingly.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dashboardUsageStatistics",
    "type": "timeseries"
    },
    "name": "DashboardUsageStatistics",
    "namespace": "com.linkedin.dashboard",
    "fields": [
    {
    "type": "long",
    "name": "timestampMillis",
    "doc": "The event timestamp field as epoch at UTC in milli seconds."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindowSize",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "CalendarInterval",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "SECOND",
    "MINUTE",
    "HOUR",
    "DAY",
    "WEEK",
    "MONTH",
    "QUARTER",
    "YEAR"
    ]
    },
    "name": "unit",
    "doc": "Interval unit such as minute/hour/day etc."
    },
    {
    "type": "int",
    "name": "multiple",
    "default": 1,
    "doc": "How many units. Defaults to 1."
    }
    ],
    "doc": "Defines the size of a time window."
    }
    ],
    "name": "eventGranularity",
    "default": null,
    "doc": "Granularity of the event if applicable"
    },
    {
    "type": [
    {
    "type": "record",
    "name": "PartitionSpec",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "PartitionType",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "FULL_TABLE",
    "QUERY",
    "PARTITION"
    ]
    },
    "name": "type",
    "default": "PARTITION"
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "partition",
    "doc": "String representation of the partition"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindow",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": "long",
    "name": "startTimeMillis",
    "doc": "Start time as epoch at UTC."
    },
    {
    "type": "com.linkedin.timeseries.TimeWindowSize",
    "name": "length",
    "doc": "The length of the window."
    }
    ]
    }
    ],
    "name": "timePartition",
    "default": null,
    "doc": "Time window of the partition if applicable"
    }
    ],
    "doc": "Defines how the data is partitioned"
    },
    "null"
    ],
    "name": "partitionSpec",
    "default": {
    "partition": "FULL_TABLE_SNAPSHOT",
    "type": "FULL_TABLE",
    "timePartition": null
    },
    "doc": "The optional partition specification."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "messageId",
    "default": null,
    "doc": "The optional messageId, if provided serves as a custom user-defined unique identifier for an aspect value."
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "int"
    ],
    "name": "viewsCount",
    "default": null,
    "doc": "The total number of times dashboard has been viewed"
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "int"
    ],
    "name": "executionsCount",
    "default": null,
    "doc": "The total number of dashboard executions (refreshes / syncs) "
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "int"
    ],
    "name": "uniqueUserCount",
    "default": null,
    "doc": "Unique user count"
    },
    {
    "TimeseriesFieldCollection": {
    "key": "user"
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "DashboardUserUsageCounts",
    "namespace": "com.linkedin.dashboard",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "user",
    "doc": "The unique id of the user."
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "int"
    ],
    "name": "viewsCount",
    "default": null,
    "doc": "The number of times the user has viewed the dashboard"
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "int"
    ],
    "name": "executionsCount",
    "default": null,
    "doc": "The number of times the user has executed (refreshed) the dashboard"
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "int"
    ],
    "name": "usageCount",
    "default": null,
    "doc": "Normalized numeric metric representing user's dashboard usage -- the number of times the user executed or viewed the dashboard. "
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "string"
    ],
    "name": "userEmail",
    "default": null,
    "doc": "If user_email is set, we attempt to resolve the user's urn upon ingest"
    }
    ],
    "doc": "Records a single user's usage counts for a given resource"
    }
    }
    ],
    "name": "userCounts",
    "default": null,
    "doc": "Users within this bucket, with frequency counts"
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "int"
    ],
    "name": "favoritesCount",
    "default": null,
    "doc": "The total number of times that the dashboard has been favorited "
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "long"
    ],
    "name": "lastViewedAt",
    "default": null,
    "doc": "Last viewed at\n\nThis should not be set in cases where statistics are windowed. "
    }
    ],
    "doc": "Experimental (Subject to breaking change) -- Stats corresponding to dashboard's usage.\n\nIf this aspect represents the latest snapshot of the statistics about a Dashboard, the eventGranularity field should be null. \nIf this aspect represents a bucketed window of usage statistics (e.g. over a day), then the eventGranularity field should be set accordingly. "
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • Contains

      • Chart via dashboardInfo.charts
      • Chart via dashboardInfo.chartEdges
    • Consumes

      • Dataset via dashboardInfo.datasets
      • Dataset via dashboardInfo.datasetEdges
    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • TaggedWith

      • Tag via globalTags.tags
      • Tag via inputFields.fields.schemaField.globalTags.tags
    • TermedWith

      • GlossaryTerm via glossaryTerms.terms.urn
    • AssociatedWith

      • Domain via domains.domains
    • IsPartOf

      • Container via container.container
    • consumesField

      • SchemaField via inputFields.fields.schemaFieldUrn
    • SchemaFieldTaggedWith

      • Tag via inputFields.fields.schemaField.globalTags
    • SchemaFieldWithGlossaryTerm

      • GlossaryTerm via inputFields.fields.schemaField.glossaryTerms

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/dataflow/index.html b/docs/generated/metamodel/entities/dataflow/index.html index 7eb31645b2be2..07177c83d8661 100644 --- a/docs/generated/metamodel/entities/dataflow/index.html +++ b/docs/generated/metamodel/entities/dataflow/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    DataFlow

    Aspects

    dataFlowKey

    Key for a Data Flow

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataFlowKey"
    },
    "name": "DataFlowKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": "string",
    "name": "orchestrator",
    "doc": "Workflow manager like azkaban, airflow which orchestrates the flow"
    },
    {
    "Searchable": {
    "enableAutocomplete": true,
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "flowId",
    "doc": "Unique Identifier of the data flow"
    },
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": "string",
    "name": "cluster",
    "doc": "Cluster where the flow is executed"
    }
    ],
    "doc": "Key for a Data Flow"
    }

    dataFlowInfo

    Information about a Data processing flow

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataFlowInfo"
    },
    "name": "DataFlowInfo",
    "namespace": "com.linkedin.datajob",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "externalUrl",
    "default": null,
    "doc": "URL where the reference exist"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Flow name"
    },
    {
    "Searchable": {
    "fieldType": "TEXT",
    "hasValuesFieldName": "hasDescription"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Flow description"
    },
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL",
    "queryByDefault": false
    },
    "type": [
    "null",
    "string"
    ],
    "name": "project",
    "default": null,
    "doc": "Optional project/namespace associated with the flow"
    },
    {
    "Searchable": {
    "/time": {
    "fieldName": "createdAt",
    "fieldType": "DATETIME"
    }
    },
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the event occur"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "actor",
    "default": null,
    "doc": "Optional: The actor urn involved in the event."
    }
    ],
    "doc": "A standard event timestamp"
    }
    ],
    "name": "created",
    "default": null,
    "doc": "A timestamp documenting when the asset was created in the source Data Platform (not on DataHub)"
    },
    {
    "Searchable": {
    "/time": {
    "fieldName": "lastModifiedAt",
    "fieldType": "DATETIME"
    }
    },
    "type": [
    "null",
    "com.linkedin.common.TimeStamp"
    ],
    "name": "lastModified",
    "default": null,
    "doc": "A timestamp documenting when the asset was last modified in the source Data Platform (not on DataHub)"
    }
    ],
    "doc": "Information about a Data processing flow"
    }

    editableDataFlowProperties

    Stores editable changes made to properties. This separates changes made from ingestion pipelines and edits in the UI to avoid accidental overwrites of user-provided data by ingestion pipelines

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "editableDataFlowProperties"
    },
    "name": "EditableDataFlowProperties",
    "namespace": "com.linkedin.datajob",
    "fields": [
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "created",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "An AuditStamp corresponding to the creation of this resource/association/sub-resource. A value of 0 for time indicates missing data."
    },
    {
    "type": "com.linkedin.common.AuditStamp",
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "An AuditStamp corresponding to the last modification of this resource/association/sub-resource. If no modification has happened since creation, lastModified should be the same as created. A value of 0 for time indicates missing data."
    },
    {
    "type": [
    "null",
    "com.linkedin.common.AuditStamp"
    ],
    "name": "deleted",
    "default": null,
    "doc": "An AuditStamp corresponding to the deletion of this resource/association/sub-resource. Logically, deleted MUST have a later timestamp than creation. It may or may not have the same time as lastModified depending upon the resource/association/sub-resource semantics."
    },
    {
    "Searchable": {
    "fieldName": "editedDescription",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Edited documentation of the data flow"
    }
    ],
    "doc": "Stores editable changes made to properties. This separates changes made from\ningestion pipelines and edits in the UI to avoid accidental overwrites of user-provided data by ingestion pipelines"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that overseas the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    browsePaths

    Shared aspect containing Browse Paths to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePaths"
    },
    "name": "BrowsePaths",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "fieldName": "browsePaths",
    "fieldType": "BROWSE_PATH"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "paths",
    "doc": "A list of valid browse paths for the entity.\n\nBrowse paths are expected to be forward slash-separated strings. For example: 'prod/snowflake/datasetName'"
    }
    ],
    "doc": "Shared aspect containing Browse Paths to be indexed for an entity."
    }

    glossaryTerms

    Related business terms information

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    dataPlatformInstance

    The specific instance of the data platform that this entity belongs to

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstance"
    },
    "name": "DataPlatformInstance",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "URN",
    "filterNameOverride": "Platform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data Platform"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "platformInstance",
    "fieldType": "URN",
    "filterNameOverride": "Platform Instance"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "instance",
    "default": null,
    "doc": "Instance of the data platform (e.g. db instance)"
    }
    ],
    "doc": "The specific instance of the data platform that this entity belongs to"
    }

    browsePathsV2

    Shared aspect containing a Browse Path to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePathsV2"
    },
    "name": "BrowsePathsV2",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*/id": {
    "fieldName": "browsePathV2",
    "fieldType": "BROWSE_PATH_V2"
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "BrowsePathEntry",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "string",
    "name": "id",
    "doc": "The ID of the browse path entry. This is what gets stored in the index.\nIf there's an urn associated with this entry, id and urn will be the same"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "urn",
    "default": null,
    "doc": "Optional urn pointing to some entity in DataHub"
    }
    ],
    "doc": "Represents a single level in an entity's browsePathV2"
    }
    },
    "name": "path",
    "doc": "A valid browse path for the entity. This field is provided by DataHub by default.\nThis aspect is a newer version of browsePaths where we can encode more information in the path.\nThis path is also based on containers for a given entity if it has containers.\n\nThis is stored in elasticsearch as unit-separator delimited strings and only includes platform specific folders or containers.\nThese paths should not include high level info captured elsewhere ie. Platform and Environment."
    }
    ],
    "doc": "Shared aspect containing a Browse Path to be indexed for an entity."
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    versionInfo

    Information about a Data processing job

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "versionInfo"
    },
    "name": "VersionInfo",
    "namespace": "com.linkedin.datajob",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "externalUrl",
    "default": null,
    "doc": "URL where the reference exist"
    },
    {
    "type": "string",
    "name": "version",
    "doc": "The version which can identify a job version like a commit hash or md5 hash"
    },
    {
    "type": "string",
    "name": "versionType",
    "doc": "The type of the version like git hash or md5 hash"
    }
    ],
    "doc": "Information about a Data processing job"
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • TaggedWith

      • Tag via globalTags.tags
    • TermedWith

      • GlossaryTerm via glossaryTerms.terms.urn
    • AssociatedWith

      • Domain via domains.domains

    Incoming

    These are the relationships stored in other entity's aspects

    • IsPartOf

      • DataJob via dataJobKey.flow

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/datahubaccesstoken/index.html b/docs/generated/metamodel/entities/datahubaccesstoken/index.html index 3cc8b6d5d2eb1..c5e1ec4935e1a 100644 --- a/docs/generated/metamodel/entities/datahubaccesstoken/index.html +++ b/docs/generated/metamodel/entities/datahubaccesstoken/index.html @@ -8,13 +8,13 @@ - +

    DataHubAccessToken

    Aspects

    dataHubAccessTokenInfo

    Information about a DataHub Access Token

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubAccessTokenInfo"
    },
    "name": "DataHubAccessTokenInfo",
    "namespace": "com.linkedin.access.token",
    "fields": [
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": "string",
    "name": "name",
    "doc": "User defined name for the access token if defined."
    },
    {
    "Searchable": {
    "fieldType": "URN"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actorUrn",
    "doc": "Urn of the actor to which this access token belongs to."
    },
    {
    "Searchable": {
    "fieldType": "URN"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "ownerUrn",
    "doc": "Urn of the actor which created this access token."
    },
    {
    "Searchable": {
    "fieldType": "COUNT",
    "queryByDefault": false
    },
    "type": "long",
    "name": "createdAt",
    "doc": "When the token was created."
    },
    {
    "Searchable": {
    "fieldType": "COUNT",
    "queryByDefault": false
    },
    "type": [
    "null",
    "long"
    ],
    "name": "expiresAt",
    "default": null,
    "doc": "When the token expires."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Description of the token if defined."
    }
    ],
    "doc": "Information about a DataHub Access Token"
    }

    Relationships

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/datahubexecutionrequest/index.html b/docs/generated/metamodel/entities/datahubexecutionrequest/index.html index 0f3db7ecd6053..68e49a56066b1 100644 --- a/docs/generated/metamodel/entities/datahubexecutionrequest/index.html +++ b/docs/generated/metamodel/entities/datahubexecutionrequest/index.html @@ -8,14 +8,14 @@ - +

    DataHubExecutionRequest

    Aspects

    dataHubExecutionRequestInput

    A request to execute some remote logic or action. TODO: Determine who is responsible for emitting execution request success or failure. Executor?

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubExecutionRequestInput"
    },
    "name": "ExecutionRequestInput",
    "namespace": "com.linkedin.execution",
    "fields": [
    {
    "type": "string",
    "name": "task",
    "doc": "The name of the task to execute, for example RUN_INGEST"
    },
    {
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "args",
    "doc": "Arguments provided to the task"
    },
    {
    "type": "string",
    "name": "executorId",
    "doc": "Advanced: specify a specific executor to route the request to. If none is provided, a \"default\" executor is used."
    },
    {
    "type": {
    "type": "record",
    "name": "ExecutionRequestSource",
    "namespace": "com.linkedin.execution",
    "fields": [
    {
    "type": "string",
    "name": "type",
    "doc": "The type of the execution request source, e.g. INGESTION_SOURCE"
    },
    {
    "Relationship": {
    "entityTypes": [
    "dataHubIngestionSource"
    ],
    "name": "ingestionSource"
    },
    "Searchable": {
    "fieldName": "ingestionSource",
    "fieldType": "KEYWORD",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "ingestionSource",
    "default": null,
    "doc": "The urn of the ingestion source associated with the ingestion request. Present if type is INGESTION_SOURCE"
    }
    ]
    },
    "name": "source",
    "doc": "Source which created the execution request"
    },
    {
    "Searchable": {
    "fieldName": "requestTimeMs",
    "fieldType": "COUNT",
    "queryByDefault": false
    },
    "type": "long",
    "name": "requestedAt",
    "doc": "Time at which the execution request input was created"
    }
    ],
    "doc": "A request to execute some remote logic or action.\nTODO: Determine who is responsible for emitting execution request success or failure. Executor?"
    }

    dataHubExecutionRequestSignal

    A signal sent to a running execution request

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubExecutionRequestSignal"
    },
    "name": "ExecutionRequestSignal",
    "namespace": "com.linkedin.execution",
    "fields": [
    {
    "type": "string",
    "name": "signal",
    "doc": "The signal to issue, e.g. KILL"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "executorId",
    "default": null,
    "doc": "Advanced: specify a specific executor to route the request to. If none is provided, a \"default\" executor is used."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createdAt",
    "doc": "Audit Stamp"
    }
    ],
    "doc": "A signal sent to a running execution request"
    }

    dataHubExecutionRequestResult

    The result of an execution request

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubExecutionRequestResult"
    },
    "name": "ExecutionRequestResult",
    "namespace": "com.linkedin.execution",
    "fields": [
    {
    "type": "string",
    "name": "status",
    "doc": "The status of the execution request"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "report",
    "default": null,
    "doc": "The pretty-printed execution report."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "StructuredExecutionReport",
    "namespace": "com.linkedin.execution",
    "fields": [
    {
    "type": "string",
    "name": "type",
    "doc": "The type of the structured report. (e.g. INGESTION_REPORT, TEST_CONNECTION_REPORT, etc.)"
    },
    {
    "type": "string",
    "name": "serializedValue",
    "doc": "The serialized value of the structured report"
    },
    {
    "type": "string",
    "name": "contentType",
    "doc": "The content-type of the serialized value (e.g. application/json, application/json;gzip etc.)"
    }
    ],
    "doc": "A flexible carrier for structured results of an execution request.\nThe goal is to allow for free flow of structured responses from execution tasks to the orchestrator or observer.\nThe full spectrum of different execution report types is not intended to be modeled by this object."
    }
    ],
    "name": "structuredReport",
    "default": null,
    "doc": "A structured report if available."
    },
    {
    "Searchable": {
    "fieldName": "startTimeMs",
    "fieldType": "COUNT",
    "queryByDefault": false
    },
    "type": [
    "null",
    "long"
    ],
    "name": "startTimeMs",
    "default": null,
    "doc": "Time at which the request was created"
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "durationMs",
    "default": null,
    "doc": "Duration in milliseconds"
    }
    ],
    "doc": "The result of an execution request"
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • ingestionSource

      • DataHubIngestionSource via dataHubExecutionRequestInput.source.ingestionSource

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/datahubingestionsource/index.html b/docs/generated/metamodel/entities/datahubingestionsource/index.html index 5431c7487b284..47fb203c6ac83 100644 --- a/docs/generated/metamodel/entities/datahubingestionsource/index.html +++ b/docs/generated/metamodel/entities/datahubingestionsource/index.html @@ -8,13 +8,13 @@ - +

    DataHubIngestionSource

    Aspects

    dataHubIngestionSourceInfo

    Info about a DataHub ingestion source

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubIngestionSourceInfo"
    },
    "name": "DataHubIngestionSourceInfo",
    "namespace": "com.linkedin.ingestion",
    "fields": [
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": "string",
    "name": "name",
    "doc": "The display name of the ingestion source"
    },
    {
    "type": "string",
    "name": "type",
    "doc": "The type of the source itself, e.g. mysql, bigquery, bigquery-usage. Should match the recipe."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "platform",
    "default": null,
    "doc": "Data Platform URN associated with the source"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "DataHubIngestionSourceSchedule",
    "namespace": "com.linkedin.ingestion",
    "fields": [
    {
    "type": "string",
    "name": "interval",
    "doc": "A cron-formatted execution interval, as a cron string, e.g. * * * * *"
    },
    {
    "type": "string",
    "name": "timezone",
    "doc": "Timezone in which the cron interval applies, e.g. America/Los_Angeles"
    }
    ],
    "doc": "The schedule associated with an ingestion source."
    }
    ],
    "name": "schedule",
    "default": null,
    "doc": "The schedule on which the ingestion source is executed"
    },
    {
    "type": {
    "type": "record",
    "name": "DataHubIngestionSourceConfig",
    "namespace": "com.linkedin.ingestion",
    "fields": [
    {
    "type": "string",
    "name": "recipe",
    "doc": "The JSON recipe to use for ingestion"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "version",
    "default": null,
    "doc": "The PyPI version of the datahub CLI to use when executing a recipe"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "executorId",
    "default": null,
    "doc": "The id of the executor to use to execute the ingestion run"
    },
    {
    "type": [
    "null",
    "boolean"
    ],
    "name": "debugMode",
    "default": null,
    "doc": "Whether or not to run this ingestion source in debug mode"
    }
    ]
    },
    "name": "config",
    "doc": "Parameters associated with the Ingestion Source"
    }
    ],
    "doc": "Info about a DataHub ingestion source"
    }

    Relationships

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/datahubpolicy/index.html b/docs/generated/metamodel/entities/datahubpolicy/index.html index 89707cfeb819c..ee9d47ce0b16c 100644 --- a/docs/generated/metamodel/entities/datahubpolicy/index.html +++ b/docs/generated/metamodel/entities/datahubpolicy/index.html @@ -8,13 +8,13 @@ - +

    DataHubPolicy

    DataHub Policies represent access policies granted to users or groups on metadata operations like edit, view etc.

    Aspects

    dataHubPolicyKey

    Key for a DataHub Policy

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubPolicyKey"
    },
    "name": "DataHubPolicyKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "type": "string",
    "name": "id",
    "doc": "A unique id for the DataHub access policy record. Generated on the server side at policy creation time."
    }
    ],
    "doc": "Key for a DataHub Policy"
    }

    dataHubPolicyInfo

    Information about a DataHub (UI) access policy.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubPolicyInfo"
    },
    "name": "DataHubPolicyInfo",
    "namespace": "com.linkedin.policy",
    "fields": [
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": "string",
    "name": "displayName",
    "doc": "Display name of the Policy"
    },
    {
    "Searchable": {
    "fieldType": "TEXT"
    },
    "type": "string",
    "name": "description",
    "doc": "Description of the Policy"
    },
    {
    "type": "string",
    "name": "type",
    "doc": "The type of policy"
    },
    {
    "type": "string",
    "name": "state",
    "doc": "The state of policy, ACTIVE or INACTIVE"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "DataHubResourceFilter",
    "namespace": "com.linkedin.policy",
    "fields": [
    {
    "deprecated": true,
    "type": [
    "null",
    "string"
    ],
    "name": "type",
    "default": null,
    "doc": "The type of resource that the policy applies to. This will most often be a data asset entity name, for\nexample 'dataset'. It is not strictly required because in the future we will want to support filtering a resource\nby domain, as well."
    },
    {
    "deprecated": true,
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "resources",
    "default": null,
    "doc": "A specific set of resources to apply the policy to, e.g. asset urns"
    },
    {
    "deprecated": true,
    "type": "boolean",
    "name": "allResources",
    "default": false,
    "doc": "Whether the policy should be applied to all assets matching the filter."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "PolicyMatchFilter",
    "namespace": "com.linkedin.policy",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "PolicyMatchCriterion",
    "namespace": "com.linkedin.policy",
    "fields": [
    {
    "type": "string",
    "name": "field",
    "doc": "The name of the field that the criterion refers to"
    },
    {
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "values",
    "doc": "Values. Matches criterion if any one of the values matches condition (OR-relationship)"
    },
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "EQUALS": "Whether the field matches the value"
    },
    "name": "PolicyMatchCondition",
    "namespace": "com.linkedin.policy",
    "symbols": [
    "EQUALS"
    ],
    "doc": "The matching condition in a filter criterion"
    },
    "name": "condition",
    "default": "EQUALS",
    "doc": "The condition for the criterion"
    }
    ],
    "doc": "A criterion for matching a field with given value"
    }
    },
    "name": "criteria",
    "doc": "A list of criteria to apply conjunctively (so all criteria must pass)"
    }
    ],
    "doc": "The filter for specifying the resource or actor to apply privileges to"
    }
    ],
    "name": "filter",
    "default": null,
    "doc": "Filter to apply privileges to"
    }
    ],
    "doc": "Information used to filter DataHub resource."
    }
    ],
    "name": "resources",
    "default": null,
    "doc": "The resource that the policy applies to. Not required for some 'Platform' privileges."
    },
    {
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "privileges",
    "doc": "The privileges that the policy grants."
    },
    {
    "type": {
    "type": "record",
    "name": "DataHubActorFilter",
    "namespace": "com.linkedin.policy",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "users",
    "default": null,
    "doc": "A specific set of users to apply the policy to (disjunctive)"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "groups",
    "default": null,
    "doc": "A specific set of groups to apply the policy to (disjunctive)"
    },
    {
    "type": "boolean",
    "name": "resourceOwners",
    "default": false,
    "doc": "Whether the filter should return true for owners of a particular resource.\nOnly applies to policies of type 'Metadata', which have a resource associated with them."
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "resourceOwnersTypes",
    "default": null,
    "doc": "Define type of ownership for the policy"
    },
    {
    "type": "boolean",
    "name": "allUsers",
    "default": false,
    "doc": "Whether the filter should apply to all users."
    },
    {
    "type": "boolean",
    "name": "allGroups",
    "default": false,
    "doc": "Whether the filter should apply to all groups."
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataHubRole"
    ],
    "name": "IsAssociatedWithRole"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "roles",
    "default": null,
    "doc": "A specific set of roles to apply the policy to (disjunctive)."
    }
    ],
    "doc": "Information used to filter DataHub actors."
    },
    "name": "actors",
    "doc": "The actors that the policy applies to."
    },
    {
    "type": "boolean",
    "name": "editable",
    "default": true,
    "doc": "Whether the policy should be editable via the UI"
    },
    {
    "Searchable": {
    "fieldType": "DATETIME"
    },
    "type": [
    "null",
    "long"
    ],
    "name": "lastUpdatedTimestamp",
    "default": null,
    "doc": "Timestamp when the policy was last updated"
    }
    ],
    "doc": "Information about a DataHub (UI) access policy."
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • IsAssociatedWithRole

      • DataHubRole via dataHubPolicyInfo.actors.roles

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/datahubretention/index.html b/docs/generated/metamodel/entities/datahubretention/index.html index ff85a3b6eb119..8abb2ed9546d9 100644 --- a/docs/generated/metamodel/entities/datahubretention/index.html +++ b/docs/generated/metamodel/entities/datahubretention/index.html @@ -8,13 +8,13 @@ - +

    DataHubRetention

    Aspects

    dataHubRetentionKey

    Key for a DataHub Retention

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubRetentionKey"
    },
    "name": "DataHubRetentionKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "type": "string",
    "name": "entityName",
    "doc": "Entity name to apply retention to. * (or empty) for applying defaults."
    },
    {
    "type": "string",
    "name": "aspectName",
    "doc": "Aspect name to apply retention to. * (or empty) for applying defaults."
    }
    ],
    "doc": "Key for a DataHub Retention"
    }

    dataHubRetentionConfig

    The retention configuration for an entity/aspect pair, specifying version-based and/or time-based retention policies.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubRetentionConfig"
    },
    "name": "DataHubRetentionConfig",
    "namespace": "com.linkedin.retention",
    "fields": [
    {
    "type": {
    "type": "record",
    "name": "Retention",
    "namespace": "com.linkedin.retention",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "VersionBasedRetention",
    "namespace": "com.linkedin.retention",
    "fields": [
    {
    "type": "int",
    "name": "maxVersions"
    }
    ],
    "doc": "Keep max N latest records"
    }
    ],
    "name": "version",
    "default": null
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeBasedRetention",
    "namespace": "com.linkedin.retention",
    "fields": [
    {
    "type": "int",
    "name": "maxAgeInSeconds"
    }
    ],
    "doc": "Keep records that are less than X seconds old"
    }
    ],
    "name": "time",
    "default": null
    }
    ],
    "doc": "Base class that encapsulates different retention policies.\nOnly one of the fields should be set"
    },
    "name": "retention"
    }
    ]
    }

    Relationships

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/datahubrole/index.html b/docs/generated/metamodel/entities/datahubrole/index.html index 7abd7f2c7a07a..759db6c15661a 100644 --- a/docs/generated/metamodel/entities/datahubrole/index.html +++ b/docs/generated/metamodel/entities/datahubrole/index.html @@ -8,13 +8,13 @@ - +

    DataHubRole

    Aspects

    dataHubRoleInfo

    Information about a DataHub Role.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubRoleInfo"
    },
    "name": "DataHubRoleInfo",
    "namespace": "com.linkedin.policy",
    "fields": [
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": "string",
    "name": "name",
    "doc": "Name of the Role"
    },
    {
    "Searchable": {
    "fieldType": "TEXT"
    },
    "type": "string",
    "name": "description",
    "doc": "Description of the Role"
    },
    {
    "type": "boolean",
    "name": "editable",
    "default": false,
    "doc": "Whether the role should be editable via the UI"
    }
    ],
    "doc": "Information about a DataHub Role."
    }

    Relationships

    Incoming

    These are the relationships stored in other entity's aspects

    • IsAssociatedWithRole

      • DataHubPolicy via dataHubPolicyInfo.actors.roles
    • IsMemberOfRole

      • Corpuser via roleMembership.roles

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/datahubsecret/index.html b/docs/generated/metamodel/entities/datahubsecret/index.html index b1940bba24cc1..6703b64654efc 100644 --- a/docs/generated/metamodel/entities/datahubsecret/index.html +++ b/docs/generated/metamodel/entities/datahubsecret/index.html @@ -8,13 +8,13 @@ - +

    DataHubSecret

    Aspects

    dataHubSecretValue

    The value of a DataHub Secret

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubSecretValue"
    },
    "name": "DataHubSecretValue",
    "namespace": "com.linkedin.secret",
    "fields": [
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": "string",
    "name": "name",
    "doc": "The display name for the secret"
    },
    {
    "type": "string",
    "name": "value",
    "doc": "The AES-encrypted value of the DataHub secret."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Description of the secret"
    },
    {
    "Searchable": {
    "/time": {
    "fieldName": "createdTime",
    "fieldType": "DATETIME"
    }
    },
    "type": [
    "null",
    {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    }
    ],
    "name": "created",
    "default": null,
    "doc": "Created Audit stamp"
    }
    ],
    "doc": "The value of a DataHub Secret"
    }

    Relationships

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/datahubstepstate/index.html b/docs/generated/metamodel/entities/datahubstepstate/index.html index d797e63af435b..c811e6b44b3f3 100644 --- a/docs/generated/metamodel/entities/datahubstepstate/index.html +++ b/docs/generated/metamodel/entities/datahubstepstate/index.html @@ -8,13 +8,13 @@ - +

    DataHubStepState

    Aspects

    dataHubStepStateProperties

    The properties associated with a DataHub step state

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubStepStateProperties"
    },
    "name": "DataHubStepStateProperties",
    "namespace": "com.linkedin.step",
    "fields": [
    {
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "properties",
    "default": {},
    "doc": "Description of the secret"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "doc": "Audit stamp describing the last person to update it."
    }
    ],
    "doc": "The properties associated with a DataHub step state"
    }

    Relationships

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/datahubupgrade/index.html b/docs/generated/metamodel/entities/datahubupgrade/index.html index 793aaf39fc97a..e3807de975568 100644 --- a/docs/generated/metamodel/entities/datahubupgrade/index.html +++ b/docs/generated/metamodel/entities/datahubupgrade/index.html @@ -8,13 +8,13 @@ - +

    DataHubUpgrade

    Aspects

    dataHubUpgradeRequest

    Information collected when kicking off a DataHubUpgrade

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubUpgradeRequest"
    },
    "name": "DataHubUpgradeRequest",
    "namespace": "com.linkedin.upgrade",
    "fields": [
    {
    "type": "long",
    "name": "timestampMs",
    "doc": "Timestamp when we started this DataHubUpgrade"
    },
    {
    "type": "string",
    "name": "version",
    "doc": "Version of this upgrade"
    }
    ],
    "doc": "Information collected when kicking off a DataHubUpgrade"
    }

    dataHubUpgradeResult

    Information collected when a DataHubUpgrade successfully finishes

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubUpgradeResult"
    },
    "name": "DataHubUpgradeResult",
    "namespace": "com.linkedin.upgrade",
    "fields": [
    {
    "type": "long",
    "name": "timestampMs",
    "doc": "Timestamp when we started this DataHubUpgrade"
    },
    {
    "type": [
    "null",
    {
    "type": "map",
    "values": "string"
    }
    ],
    "name": "result",
    "default": null,
    "doc": "Result map to place helpful information about this upgrade job"
    }
    ],
    "doc": "Information collected when a DataHubUpgrade successfully finishes"
    }

    Relationships

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/datahubview/index.html b/docs/generated/metamodel/entities/datahubview/index.html index 280dc0be28e4a..70b512d355c0d 100644 --- a/docs/generated/metamodel/entities/datahubview/index.html +++ b/docs/generated/metamodel/entities/datahubview/index.html @@ -8,13 +8,13 @@ - +

    DataHubView

    Aspects

    dataHubViewInfo

    Information about a DataHub View. -- TODO: Understand whether an entity type filter is required.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataHubViewInfo"
    },
    "name": "DataHubViewInfo",
    "namespace": "com.linkedin.view",
    "fields": [
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": "string",
    "name": "name",
    "doc": "The name of the View"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Description of the view"
    },
    {
    "Searchable": {},
    "type": {
    "type": "enum",
    "symbolDocs": {
    "GLOBAL": "A global view, which all users can see and use.",
    "PERSONAL": "A view private for a specific person."
    },
    "name": "DataHubViewType",
    "namespace": "com.linkedin.view",
    "symbols": [
    "PERSONAL",
    "GLOBAL"
    ]
    },
    "name": "type",
    "doc": "The type of View"
    },
    {
    "type": {
    "type": "record",
    "name": "DataHubViewDefinition",
    "namespace": "com.linkedin.view",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "entityTypes",
    "doc": "The Entity Types in the scope of the View."
    },
    {
    "type": {
    "type": "record",
    "name": "Filter",
    "namespace": "com.linkedin.metadata.query.filter",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "ConjunctiveCriterion",
    "namespace": "com.linkedin.metadata.query.filter",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Criterion",
    "namespace": "com.linkedin.metadata.query.filter",
    "fields": [
    {
    "type": "string",
    "name": "field",
    "doc": "The name of the field that the criterion refers to"
    },
    {
    "type": "string",
    "name": "value",
    "doc": "The value of the intended field"
    },
    {
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "values",
    "default": [],
    "doc": "Values. one of which the intended field should match\nNote, if values is set, the above \"value\" field will be ignored"
    },
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "CONTAIN": "Represent the relation: String field contains value, e.g. name contains Profile",
    "END_WITH": "Represent the relation: String field ends with value, e.g. name ends with Event",
    "EQUAL": "Represent the relation: field = value, e.g. platform = hdfs",
    "EXISTS": "Represents the relation: field exists and is non-empty, e.g. owners is not null and != [] (empty)",
    "GREATER_THAN": "Represent the relation greater than, e.g. ownerCount > 5",
    "GREATER_THAN_OR_EQUAL_TO": "Represent the relation greater than or equal to, e.g. ownerCount >= 5",
    "IN": "Represent the relation: String field is one of the array values to, e.g. name in [\"Profile\", \"Event\"]",
    "IS_NULL": "Represent the relation: field is null, e.g. platform is null",
    "LESS_THAN": "Represent the relation less than, e.g. ownerCount < 3",
    "LESS_THAN_OR_EQUAL_TO": "Represent the relation less than or equal to, e.g. ownerCount <= 3",
    "START_WITH": "Represent the relation: String field starts with value, e.g. name starts with PageView"
    },
    "name": "Condition",
    "namespace": "com.linkedin.metadata.query.filter",
    "symbols": [
    "CONTAIN",
    "END_WITH",
    "EQUAL",
    "IS_NULL",
    "EXISTS",
    "GREATER_THAN",
    "GREATER_THAN_OR_EQUAL_TO",
    "IN",
    "LESS_THAN",
    "LESS_THAN_OR_EQUAL_TO",
    "START_WITH"
    ],
    "doc": "The matching condition in a filter criterion"
    },
    "name": "condition",
    "default": "EQUAL",
    "doc": "The condition for the criterion, e.g. EQUAL, START_WITH"
    },
    {
    "type": "boolean",
    "name": "negated",
    "default": false,
    "doc": "Whether the condition should be negated"
    }
    ],
    "doc": "A criterion for matching a field with given value"
    }
    },
    "name": "and",
    "doc": "A list of and criteria the filter applies to the query"
    }
    ],
    "doc": "A list of criterion and'd together."
    }
    }
    ],
    "name": "or",
    "default": null,
    "doc": "A list of disjunctive criterion for the filter. (or operation to combine filters)"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "com.linkedin.metadata.query.filter.Criterion"
    }
    ],
    "name": "criteria",
    "default": null,
    "doc": "Deprecated! A list of conjunctive criterion for the filter. If \"or\" field is provided, then this field is ignored."
    }
    ],
    "doc": "The filter for finding a record or a collection of records"
    },
    "name": "filter",
    "doc": "The filter criteria, which represents the view itself"
    }
    ],
    "doc": "A View definition."
    },
    "name": "definition",
    "doc": "The view itself"
    },
    {
    "Searchable": {
    "/actor": {
    "fieldName": "createdBy",
    "fieldType": "URN"
    },
    "/time": {
    "fieldName": "createdAt",
    "fieldType": "DATETIME"
    }
    },
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "created",
    "doc": "Audit stamp capturing the time and actor who created the View."
    },
    {
    "Searchable": {
    "/time": {
    "fieldName": "lastModifiedAt",
    "fieldType": "DATETIME"
    }
    },
    "type": "com.linkedin.common.AuditStamp",
    "name": "lastModified",
    "doc": "Audit stamp capturing the time and actor who last modified the View."
    }
    ],
    "doc": "Information about a DataHub View. -- TODO: Understand whether an entity type filter is required."
    }

    Relationships

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/datajob/index.html b/docs/generated/metamodel/entities/datajob/index.html index 66bbaca542184..b5bcca4ebc24a 100644 --- a/docs/generated/metamodel/entities/datajob/index.html +++ b/docs/generated/metamodel/entities/datajob/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    DataJob

    Aspects

    dataJobKey

    Key for a Data Job

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataJobKey"
    },
    "name": "DataJobKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "dataFlow"
    ],
    "name": "IsPartOf"
    },
    "Searchable": {
    "fieldName": "dataFlow",
    "fieldType": "URN_PARTIAL",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "flow",
    "doc": "Standardized data processing flow urn representing the flow for the job"
    },
    {
    "Searchable": {
    "enableAutocomplete": true,
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "jobId",
    "doc": "Unique Identifier of the data job"
    }
    ],
    "doc": "Key for a Data Job"
    }

    dataJobInfo

    Information about a Data processing job

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataJobInfo"
    },
    "name": "DataJobInfo",
    "namespace": "com.linkedin.datajob",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "externalUrl",
    "default": null,
    "doc": "URL where the reference exist"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Job name"
    },
    {
    "Searchable": {
    "fieldType": "TEXT",
    "hasValuesFieldName": "hasDescription"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Job description"
    },
    {
    "type": [
    {
    "type": "enum",
    "symbolDocs": {
    "COMMAND": "The command job type is one of the basic built-in types. It runs multiple UNIX commands using java processbuilder.\nUpon execution, Azkaban spawns off a process to run the command.",
    "GLUE": "Glue type is for running AWS Glue job transforms.",
    "HADOOP_JAVA": "Runs a java program with ability to access Hadoop cluster.\nhttps://azkaban.readthedocs.io/en/latest/jobTypes.html#java-job-type",
    "HADOOP_SHELL": "In large part, this is the same Command type. The difference is its ability to talk to a Hadoop cluster\nsecurely, via Hadoop tokens.",
    "HIVE": "Hive type is for running Hive jobs.",
    "PIG": "Pig type is for running Pig jobs.",
    "SQL": "SQL is for running Presto, mysql queries etc"
    },
    "name": "AzkabanJobType",
    "namespace": "com.linkedin.datajob.azkaban",
    "symbols": [
    "COMMAND",
    "HADOOP_JAVA",
    "HADOOP_SHELL",
    "HIVE",
    "PIG",
    "SQL",
    "GLUE"
    ],
    "doc": "The various types of support azkaban jobs"
    },
    "string"
    ],
    "name": "type",
    "doc": "Datajob type\n*NOTE**: AzkabanJobType is deprecated. Please use strings instead."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.DataFlowUrn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "flowUrn",
    "default": null,
    "doc": "DataFlow urn that this job is part of"
    },
    {
    "Searchable": {
    "/time": {
    "fieldName": "createdAt",
    "fieldType": "DATETIME"
    }
    },
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the event occur"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "actor",
    "default": null,
    "doc": "Optional: The actor urn involved in the event."
    }
    ],
    "doc": "A standard event timestamp"
    }
    ],
    "name": "created",
    "default": null,
    "doc": "A timestamp documenting when the asset was created in the source Data Platform (not on DataHub)"
    },
    {
    "Searchable": {
    "/time": {
    "fieldName": "lastModifiedAt",
    "fieldType": "DATETIME"
    }
    },
    "type": [
    "null",
    "com.linkedin.common.TimeStamp"
    ],
    "name": "lastModified",
    "default": null,
    "doc": "A timestamp documenting when the asset was last modified in the source Data Platform (not on DataHub)"
    },
    {
    "deprecated": "Use Data Process Instance model, instead",
    "type": [
    "null",
    {
    "type": "enum",
    "symbolDocs": {
    "COMPLETED": "Jobs with successful completion.",
    "FAILED": "Jobs that have failed.",
    "IN_PROGRESS": "Jobs currently running.",
    "SKIPPED": "Jobs that have been skipped.",
    "STARTING": "Jobs being initialized.",
    "STOPPED": "Jobs that have stopped.",
    "STOPPING": "Jobs being stopped.",
    "UNKNOWN": "Jobs with unknown status (either unmappable or unavailable)"
    },
    "name": "JobStatus",
    "namespace": "com.linkedin.datajob",
    "symbols": [
    "STARTING",
    "IN_PROGRESS",
    "STOPPING",
    "STOPPED",
    "COMPLETED",
    "FAILED",
    "UNKNOWN",
    "SKIPPED"
    ],
    "doc": "Job statuses"
    }
    ],
    "name": "status",
    "default": null,
    "doc": "Status of the job - Deprecated for Data Process Instance model."
    }
    ],
    "doc": "Information about a Data processing job"
    }

    dataJobInputOutput

    Information about the inputs and outputs of a Data processing job

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataJobInputOutput"
    },
    "name": "DataJobInputOutput",
    "namespace": "com.linkedin.datajob",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataset"
    ],
    "isLineage": true,
    "name": "Consumes"
    }
    },
    "Searchable": {
    "/*": {
    "fieldName": "inputs",
    "fieldType": "URN",
    "numValuesFieldName": "numInputDatasets",
    "queryByDefault": false
    }
    },
    "deprecated": true,
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "inputDatasets",
    "doc": "Input datasets consumed by the data job during processing\nDeprecated! Use inputDatasetEdges instead."
    },
    {
    "Relationship": {
    "/*/destinationUrn": {
    "createdActor": "inputDatasetEdges/*/created/actor",
    "createdOn": "inputDatasetEdges/*/created/time",
    "entityTypes": [
    "dataset"
    ],
    "isLineage": true,
    "name": "Consumes",
    "properties": "inputDatasetEdges/*/properties",
    "updatedActor": "inputDatasetEdges/*/lastModified/actor",
    "updatedOn": "inputDatasetEdges/*/lastModified/time"
    }
    },
    "Searchable": {
    "/*/destinationUrn": {
    "fieldName": "inputDatasetEdges",
    "fieldType": "URN",
    "numValuesFieldName": "numInputDatasets",
    "queryByDefault": false
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Edge",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "sourceUrn",
    "default": null,
    "doc": "Urn of the source of this relationship edge.\nIf not specified, assumed to be the entity that this aspect belongs to."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "destinationUrn",
    "doc": "Urn of the destination of this relationship edge."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    }
    ],
    "name": "created",
    "default": null,
    "doc": "Audit stamp containing who created this relationship edge and when"
    },
    {
    "type": [
    "null",
    "com.linkedin.common.AuditStamp"
    ],
    "name": "lastModified",
    "default": null,
    "doc": "Audit stamp containing who last modified this relationship edge and when"
    },
    {
    "type": [
    "null",
    {
    "type": "map",
    "values": "string"
    }
    ],
    "name": "properties",
    "default": null,
    "doc": "A generic properties bag that allows us to store specific information on this graph edge."
    }
    ],
    "doc": "A common structure to represent all edges to entities when used inside aspects as collections\nThis ensures that all edges have common structure around audit-stamps and will support PATCH, time-travel automatically."
    }
    }
    ],
    "name": "inputDatasetEdges",
    "default": null,
    "doc": "Input datasets consumed by the data job during processing"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataset"
    ],
    "isLineage": true,
    "isUpstream": false,
    "name": "Produces"
    }
    },
    "Searchable": {
    "/*": {
    "fieldName": "outputs",
    "fieldType": "URN",
    "numValuesFieldName": "numOutputDatasets",
    "queryByDefault": false
    }
    },
    "deprecated": true,
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "outputDatasets",
    "doc": "Output datasets produced by the data job during processing\nDeprecated! Use outputDatasetEdges instead."
    },
    {
    "Relationship": {
    "/*/destinationUrn": {
    "createdActor": "outputDatasetEdges/*/created/actor",
    "createdOn": "outputDatasetEdges/*/created/time",
    "entityTypes": [
    "dataset"
    ],
    "isLineage": true,
    "isUpstream": false,
    "name": "Produces",
    "properties": "outputDatasetEdges/*/properties",
    "updatedActor": "outputDatasetEdges/*/lastModified/actor",
    "updatedOn": "outputDatasetEdges/*/lastModified/time"
    }
    },
    "Searchable": {
    "/*/destinationUrn": {
    "fieldName": "outputDatasetEdges",
    "fieldType": "URN",
    "numValuesFieldName": "numOutputDatasets",
    "queryByDefault": false
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "com.linkedin.common.Edge"
    }
    ],
    "name": "outputDatasetEdges",
    "default": null,
    "doc": "Output datasets produced by the data job during processing"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataJob"
    ],
    "isLineage": true,
    "name": "DownstreamOf"
    }
    },
    "deprecated": true,
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "inputDatajobs",
    "default": null,
    "doc": "Input datajobs that this data job depends on\nDeprecated! Use inputDatajobEdges instead."
    },
    {
    "Relationship": {
    "/*/destinationUrn": {
    "createdActor": "inputDatajobEdges/*/created/actor",
    "createdOn": "inputDatajobEdges/*/created/time",
    "entityTypes": [
    "dataJob"
    ],
    "isLineage": true,
    "name": "DownstreamOf",
    "properties": "inputDatajobEdges/*/properties",
    "updatedActor": "inputDatajobEdges/*/lastModified/actor",
    "updatedOn": "inputDatajobEdges/*/lastModified/time"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "com.linkedin.common.Edge"
    }
    ],
    "name": "inputDatajobEdges",
    "default": null,
    "doc": "Input datajobs that this data job depends on"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "schemaField"
    ],
    "name": "Consumes"
    }
    },
    "Searchable": {
    "/*": {
    "fieldName": "inputFields",
    "fieldType": "URN",
    "numValuesFieldName": "numInputFields",
    "queryByDefault": false
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "inputDatasetFields",
    "default": null,
    "doc": "Fields of the input datasets used by this job"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "schemaField"
    ],
    "name": "Produces"
    }
    },
    "Searchable": {
    "/*": {
    "fieldName": "outputFields",
    "fieldType": "URN",
    "numValuesFieldName": "numOutputFields",
    "queryByDefault": false
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "outputDatasetFields",
    "default": null,
    "doc": "Fields of the output datasets this job writes to"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "FineGrainedLineage",
    "namespace": "com.linkedin.dataset",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "DATASET": " Indicates that this lineage is originating from upstream dataset(s)",
    "FIELD_SET": " Indicates that this lineage is originating from upstream field(s)",
    "NONE": " Indicates that there is no upstream lineage i.e. the downstream field is not a derived field"
    },
    "name": "FineGrainedLineageUpstreamType",
    "namespace": "com.linkedin.dataset",
    "symbols": [
    "FIELD_SET",
    "DATASET",
    "NONE"
    ],
    "doc": "The type of upstream entity in a fine-grained lineage"
    },
    "name": "upstreamType",
    "doc": "The type of upstream entity"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "upstreams",
    "default": null,
    "doc": "Upstream entities in the lineage"
    },
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "FIELD": " Indicates that the lineage is for a single, specific, downstream field",
    "FIELD_SET": " Indicates that the lineage is for a set of downstream fields"
    },
    "name": "FineGrainedLineageDownstreamType",
    "namespace": "com.linkedin.dataset",
    "symbols": [
    "FIELD",
    "FIELD_SET"
    ],
    "doc": "The type of downstream field(s) in a fine-grained lineage"
    },
    "name": "downstreamType",
    "doc": "The type of downstream field(s)"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "downstreams",
    "default": null,
    "doc": "Downstream fields in the lineage"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "transformOperation",
    "default": null,
    "doc": "The transform operation applied to the upstream entities to produce the downstream field(s)"
    },
    {
    "type": "float",
    "name": "confidenceScore",
    "default": 1.0,
    "doc": "The confidence in this lineage between 0 (low confidence) and 1 (high confidence)"
    }
    ],
    "doc": "A fine-grained lineage from upstream fields/datasets to downstream field(s)"
    }
    }
    ],
    "name": "fineGrainedLineages",
    "default": null,
    "doc": "Fine-grained column-level lineages\nNot currently supported in the UI\nUse UpstreamLineage aspect for datasets to express Column Level Lineage for the UI"
    }
    ],
    "doc": "Information about the inputs and outputs of a Data processing job"
    }

    editableDataJobProperties

    Stores editable changes made to properties. This separates changes made from ingestion pipelines and edits in the UI to avoid accidental overwrites of user-provided data by ingestion pipelines

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "editableDataJobProperties"
    },
    "name": "EditableDataJobProperties",
    "namespace": "com.linkedin.datajob",
    "fields": [
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "created",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "An AuditStamp corresponding to the creation of this resource/association/sub-resource. A value of 0 for time indicates missing data."
    },
    {
    "type": "com.linkedin.common.AuditStamp",
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "An AuditStamp corresponding to the last modification of this resource/association/sub-resource. If no modification has happened since creation, lastModified should be the same as created. A value of 0 for time indicates missing data."
    },
    {
    "type": [
    "null",
    "com.linkedin.common.AuditStamp"
    ],
    "name": "deleted",
    "default": null,
    "doc": "An AuditStamp corresponding to the deletion of this resource/association/sub-resource. Logically, deleted MUST have a later timestamp than creation. It may or may not have the same time as lastModified depending upon the resource/association/sub-resource semantics."
    },
    {
    "Searchable": {
    "fieldName": "editedDescription",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Edited documentation of the data job "
    }
    ],
    "doc": "Stores editable changes made to properties. This separates changes made from\ningestion pipelines and edits in the UI to avoid accidental overwrites of user-provided data by ingestion pipelines"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that oversees the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "A person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    browsePaths

    Shared aspect containing Browse Paths to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePaths"
    },
    "name": "BrowsePaths",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "fieldName": "browsePaths",
    "fieldType": "BROWSE_PATH"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "paths",
    "doc": "A list of valid browse paths for the entity.\n\nBrowse paths are expected to be forward slash-separated strings. For example: 'prod/snowflake/datasetName'"
    }
    ],
    "doc": "Shared aspect containing Browse Paths to be indexed for an entity."
    }

    glossaryTerms

    Related business terms information

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    dataPlatformInstance

    The specific instance of the data platform that this entity belongs to

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstance"
    },
    "name": "DataPlatformInstance",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "URN",
    "filterNameOverride": "Platform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data Platform"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "platformInstance",
    "fieldType": "URN",
    "filterNameOverride": "Platform Instance"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "instance",
    "default": null,
    "doc": "Instance of the data platform (e.g. db instance)"
    }
    ],
    "doc": "The specific instance of the data platform that this entity belongs to"
    }

    browsePathsV2

    Shared aspect containing a Browse Path to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePathsV2"
    },
    "name": "BrowsePathsV2",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*/id": {
    "fieldName": "browsePathV2",
    "fieldType": "BROWSE_PATH_V2"
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "BrowsePathEntry",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "string",
    "name": "id",
    "doc": "The ID of the browse path entry. This is what gets stored in the index.\nIf there's an urn associated with this entry, id and urn will be the same"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "urn",
    "default": null,
    "doc": "Optional urn pointing to some entity in DataHub"
    }
    ],
    "doc": "Represents a single level in an entity's browsePathV2"
    }
    },
    "name": "path",
    "doc": "A valid browse path for the entity. This field is provided by DataHub by default.\nThis aspect is a newer version of browsePaths where we can encode more information in the path.\nThis path is also based on containers for a given entity if it has containers.\n\nThis is stored in elasticsearch as unit-separator delimited strings and only includes platform specific folders or containers.\nThese paths should not include high level info captured elsewhere ie. Platform and Environment."
    }
    ],
    "doc": "Shared aspect containing a Browse Path to be indexed for an entity."
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    versionInfo

    Information about a Data processing job

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "versionInfo"
    },
    "name": "VersionInfo",
    "namespace": "com.linkedin.datajob",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "externalUrl",
    "default": null,
    "doc": "URL where the reference exist"
    },
    {
    "type": "string",
    "name": "version",
    "doc": "The version which can identify a job version like a commit hash or md5 hash"
    },
    {
    "type": "string",
    "name": "versionType",
    "doc": "The type of the version like git hash or md5 hash"
    }
    ],
    "doc": "Information about a Data processing job"
    }

    datahubIngestionRunSummary (Timeseries)

    Summary of a datahub ingestion run for a given platform.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "datahubIngestionRunSummary",
    "type": "timeseries"
    },
    "name": "DatahubIngestionRunSummary",
    "namespace": "com.linkedin.datajob.datahub",
    "fields": [
    {
    "type": "long",
    "name": "timestampMillis",
    "doc": "The event timestamp field as epoch at UTC in milli seconds."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindowSize",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "CalendarInterval",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "SECOND",
    "MINUTE",
    "HOUR",
    "DAY",
    "WEEK",
    "MONTH",
    "QUARTER",
    "YEAR"
    ]
    },
    "name": "unit",
    "doc": "Interval unit such as minute/hour/day etc."
    },
    {
    "type": "int",
    "name": "multiple",
    "default": 1,
    "doc": "How many units. Defaults to 1."
    }
    ],
    "doc": "Defines the size of a time window."
    }
    ],
    "name": "eventGranularity",
    "default": null,
    "doc": "Granularity of the event if applicable"
    },
    {
    "type": [
    {
    "type": "record",
    "name": "PartitionSpec",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "PartitionType",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "FULL_TABLE",
    "QUERY",
    "PARTITION"
    ]
    },
    "name": "type",
    "default": "PARTITION"
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "partition",
    "doc": "String representation of the partition"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindow",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": "long",
    "name": "startTimeMillis",
    "doc": "Start time as epoch at UTC."
    },
    {
    "type": "com.linkedin.timeseries.TimeWindowSize",
    "name": "length",
    "doc": "The length of the window."
    }
    ]
    }
    ],
    "name": "timePartition",
    "default": null,
    "doc": "Time window of the partition if applicable"
    }
    ],
    "doc": "Defines how the data is partitioned"
    },
    "null"
    ],
    "name": "partitionSpec",
    "default": {
    "partition": "FULL_TABLE_SNAPSHOT",
    "type": "FULL_TABLE",
    "timePartition": null
    },
    "doc": "The optional partition specification."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "messageId",
    "default": null,
    "doc": "The optional messageId, if provided serves as a custom user-defined unique identifier for an aspect value."
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "pipelineName",
    "doc": "The name of the pipeline that ran ingestion, a stable unique user provided identifier.\n e.g. my_snowflake1-to-datahub."
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "platformInstanceId",
    "doc": "The id of the instance against which the ingestion pipeline ran.\ne.g.: Bigquery project ids, MySQL hostnames etc."
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "runId",
    "doc": "The runId for this pipeline instance."
    },
    {
    "TimeseriesField": {},
    "type": {
    "type": "enum",
    "symbolDocs": {
    "COMPLETED": "Jobs with successful completion.",
    "FAILED": "Jobs that have failed.",
    "IN_PROGRESS": "Jobs currently running.",
    "SKIPPED": "Jobs that have been skipped.",
    "STARTING": "Jobs being initialized.",
    "STOPPED": "Jobs that have stopped.",
    "STOPPING": "Jobs being stopped.",
    "UNKNOWN": "Jobs with unknown status (either unmappable or unavailable)"
    },
    "name": "JobStatus",
    "namespace": "com.linkedin.datajob",
    "symbols": [
    "STARTING",
    "IN_PROGRESS",
    "STOPPING",
    "STOPPED",
    "COMPLETED",
    "FAILED",
    "UNKNOWN",
    "SKIPPED"
    ],
    "doc": "Job statuses"
    },
    "name": "runStatus",
    "doc": "Run Status - Succeeded/Skipped/Failed etc."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "numWorkUnitsCommitted",
    "default": null,
    "doc": "The number of workunits written to sink."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "numWorkUnitsCreated",
    "default": null,
    "doc": "The number of workunits that are produced."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "numEvents",
    "default": null,
    "doc": "The number of events produced (MCE + MCP)."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "numEntities",
    "default": null,
    "doc": "The total number of entities produced (unique entity urns)."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "numAspects",
    "default": null,
    "doc": "The total number of aspects produced across all entities."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "numSourceAPICalls",
    "default": null,
    "doc": "Total number of source API calls."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "totalLatencySourceAPICalls",
    "default": null,
    "doc": "Total latency across all source API calls."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "numSinkAPICalls",
    "default": null,
    "doc": "Total number of sink API calls."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "totalLatencySinkAPICalls",
    "default": null,
    "doc": "Total latency across all sink API calls."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "numWarnings",
    "default": null,
    "doc": "Number of warnings generated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "numErrors",
    "default": null,
    "doc": "Number of errors generated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "numEntitiesSkipped",
    "default": null,
    "doc": "Number of entities skipped."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "config",
    "default": null,
    "doc": "The non-sensitive key-value pairs of the yaml config used as json string."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "custom_summary",
    "default": null,
    "doc": "Custom value."
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "string"
    ],
    "name": "softwareVersion",
    "default": null,
    "doc": "The software version of this ingestion."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "systemHostName",
    "default": null,
    "doc": "The hostname the ingestion pipeline ran on."
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "string"
    ],
    "name": "operatingSystemName",
    "default": null,
    "doc": "The os the ingestion pipeline ran on."
    },
    {
    "type": [
    "null",
    "int"
    ],
    "name": "numProcessors",
    "default": null,
    "doc": "The number of processors on the host the ingestion pipeline ran on."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "totalMemory",
    "default": null,
    "doc": "The total amount of memory on the host the ingestion pipeline ran on."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "availableMemory",
    "default": null,
    "doc": "The available memory on the host the ingestion pipeline ran on."
    }
    ],
    "doc": "Summary of a datahub ingestion run for a given platform."
    }

    datahubIngestionCheckpoint (Timeseries)

    Checkpoint of a datahub ingestion run for a given job.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "datahubIngestionCheckpoint",
    "type": "timeseries"
    },
    "name": "DatahubIngestionCheckpoint",
    "namespace": "com.linkedin.datajob.datahub",
    "fields": [
    {
    "type": "long",
    "name": "timestampMillis",
    "doc": "The event timestamp field as epoch at UTC in milli seconds."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindowSize",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "CalendarInterval",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "SECOND",
    "MINUTE",
    "HOUR",
    "DAY",
    "WEEK",
    "MONTH",
    "QUARTER",
    "YEAR"
    ]
    },
    "name": "unit",
    "doc": "Interval unit such as minute/hour/day etc."
    },
    {
    "type": "int",
    "name": "multiple",
    "default": 1,
    "doc": "How many units. Defaults to 1."
    }
    ],
    "doc": "Defines the size of a time window."
    }
    ],
    "name": "eventGranularity",
    "default": null,
    "doc": "Granularity of the event if applicable"
    },
    {
    "type": [
    {
    "type": "record",
    "name": "PartitionSpec",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "PartitionType",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "FULL_TABLE",
    "QUERY",
    "PARTITION"
    ]
    },
    "name": "type",
    "default": "PARTITION"
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "partition",
    "doc": "String representation of the partition"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindow",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": "long",
    "name": "startTimeMillis",
    "doc": "Start time as epoch at UTC."
    },
    {
    "type": "com.linkedin.timeseries.TimeWindowSize",
    "name": "length",
    "doc": "The length of the window."
    }
    ]
    }
    ],
    "name": "timePartition",
    "default": null,
    "doc": "Time window of the partition if applicable"
    }
    ],
    "doc": "Defines how the data is partitioned"
    },
    "null"
    ],
    "name": "partitionSpec",
    "default": {
    "partition": "FULL_TABLE_SNAPSHOT",
    "type": "FULL_TABLE",
    "timePartition": null
    },
    "doc": "The optional partition specification."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "messageId",
    "default": null,
    "doc": "The optional messageId, if provided serves as a custom user-defined unique identifier for an aspect value."
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "pipelineName",
    "doc": "The name of the pipeline that ran ingestion, a stable unique user provided identifier.\n e.g. my_snowflake1-to-datahub."
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "platformInstanceId",
    "doc": "The id of the instance against which the ingestion pipeline ran.\ne.g.: Bigquery project ids, MySQL hostnames etc."
    },
    {
    "type": "string",
    "name": "config",
    "doc": "Json-encoded string representation of the non-secret members of the config ."
    },
    {
    "type": {
    "type": "record",
    "name": "IngestionCheckpointState",
    "namespace": "com.linkedin.datajob.datahub",
    "fields": [
    {
    "type": "string",
    "name": "formatVersion",
    "doc": "The version of the state format."
    },
    {
    "type": "string",
    "name": "serde",
    "doc": "The serialization/deserialization protocol."
    },
    {
    "type": [
    "null",
    "bytes"
    ],
    "name": "payload",
    "default": null,
    "doc": "Opaque blob of the state representation."
    }
    ],
    "doc": "The checkpoint state object of a datahub ingestion run for a given job."
    },
    "name": "state",
    "doc": "Opaque blob of the state representation."
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "runId",
    "doc": "The run identifier of this job."
    }
    ],
    "doc": "Checkpoint of a datahub ingestion run for a given job."
    }

    Relationships

    Self

    These are the relationships to itself, stored in this entity's aspects

    • DownstreamOf (via dataJobInputOutput.inputDatajobs)
    • DownstreamOf (via dataJobInputOutput.inputDatajobEdges)

    Outgoing

    These are the relationships stored in this entity's aspects

    • IsPartOf

      • DataFlow via dataJobKey.flow
    • Consumes

      • Dataset via dataJobInputOutput.inputDatasets
      • Dataset via dataJobInputOutput.inputDatasetEdges
      • SchemaField via dataJobInputOutput.inputDatasetFields
    • Produces

      • Dataset via dataJobInputOutput.outputDatasets
      • Dataset via dataJobInputOutput.outputDatasetEdges
      • SchemaField via dataJobInputOutput.outputDatasetFields
    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • TaggedWith

      • Tag via globalTags.tags
    • TermedWith

      • GlossaryTerm via glossaryTerms.terms.urn
    • AssociatedWith

      • Domain via domains.domains

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/dataplatform/index.html b/docs/generated/metamodel/entities/dataplatform/index.html index 350ba765189aa..62e3fe406215a 100644 --- a/docs/generated/metamodel/entities/dataplatform/index.html +++ b/docs/generated/metamodel/entities/dataplatform/index.html @@ -8,13 +8,13 @@ - +

    Data Platform

    Data Platforms are systems or tools that contain Datasets, Dashboards, Charts, and all other kinds of data assets modeled in the metadata graph.

    Examples of data platforms are redshift, hive, bigquery, looker, tableau etc.

    Identity

    Data Platforms are identified by the name of the technology. A complete list of currently supported data platforms is available here.

    Aspects

    dataPlatformKey

    Key for a Data Platform

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformKey"
    },
    "name": "DataPlatformKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "type": "string",
    "name": "platformName",
    "doc": "Data platform name i.e. hdfs, oracle, espresso"
    }
    ],
    "doc": "Key for a Data Platform"
    }

    dataPlatformInfo

    Information about a data platform

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInfo"
    },
    "name": "DataPlatformInfo",
    "namespace": "com.linkedin.dataplatform",
    "fields": [
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": false,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "validate": {
    "strlen": {
    "max": 15
    }
    },
    "type": "string",
    "name": "name",
    "doc": "Name of the data platform"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldType": "WORD_GRAM"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "displayName",
    "default": null,
    "doc": "The name that will be used for displaying a platform type."
    },
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "FILE_SYSTEM": "Value for a file system, e.g. hdfs",
    "KEY_VALUE_STORE": "Value for a key value store, e.g. espresso, voldemort",
    "MESSAGE_BROKER": "Value for a message broker, e.g. kafka",
    "OBJECT_STORE": "Value for an object store, e.g. ambry",
    "OLAP_DATASTORE": "Value for an OLAP datastore, e.g. pinot",
    "OTHERS": "Value for other platforms, e.g salesforce, dovetail",
    "QUERY_ENGINE": "Value for a query engine, e.g. presto",
    "RELATIONAL_DB": "Value for a relational database, e.g. oracle, mysql",
    "SEARCH_ENGINE": "Value for a search engine, e.g seas"
    },
    "name": "PlatformType",
    "namespace": "com.linkedin.dataplatform",
    "symbols": [
    "FILE_SYSTEM",
    "KEY_VALUE_STORE",
    "MESSAGE_BROKER",
    "OBJECT_STORE",
    "OLAP_DATASTORE",
    "OTHERS",
    "QUERY_ENGINE",
    "RELATIONAL_DB",
    "SEARCH_ENGINE"
    ],
    "doc": "Platform types available at LinkedIn"
    },
    "name": "type",
    "doc": "Platform type this data platform describes"
    },
    {
    "type": "string",
    "name": "datasetNameDelimiter",
    "doc": "The delimiter in the dataset names on the data platform, e.g. '/' for HDFS and '.' for Oracle"
    },
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "logoUrl",
    "default": null,
    "doc": "The URL for a logo associated with the platform"
    }
    ],
    "doc": "Information about a data platform"
    }

    Relationships

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/dataplatforminstance/index.html b/docs/generated/metamodel/entities/dataplatforminstance/index.html index def7fa535fffb..2779eb5577af7 100644 --- a/docs/generated/metamodel/entities/dataplatforminstance/index.html +++ b/docs/generated/metamodel/entities/dataplatforminstance/index.html @@ -8,14 +8,14 @@ - +

    DataPlatformInstance

    Aspects

    dataPlatformInstanceProperties

    Properties associated with a Data Platform Instance

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstanceProperties"
    },
    "name": "DataPlatformInstanceProperties",
    "namespace": "com.linkedin.dataplatforminstance",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "externalUrl",
    "default": null,
    "doc": "URL where the reference exist"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "name",
    "default": null,
    "doc": "Display name of the Data Platform Instance"
    },
    {
    "Searchable": {
    "fieldType": "TEXT",
    "hasValuesFieldName": "hasDescription"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the Data Platform Instance"
    }
    ],
    "doc": "Properties associated with a Data Platform Instance"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that overseas the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • TaggedWith

      • Tag via globalTags.tags

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/dataprocess/index.html b/docs/generated/metamodel/entities/dataprocess/index.html index 4906820a2dd03..6c77dbd433fcc 100644 --- a/docs/generated/metamodel/entities/dataprocess/index.html +++ b/docs/generated/metamodel/entities/dataprocess/index.html @@ -8,14 +8,14 @@ - +

    DataProcess

    Aspects

    dataProcessKey

    Key for a Data Process

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataProcessKey"
    },
    "name": "DataProcessKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "Searchable": {
    "boostScore": 4.0,
    "enableAutocomplete": true,
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Process name i.e. an ETL job name"
    },
    {
    "Searchable": {
    "enableAutocomplete": true,
    "fieldType": "TEXT_PARTIAL"
    },
    "type": "string",
    "name": "orchestrator",
    "doc": "Standardized Orchestrator where data process is defined.\nTODO: Migrate towards something that can be validated like DataPlatform urn"
    },
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL",
    "queryByDefault": false
    },
    "type": {
    "type": "enum",
    "symbolDocs": {
    "CORP": "Designates corporation fabrics",
    "DEV": "Designates development fabrics",
    "EI": "Designates early-integration fabrics",
    "NON_PROD": "Designates non-production fabrics",
    "PRE": "Designates pre-production fabrics",
    "PROD": "Designates production fabrics",
    "QA": "Designates quality assurance fabrics",
    "STG": "Designates staging fabrics",
    "TEST": "Designates testing fabrics",
    "UAT": "Designates user acceptance testing fabrics"
    },
    "name": "FabricType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "DEV",
    "TEST",
    "QA",
    "UAT",
    "EI",
    "PRE",
    "STG",
    "NON_PROD",
    "PROD",
    "CORP"
    ],
    "doc": "Fabric group type"
    },
    "name": "origin",
    "doc": "Fabric type where dataset belongs to or where it was generated."
    }
    ],
    "doc": "Key for a Data Process"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that overseas the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    dataProcessInfo

    The inputs and outputs of this data process

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataProcessInfo"
    },
    "name": "DataProcessInfo",
    "namespace": "com.linkedin.dataprocess",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataset"
    ],
    "isLineage": true,
    "name": "Consumes"
    }
    },
    "Searchable": {
    "/*": {
    "fieldName": "inputs",
    "fieldType": "URN",
    "numValuesFieldName": "numInputDatasets",
    "queryByDefault": false
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "inputs",
    "default": null,
    "doc": "the inputs of the data process"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataset"
    ],
    "isLineage": true,
    "name": "Consumes"
    }
    },
    "Searchable": {
    "/*": {
    "fieldName": "outputs",
    "fieldType": "URN",
    "numValuesFieldName": "numOutputDatasets",
    "queryByDefault": false
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "outputs",
    "default": null,
    "doc": "the outputs of the data process"
    }
    ],
    "doc": "The inputs and outputs of this data process"
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • Consumes

      • Dataset via dataProcessInfo.inputs
      • Dataset via dataProcessInfo.outputs

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/dataprocessinstance/index.html b/docs/generated/metamodel/entities/dataprocessinstance/index.html index 5cc71bd857bfc..17bc50910ecdd 100644 --- a/docs/generated/metamodel/entities/dataprocessinstance/index.html +++ b/docs/generated/metamodel/entities/dataprocessinstance/index.html @@ -8,14 +8,14 @@ - +

    DataProcessInstance

    DataProcessInstance represents an instance of a datajob/jobflow run

    Aspects

    dataProcessInstanceInput

    Information about the inputs datasets of a Data process

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataProcessInstanceInput"
    },
    "name": "DataProcessInstanceInput",
    "namespace": "com.linkedin.dataprocess",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataset"
    ],
    "name": "Consumes"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "inputs",
    "fieldType": "URN",
    "numValuesFieldName": "numInputs",
    "queryByDefault": false
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "inputs",
    "doc": "Input datasets to be consumed"
    }
    ],
    "doc": "Information about the inputs datasets of a Data process"
    }

    dataProcessInstanceOutput

    Information about the outputs of a Data process

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataProcessInstanceOutput"
    },
    "name": "DataProcessInstanceOutput",
    "namespace": "com.linkedin.dataprocess",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataset"
    ],
    "name": "Produces"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "outputs",
    "fieldType": "URN",
    "numValuesFieldName": "numOutputs",
    "queryByDefault": false
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "outputs",
    "doc": "Output datasets to be produced"
    }
    ],
    "doc": "Information about the outputs of a Data process"
    }

    dataProcessInstanceProperties

    The inputs and outputs of this data process

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataProcessInstanceProperties"
    },
    "name": "DataProcessInstanceProperties",
    "namespace": "com.linkedin.dataprocess",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "externalUrl",
    "default": null,
    "doc": "URL where the reference exist"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Process name"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "processType",
    "fieldType": "KEYWORD",
    "filterNameOverride": "Process Type"
    },
    "type": [
    "null",
    {
    "type": "enum",
    "name": "DataProcessType",
    "namespace": "com.linkedin.dataprocess",
    "symbols": [
    "BATCH_SCHEDULED",
    "BATCH_AD_HOC",
    "STREAMING"
    ]
    }
    ],
    "name": "type",
    "default": null,
    "doc": "Process type"
    },
    {
    "Searchable": {
    "/time": {
    "fieldName": "created",
    "fieldType": "COUNT",
    "queryByDefault": false
    }
    },
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "created",
    "doc": "Audit stamp containing who reported the lineage and when"
    }
    ],
    "doc": "The inputs and outputs of this data process"
    }

    dataProcessInstanceRelationships

    Information about Data process relationships

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataProcessInstanceRelationships"
    },
    "name": "DataProcessInstanceRelationships",
    "namespace": "com.linkedin.dataprocess",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "dataJob",
    "dataFlow"
    ],
    "name": "InstanceOf"
    },
    "Searchable": {
    "/*": {
    "fieldName": "parentTemplate",
    "fieldType": "URN",
    "queryByDefault": false
    }
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "parentTemplate",
    "default": null,
    "doc": "The parent entity whose run instance it is"
    },
    {
    "Relationship": {
    "entityTypes": [
    "dataProcessInstance"
    ],
    "name": "ChildOf"
    },
    "Searchable": {
    "/*": {
    "fieldName": "parentInstance",
    "fieldType": "URN",
    "queryByDefault": false
    }
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "parentInstance",
    "default": null,
    "doc": "The parent DataProcessInstance where it belongs to.\nIf it is a Airflow Task then it should belong to an Airflow Dag run as well\nwhich will be another DataProcessInstance"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataProcessInstance"
    ],
    "name": "UpstreamOf"
    }
    },
    "Searchable": {
    "/*": {
    "fieldName": "upstream",
    "fieldType": "URN",
    "numValuesFieldName": "numUpstreams",
    "queryByDefault": false
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "upstreamInstances",
    "doc": "Input DataProcessInstance which triggered this dataprocess instance"
    }
    ],
    "doc": "Information about Data process relationships"
    }

    dataProcessInstanceRunEvent (Timeseries)

    An event representing the current status of data process run. DataProcessRunEvent should be used for reporting the status of a dataProcess' run.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataProcessInstanceRunEvent",
    "type": "timeseries"
    },
    "name": "DataProcessInstanceRunEvent",
    "namespace": "com.linkedin.dataprocess",
    "fields": [
    {
    "type": "long",
    "name": "timestampMillis",
    "doc": "The event timestamp field as epoch at UTC in milli seconds."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindowSize",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "CalendarInterval",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "SECOND",
    "MINUTE",
    "HOUR",
    "DAY",
    "WEEK",
    "MONTH",
    "QUARTER",
    "YEAR"
    ]
    },
    "name": "unit",
    "doc": "Interval unit such as minute/hour/day etc."
    },
    {
    "type": "int",
    "name": "multiple",
    "default": 1,
    "doc": "How many units. Defaults to 1."
    }
    ],
    "doc": "Defines the size of a time window."
    }
    ],
    "name": "eventGranularity",
    "default": null,
    "doc": "Granularity of the event if applicable"
    },
    {
    "type": [
    {
    "type": "record",
    "name": "PartitionSpec",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "PartitionType",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "FULL_TABLE",
    "QUERY",
    "PARTITION"
    ]
    },
    "name": "type",
    "default": "PARTITION"
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "partition",
    "doc": "String representation of the partition"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindow",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": "long",
    "name": "startTimeMillis",
    "doc": "Start time as epoch at UTC."
    },
    {
    "type": "com.linkedin.timeseries.TimeWindowSize",
    "name": "length",
    "doc": "The length of the window."
    }
    ]
    }
    ],
    "name": "timePartition",
    "default": null,
    "doc": "Time window of the partition if applicable"
    }
    ],
    "doc": "Defines how the data is partitioned"
    },
    "null"
    ],
    "name": "partitionSpec",
    "default": {
    "partition": "FULL_TABLE_SNAPSHOT",
    "type": "FULL_TABLE",
    "timePartition": null
    },
    "doc": "The optional partition specification."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "messageId",
    "default": null,
    "doc": "The optional messageId, if provided serves as a custom user-defined unique identifier for an aspect value."
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "externalUrl",
    "default": null,
    "doc": "URL where the reference exist"
    },
    {
    "TimeseriesField": {},
    "type": {
    "type": "enum",
    "symbolDocs": {
    "STARTED": "The status where the Data processing run is in."
    },
    "name": "DataProcessRunStatus",
    "namespace": "com.linkedin.dataprocess",
    "symbols": [
    "STARTED",
    "COMPLETE"
    ]
    },
    "name": "status"
    },
    {
    "type": [
    "null",
    "int"
    ],
    "name": "attempt",
    "default": null,
    "doc": "Return the try number that this Instance Run is in"
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    {
    "type": "record",
    "name": "DataProcessInstanceRunResult",
    "namespace": "com.linkedin.dataprocess",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "FAILURE": " The Run Failed",
    "SKIPPED": " The Run Skipped",
    "SUCCESS": " The Run Succeeded",
    "UP_FOR_RETRY": " The Run Failed and will Retry"
    },
    "name": "RunResultType",
    "namespace": "com.linkedin.dataprocess",
    "symbols": [
    "SUCCESS",
    "FAILURE",
    "SKIPPED",
    "UP_FOR_RETRY"
    ]
    },
    "name": "type",
    "doc": " The final result, e.g. SUCCESS, FAILURE, SKIPPED, or UP_FOR_RETRY."
    },
    {
    "type": "string",
    "name": "nativeResultType",
    "doc": "It identifies the system where the native result comes from like Airflow, Azkaban, etc.."
    }
    ]
    }
    ],
    "name": "result",
    "default": null,
    "doc": "The final result of the Data Processing run."
    }
    ],
    "doc": "An event representing the current status of data process run.\nDataProcessRunEvent should be used for reporting the status of a dataProcess' run."
    }

    Relationships

    Self

    These are the relationships to itself, stored in this entity's aspects

    • ChildOf (via dataProcessInstanceRelationships.parentInstance)
    • UpstreamOf (via dataProcessInstanceRelationships.upstreamInstances)

    Outgoing

    These are the relationships stored in this entity's aspects

    • Consumes

      • Dataset via dataProcessInstanceInput.inputs
    • Produces

      • Dataset via dataProcessInstanceOutput.outputs
    • InstanceOf

      • DataJob via dataProcessInstanceRelationships.parentTemplate
      • DataFlow via dataProcessInstanceRelationships.parentTemplate

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/dataproduct/index.html b/docs/generated/metamodel/entities/dataproduct/index.html index c98d8606a2a9e..2aa1ea3c95fdd 100644 --- a/docs/generated/metamodel/entities/dataproduct/index.html +++ b/docs/generated/metamodel/entities/dataproduct/index.html @@ -8,14 +8,14 @@ - +

    DataProduct

    Aspects

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that overseas the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    glossaryTerms

    Related business terms information

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    dataProductProperties

    The main properties of a Data Product

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataProductProperties"
    },
    "name": "DataProductProperties",
    "namespace": "com.linkedin.dataproduct",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "externalUrl",
    "default": null,
    "doc": "URL where the reference exist"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "name",
    "default": null,
    "doc": "Display name of the Data Product"
    },
    {
    "Searchable": {
    "fieldType": "TEXT",
    "hasValuesFieldName": "hasDescription"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the dataset"
    },
    {
    "Relationship": {
    "/*/destinationUrn": {
    "entityTypes": [
    "dataset",
    "dataJob",
    "dataFlow",
    "chart",
    "dashboard",
    "notebook",
    "container",
    "mlModel",
    "mlModelGroup",
    "mlFeatureTable",
    "mlFeature",
    "mlPrimaryKey"
    ],
    "name": "DataProductContains"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "DataProductAssociation",
    "namespace": "com.linkedin.dataproduct",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "sourceUrn",
    "default": null,
    "doc": "Urn of the source of this relationship edge.\nIf not specified, assumed to be the entity that this aspect belongs to."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "destinationUrn",
    "doc": "Urn of the destination of this relationship edge."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    }
    ],
    "name": "created",
    "default": null,
    "doc": "Audit stamp containing who created this relationship edge and when"
    },
    {
    "type": [
    "null",
    "com.linkedin.common.AuditStamp"
    ],
    "name": "lastModified",
    "default": null,
    "doc": "Audit stamp containing who last modified this relationship edge and when"
    },
    {
    "type": [
    "null",
    {
    "type": "map",
    "values": "string"
    }
    ],
    "name": "properties",
    "default": null,
    "doc": "A generic properties bag that allows us to store specific information on this graph edge."
    }
    ],
    "doc": "Represents an association of assets to a Data Product."
    }
    }
    ],
    "name": "assets",
    "default": null,
    "doc": "A list of assets that are part of this Data Product"
    }
    ],
    "doc": "The main properties of a Data Product"
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • TermedWith

      • GlossaryTerm via glossaryTerms.terms.urn
    • TaggedWith

      • Tag via globalTags.tags
    • AssociatedWith

      • Domain via domains.domains
    • DataProductContains

      • Dataset via dataProductProperties.assets
      • DataJob via dataProductProperties.assets
      • DataFlow via dataProductProperties.assets
      • Chart via dataProductProperties.assets
      • Dashboard via dataProductProperties.assets
      • Notebook via dataProductProperties.assets
      • Container via dataProductProperties.assets
      • MlModel via dataProductProperties.assets
      • MlModelGroup via dataProductProperties.assets
      • MlFeatureTable via dataProductProperties.assets
      • MlFeature via dataProductProperties.assets
      • MlPrimaryKey via dataProductProperties.assets

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/dataset/index.html b/docs/generated/metamodel/entities/dataset/index.html index daf77976c33f7..21d791f016da2 100644 --- a/docs/generated/metamodel/entities/dataset/index.html +++ b/docs/generated/metamodel/entities/dataset/index.html @@ -8,7 +8,7 @@ - + @@ -23,7 +23,7 @@ e.g. Gets activated when subTypes is view

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "viewProperties"
    },
    "name": "ViewProperties",
    "namespace": "com.linkedin.dataset",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "materialized",
    "doc": "Whether the view is materialized"
    },
    {
    "type": "string",
    "name": "viewLogic",
    "doc": "The view logic"
    },
    {
    "type": "string",
    "name": "viewLanguage",
    "doc": "The view logic language / dialect"
    }
    ],
    "doc": "Details about a View. \ne.g. Gets activated when subTypes is view"
    }

    browsePathsV2

    Shared aspect containing a Browse Path to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePathsV2"
    },
    "name": "BrowsePathsV2",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*/id": {
    "fieldName": "browsePathV2",
    "fieldType": "BROWSE_PATH_V2"
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "BrowsePathEntry",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "string",
    "name": "id",
    "doc": "The ID of the browse path entry. This is what gets stored in the index.\nIf there's an urn associated with this entry, id and urn will be the same"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "urn",
    "default": null,
    "doc": "Optional urn pointing to some entity in DataHub"
    }
    ],
    "doc": "Represents a single level in an entity's browsePathV2"
    }
    },
    "name": "path",
    "doc": "A valid browse path for the entity. This field is provided by DataHub by default.\nThis aspect is a newer version of browsePaths where we can encode more information in the path.\nThis path is also based on containers for a given entity if it has containers.\n\nThis is stored in elasticsearch as unit-separator delimited strings and only includes platform specific folders or containers.\nThese paths should not include high level info captured elsewhere ie. Platform and Environment."
    }
    ],
    "doc": "Shared aspect containing a Browse Path to be indexed for an entity."
    }

    subTypes

    Sub Types. Use this aspect to specialize a generic Entity e.g. Making a Dataset also be a View or also be a LookerExplore

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "subTypes"
    },
    "name": "SubTypes",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldType": "KEYWORD",
    "filterNameOverride": "Sub Type",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "typeNames",
    "doc": "The names of the specific types."
    }
    ],
    "doc": "Sub Types. Use this aspect to specialize a generic Entity\ne.g. Making a Dataset also be a View or also be a LookerExplore"
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    container

    Link from an asset to its parent container

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "container"
    },
    "name": "Container",
    "namespace": "com.linkedin.container",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "container"
    ],
    "name": "IsPartOf"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "container",
    "fieldType": "URN",
    "filterNameOverride": "Container",
    "hasValuesFieldName": "hasContainer"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "container",
    "doc": "The parent container of an asset"
    }
    ],
    "doc": "Link from an asset to its parent container"
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    testResults

    Information about a Test Result

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "testResults"
    },
    "name": "TestResults",
    "namespace": "com.linkedin.test",
    "fields": [
    {
    "Relationship": {
    "/*/test": {
    "entityTypes": [
    "test"
    ],
    "name": "IsFailing"
    }
    },
    "Searchable": {
    "/*/test": {
    "fieldName": "failingTests",
    "fieldType": "URN",
    "hasValuesFieldName": "hasFailingTests"
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TestResult",
    "namespace": "com.linkedin.test",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "test",
    "doc": "The urn of the test"
    },
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "FAILURE": " The Test Failed",
    "SUCCESS": " The Test Succeeded"
    },
    "name": "TestResultType",
    "namespace": "com.linkedin.test",
    "symbols": [
    "SUCCESS",
    "FAILURE"
    ]
    },
    "name": "type",
    "doc": "The type of the result"
    }
    ],
    "doc": "Information about a Test Result"
    }
    },
    "name": "failing",
    "doc": "Results that are failing"
    },
    {
    "Relationship": {
    "/*/test": {
    "entityTypes": [
    "test"
    ],
    "name": "IsPassing"
    }
    },
    "Searchable": {
    "/*/test": {
    "fieldName": "passingTests",
    "fieldType": "URN",
    "hasValuesFieldName": "hasPassingTests"
    }
    },
    "type": {
    "type": "array",
    "items": "com.linkedin.test.TestResult"
    },
    "name": "passing",
    "doc": "Results that are passing"
    }
    ],
    "doc": "Information about a Test Result"
    }

    siblings

    Siblings information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "siblings"
    },
    "name": "Siblings",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataset"
    ],
    "name": "SiblingOf"
    }
    },
    "Searchable": {
    "/*": {
    "fieldName": "siblings",
    "fieldType": "URN",
    "queryByDefault": false
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "siblings",
    "doc": "List of sibling entities"
    },
    {
    "type": "boolean",
    "name": "primary",
    "doc": "If this is the leader entity of the set of siblings"
    }
    ],
    "doc": "Siblings information of an entity."
    }

    embed

    Information regarding rendering an embed for an asset.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "embed"
    },
    "name": "Embed",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "renderUrl",
    "default": null,
    "doc": "An embed URL to be rendered inside of an iframe."
    }
    ],
    "doc": "Information regarding rendering an embed for an asset."
    }

    access

    Aspect used for associating roles to a dataset or any asset

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "access"
    },
    "name": "Access",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "RoleAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "role"
    ],
    "name": "AssociatedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "roles",
    "fieldType": "URN",
    "filterNameOverride": "Role",
    "hasValuesFieldName": "hasRoles"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the External Role"
    }
    ],
    "doc": "Properties of an applied Role. For now, just an Urn"
    }
    }
    ],
    "name": "roles",
    "default": null,
    "doc": "List of Roles which needs to be associated"
    }
    ],
    "doc": "Aspect used for associating roles to a dataset or any asset"
    }

    datasetProfile (Timeseries)

    Stats corresponding to datasets

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "datasetProfile",
    "type": "timeseries"
    },
    "name": "DatasetProfile",
    "namespace": "com.linkedin.dataset",
    "fields": [
    {
    "type": "long",
    "name": "timestampMillis",
    "doc": "The event timestamp field as epoch at UTC in milli seconds."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindowSize",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "CalendarInterval",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "SECOND",
    "MINUTE",
    "HOUR",
    "DAY",
    "WEEK",
    "MONTH",
    "QUARTER",
    "YEAR"
    ]
    },
    "name": "unit",
    "doc": "Interval unit such as minute/hour/day etc."
    },
    {
    "type": "int",
    "name": "multiple",
    "default": 1,
    "doc": "How many units. Defaults to 1."
    }
    ],
    "doc": "Defines the size of a time window."
    }
    ],
    "name": "eventGranularity",
    "default": null,
    "doc": "Granularity of the event if applicable"
    },
    {
    "type": [
    {
    "type": "record",
    "name": "PartitionSpec",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "PartitionType",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "FULL_TABLE",
    "QUERY",
    "PARTITION"
    ]
    },
    "name": "type",
    "default": "PARTITION"
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "partition",
    "doc": "String representation of the partition"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindow",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": "long",
    "name": "startTimeMillis",
    "doc": "Start time as epoch at UTC."
    },
    {
    "type": "com.linkedin.timeseries.TimeWindowSize",
    "name": "length",
    "doc": "The length of the window."
    }
    ]
    }
    ],
    "name": "timePartition",
    "default": null,
    "doc": "Time window of the partition if applicable"
    }
    ],
    "doc": "Defines how the data is partitioned"
    },
    "null"
    ],
    "name": "partitionSpec",
    "default": {
    "partition": "FULL_TABLE_SNAPSHOT",
    "type": "FULL_TABLE",
    "timePartition": null
    },
    "doc": "The optional partition specification."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "messageId",
    "default": null,
    "doc": "The optional messageId, if provided serves as a custom user-defined unique identifier for an aspect value."
    },
    {
    "Searchable": {
    "fieldType": "COUNT",
    "hasValuesFieldName": "hasRowCount"
    },
    "type": [
    "null",
    "long"
    ],
    "name": "rowCount",
    "default": null,
    "doc": "The total number of rows"
    },
    {
    "Searchable": {
    "fieldType": "COUNT",
    "hasValuesFieldName": "hasColumnCount"
    },
    "type": [
    "null",
    "long"
    ],
    "name": "columnCount",
    "default": null,
    "doc": "The total number of columns (or schema fields)"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "DatasetFieldProfile",
    "namespace": "com.linkedin.dataset",
    "fields": [
    {
    "type": "string",
    "name": "fieldPath"
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "uniqueCount",
    "default": null
    },
    {
    "type": [
    "null",
    "float"
    ],
    "name": "uniqueProportion",
    "default": null
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "nullCount",
    "default": null
    },
    {
    "type": [
    "null",
    "float"
    ],
    "name": "nullProportion",
    "default": null
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "min",
    "default": null
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "max",
    "default": null
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "mean",
    "default": null
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "median",
    "default": null
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "stdev",
    "default": null
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Quantile",
    "namespace": "com.linkedin.dataset",
    "fields": [
    {
    "type": "string",
    "name": "quantile"
    },
    {
    "type": "string",
    "name": "value"
    }
    ]
    }
    }
    ],
    "name": "quantiles",
    "default": null
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "ValueFrequency",
    "namespace": "com.linkedin.dataset",
    "fields": [
    {
    "type": "string",
    "name": "value"
    },
    {
    "type": "long",
    "name": "frequency"
    }
    ]
    }
    }
    ],
    "name": "distinctValueFrequencies",
    "default": null
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "Histogram",
    "namespace": "com.linkedin.dataset",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "boundaries"
    },
    {
    "type": {
    "type": "array",
    "items": "float"
    },
    "name": "heights"
    }
    ]
    }
    ],
    "name": "histogram",
    "default": null
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "sampleValues",
    "default": null
    }
    ],
    "doc": "Stats corresponding to fields in a dataset"
    }
    }
    ],
    "name": "fieldProfiles",
    "default": null,
    "doc": "Profiles for each column (or schema field)"
    },
    {
    "Searchable": {
    "fieldType": "COUNT",
    "hasValuesFieldName": "hasSizeInBytes"
    },
    "type": [
    "null",
    "long"
    ],
    "name": "sizeInBytes",
    "default": null,
    "doc": "Storage size in bytes"
    }
    ],
    "doc": "Stats corresponding to datasets"
    }

    datasetUsageStatistics (Timeseries)

    Stats corresponding to dataset's usage.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "datasetUsageStatistics",
    "type": "timeseries"
    },
    "name": "DatasetUsageStatistics",
    "namespace": "com.linkedin.dataset",
    "fields": [
    {
    "type": "long",
    "name": "timestampMillis",
    "doc": "The event timestamp field as epoch at UTC in milli seconds."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindowSize",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "CalendarInterval",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "SECOND",
    "MINUTE",
    "HOUR",
    "DAY",
    "WEEK",
    "MONTH",
    "QUARTER",
    "YEAR"
    ]
    },
    "name": "unit",
    "doc": "Interval unit such as minute/hour/day etc."
    },
    {
    "type": "int",
    "name": "multiple",
    "default": 1,
    "doc": "How many units. Defaults to 1."
    }
    ],
    "doc": "Defines the size of a time window."
    }
    ],
    "name": "eventGranularity",
    "default": null,
    "doc": "Granularity of the event if applicable"
    },
    {
    "type": [
    {
    "type": "record",
    "name": "PartitionSpec",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "PartitionType",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "FULL_TABLE",
    "QUERY",
    "PARTITION"
    ]
    },
    "name": "type",
    "default": "PARTITION"
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "partition",
    "doc": "String representation of the partition"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindow",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": "long",
    "name": "startTimeMillis",
    "doc": "Start time as epoch at UTC."
    },
    {
    "type": "com.linkedin.timeseries.TimeWindowSize",
    "name": "length",
    "doc": "The length of the window."
    }
    ]
    }
    ],
    "name": "timePartition",
    "default": null,
    "doc": "Time window of the partition if applicable"
    }
    ],
    "doc": "Defines how the data is partitioned"
    },
    "null"
    ],
    "name": "partitionSpec",
    "default": {
    "partition": "FULL_TABLE_SNAPSHOT",
    "type": "FULL_TABLE",
    "timePartition": null
    },
    "doc": "The optional partition specification."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "messageId",
    "default": null,
    "doc": "The optional messageId, if provided serves as a custom user-defined unique identifier for an aspect value."
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "int"
    ],
    "name": "uniqueUserCount",
    "default": null,
    "doc": "Unique user count"
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "int"
    ],
    "name": "totalSqlQueries",
    "default": null,
    "doc": "Total SQL query count"
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "topSqlQueries",
    "default": null,
    "doc": "Frequent SQL queries; mostly makes sense for datasets in SQL databases"
    },
    {
    "TimeseriesFieldCollection": {
    "key": "user"
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "DatasetUserUsageCounts",
    "namespace": "com.linkedin.dataset",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "user",
    "doc": "The unique id of the user."
    },
    {
    "TimeseriesField": {},
    "type": "int",
    "name": "count",
    "doc": "Number of times the dataset has been used by the user."
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "string"
    ],
    "name": "userEmail",
    "default": null,
    "doc": "If user_email is set, we attempt to resolve the user's urn upon ingest"
    }
    ],
    "doc": "Records a single user's usage counts for a given resource"
    }
    }
    ],
    "name": "userCounts",
    "default": null,
    "doc": "Users within this bucket, with frequency counts"
    },
    {
    "TimeseriesFieldCollection": {
    "key": "fieldPath"
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "DatasetFieldUsageCounts",
    "namespace": "com.linkedin.dataset",
    "fields": [
    {
    "type": "string",
    "name": "fieldPath",
    "doc": "The name of the field."
    },
    {
    "TimeseriesField": {},
    "type": "int",
    "name": "count",
    "doc": "Number of times the field has been used."
    }
    ],
    "doc": "Records field-level usage counts for a given dataset"
    }
    }
    ],
    "name": "fieldCounts",
    "default": null,
    "doc": "Field-level usage stats"
    }
    ],
    "doc": "Stats corresponding to dataset's usage."
    }

    operation (Timeseries)

    Operational info for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "operation",
    "type": "timeseries"
    },
    "name": "Operation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "timestampMillis",
    "doc": "The event timestamp field as epoch at UTC in milliseconds."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindowSize",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "CalendarInterval",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "SECOND",
    "MINUTE",
    "HOUR",
    "DAY",
    "WEEK",
    "MONTH",
    "QUARTER",
    "YEAR"
    ]
    },
    "name": "unit",
    "doc": "Interval unit such as minute/hour/day etc."
    },
    {
    "type": "int",
    "name": "multiple",
    "default": 1,
    "doc": "How many units. Defaults to 1."
    }
    ],
    "doc": "Defines the size of a time window."
    }
    ],
    "name": "eventGranularity",
    "default": null,
    "doc": "Granularity of the event if applicable"
    },
    {
    "type": [
    {
    "type": "record",
    "name": "PartitionSpec",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "PartitionType",
    "namespace": "com.linkedin.timeseries",
    "symbols": [
    "FULL_TABLE",
    "QUERY",
    "PARTITION"
    ]
    },
    "name": "type",
    "default": "PARTITION"
    },
    {
    "TimeseriesField": {},
    "type": "string",
    "name": "partition",
    "doc": "String representation of the partition"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "TimeWindow",
    "namespace": "com.linkedin.timeseries",
    "fields": [
    {
    "type": "long",
    "name": "startTimeMillis",
    "doc": "Start time as epoch at UTC."
    },
    {
    "type": "com.linkedin.timeseries.TimeWindowSize",
    "name": "length",
    "doc": "The length of the window."
    }
    ]
    }
    ],
    "name": "timePartition",
    "default": null,
    "doc": "Time window of the partition if applicable"
    }
    ],
    "doc": "Defines how the data is partitioned"
    },
    "null"
    ],
    "name": "partitionSpec",
    "default": {
    "partition": "FULL_TABLE_SNAPSHOT",
    "type": "FULL_TABLE",
    "timePartition": null
    },
    "doc": "The optional partition specification."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "messageId",
    "default": null,
    "doc": "The optional messageId, if provided serves as a custom user-defined unique identifier for an aspect value."
    },
    {
    "TimeseriesField": {},
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "actor",
    "default": null,
    "doc": "Actor who issued this operation."
    },
    {
    "TimeseriesField": {},
    "type": {
    "type": "enum",
    "symbolDocs": {
    "ALTER": "Asset was altered",
    "CREATE": "Asset was created",
    "CUSTOM": "Custom asset operation",
    "DELETE": "Rows were deleted",
    "DROP": "Asset was dropped",
    "INSERT": "Rows were inserted",
    "UPDATE": "Rows were updated"
    },
    "name": "OperationType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "INSERT",
    "UPDATE",
    "DELETE",
    "CREATE",
    "ALTER",
    "DROP",
    "CUSTOM",
    "UNKNOWN"
    ],
    "doc": "Enum to define the operation type when an entity changes."
    },
    "name": "operationType",
    "doc": "Operation type of change."
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "string"
    ],
    "name": "customOperationType",
    "default": null,
    "doc": "A custom type of operation. Required if operationType is CUSTOM."
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    "long"
    ],
    "name": "numAffectedRows",
    "default": null,
    "doc": "How many rows were affected by this operation."
    },
    {
    "TimeseriesFieldCollection": {
    "key": "datasetName"
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "affectedDatasets",
    "default": null,
    "doc": "Which other datasets were affected by this operation."
    },
    {
    "TimeseriesField": {},
    "type": [
    "null",
    {
    "type": "enum",
    "symbolDocs": {
    "DATA_PLATFORM": "Provided by a Data Platform",
    "DATA_PROCESS": "Provided by a Data Process"
    },
    "name": "OperationSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "DATA_PROCESS",
    "DATA_PLATFORM"
    ],
    "doc": "The source of an operation"
    }
    ],
    "name": "sourceType",
    "default": null,
    "doc": "Source Type"
    },
    {
    "type": [
    "null",
    {
    "type": "map",
    "values": "string"
    }
    ],
    "name": "customProperties",
    "default": null,
    "doc": "Custom properties"
    },
    {
    "Searchable": {
    "fieldName": "lastOperationTime",
    "fieldType": "DATETIME"
    },
    "TimeseriesField": {},
    "type": "long",
    "name": "lastUpdatedTimestamp",
    "doc": "The time at which the operation occurred. Would be better named 'operationTime'"
    }
    ],
    "doc": "Operational info for an entity."
    }

    datasetDeprecation (Deprecated)

    Dataset deprecation status Deprecated! This aspect is deprecated in favor of the more-general-purpose 'Deprecation' aspect.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "datasetDeprecation"
    },
    "Deprecated": true,
    "name": "DatasetDeprecation",
    "namespace": "com.linkedin.dataset",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the dataset is deprecated by owner."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this dataset."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the dataset deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "actor",
    "default": null,
    "doc": "The corpuser URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Dataset deprecation status\nDeprecated! This aspect is deprecated in favor of the more-general-purpose 'Deprecation' aspect."
    }

    Relationships

    Self

    These are the relationships to itself, stored in this entity's aspects

    • DownstreamOf (via upstreamLineage.upstreams.dataset)
    • DownstreamOf (via upstreamLineage.fineGrainedLineages)
    • ForeignKeyToDataset (via schemaMetadata.foreignKeys.foreignDataset)
    • SiblingOf (via siblings.siblings)

    Outgoing

    These are the relationships stored in this entity's aspects

    • DownstreamOf

      • SchemaField via upstreamLineage.fineGrainedLineages
    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • SchemaFieldTaggedWith

      • Tag via schemaMetadata.fields.globalTags
    • TaggedWith

      • Tag via schemaMetadata.fields.globalTags.tags
      • Tag via editableSchemaMetadata.editableSchemaFieldInfo.globalTags.tags
      • Tag via globalTags.tags
    • SchemaFieldWithGlossaryTerm

      • GlossaryTerm via schemaMetadata.fields.glossaryTerms
    • TermedWith

      • GlossaryTerm via schemaMetadata.fields.glossaryTerms.terms.urn
    • ForeignKeyTo

      • SchemaField via schemaMetadata.foreignKeys.foreignFields
    • EditableSchemaFieldTaggedWith

      • Tag via editableSchemaMetadata.editableSchemaFieldInfo.globalTags
    • EditableSchemaFieldWithGlossaryTerm

      • GlossaryTerm via editableSchemaMetadata.editableSchemaFieldInfo.glossaryTerms
    • AssociatedWith

      • Domain via domains.domains
      • Role via access.roles.urn
    • IsPartOf

      • Container via container.container
    • IsFailing

      • Test via testResults.failing
    • IsPassing

      • Test via testResults.passing

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/domain/index.html b/docs/generated/metamodel/entities/domain/index.html index 7eab77ad6ebb8..7dfa6cde20545 100644 --- a/docs/generated/metamodel/entities/domain/index.html +++ b/docs/generated/metamodel/entities/domain/index.html @@ -8,13 +8,13 @@ - +

    Domain

    A data domain within an organization.

    Aspects

    domainProperties

    Information about a Domain

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domainProperties"
    },
    "name": "DomainProperties",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Display name of the Domain"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Description of the Domain"
    },
    {
    "Searchable": {
    "/time": {
    "fieldName": "createdTime",
    "fieldType": "DATETIME"
    }
    },
    "type": [
    "null",
    {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    }
    ],
    "name": "created",
    "default": null,
    "doc": "Created Audit stamp"
    }
    ],
    "doc": "Information about a Domain"
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or when a new one is specified as an ownership type entity for which we have no\nenum value. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that oversees the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "A person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn

    Incoming

    These are the relationships stored in other entity's aspects

    • AssociatedWith

      • Dataset via domains.domains
      • DataJob via domains.domains
      • DataFlow via domains.domains
      • Chart via domains.domains
      • Dashboard via domains.domains
      • Notebook via domains.domains

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/globalsettings/index.html b/docs/generated/metamodel/entities/globalsettings/index.html index 75fa268d091fe..758d4be5531e8 100644 --- a/docs/generated/metamodel/entities/globalsettings/index.html +++ b/docs/generated/metamodel/entities/globalsettings/index.html @@ -8,13 +8,13 @@ - +

    GlobalSettings

    Global settings for the platform

    Aspects

    globalSettingsInfo

    DataHub Global platform settings. Careful - these should not be modified by the outside world!

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalSettingsInfo"
    },
    "name": "GlobalSettingsInfo",
    "namespace": "com.linkedin.settings.global",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "GlobalViewsSettings",
    "namespace": "com.linkedin.settings.global",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "defaultView",
    "default": null,
    "doc": "The default View for the instance, or organization."
    }
    ],
    "doc": "Settings for DataHub Views feature."
    }
    ],
    "name": "views",
    "default": null,
    "doc": "Settings related to the Views Feature"
    }
    ],
    "doc": "DataHub Global platform settings. Careful - these should not be modified by the outside world!"
    }

    Relationships

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/glossarynode/index.html b/docs/generated/metamodel/entities/glossarynode/index.html index 5bbde235aeed0..31295e3d158fb 100644 --- a/docs/generated/metamodel/entities/glossarynode/index.html +++ b/docs/generated/metamodel/entities/glossarynode/index.html @@ -8,14 +8,14 @@ - +

    GlossaryNode

    Aspects

    glossaryNodeKey

    Key for a GlossaryNode

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryNodeKey"
    },
    "name": "GlossaryNodeKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "Searchable": {
    "enableAutocomplete": true,
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name"
    }
    ],
    "doc": "Key for a GlossaryNode"
    }

    glossaryNodeInfo

    Properties associated with a GlossaryNode

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryNodeInfo"
    },
    "name": "GlossaryNodeInfo",
    "namespace": "com.linkedin.glossary",
    "fields": [
    {
    "Searchable": {},
    "type": "string",
    "name": "definition",
    "doc": "Definition of business node"
    },
    {
    "Relationship": {
    "entityTypes": [
    "glossaryNode"
    ],
    "name": "IsPartOf"
    },
    "Searchable": {
    "fieldName": "parentNode",
    "fieldType": "URN",
    "hasValuesFieldName": "hasParentNode"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryNodeUrn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "parentNode",
    "default": null,
    "doc": "Parent node of the glossary term"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldName": "displayName",
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "name",
    "default": null,
    "doc": "Display name of the node"
    },
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "id",
    "default": null,
    "doc": "Optional id for the GlossaryNode"
    }
    ],
    "doc": "Properties associated with a GlossaryNode"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or when a new one is specified as an ownership type entity for which we have no\nenum value. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that oversees the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "A person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    Relationships

    Self

    These are the relationships to itself, stored in this entity's aspects

    • IsPartOf (via glossaryNodeInfo.parentNode)

    Outgoing

    These are the relationships stored in this entity's aspects

    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn

    Incoming

    These are the relationships stored in other entities' aspects

    • IsPartOf

      • GlossaryTerm via glossaryTermInfo.parentNode

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/glossaryterm/index.html b/docs/generated/metamodel/entities/glossaryterm/index.html index f6e9b909e7c95..edf2b30b77628 100644 --- a/docs/generated/metamodel/entities/glossaryterm/index.html +++ b/docs/generated/metamodel/entities/glossaryterm/index.html @@ -8,14 +8,14 @@ - +

    GlossaryTerm

    Aspects

    glossaryTermKey

    Key for a GlossaryTerm

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTermKey"
    },
    "name": "GlossaryTermKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "Searchable": {
    "enableAutocomplete": true,
    "fieldName": "id",
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "The term name, which serves as a unique id"
    }
    ],
    "doc": "Key for a GlossaryTerm"
    }

    glossaryTermInfo

    Properties associated with a GlossaryTerm

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTermInfo"
    },
    "name": "GlossaryTermInfo",
    "namespace": "com.linkedin.glossary",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "id",
    "default": null,
    "doc": "Optional id for the term"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "name",
    "default": null,
    "doc": "Display name of the term"
    },
    {
    "Searchable": {},
    "type": "string",
    "name": "definition",
    "doc": "Definition of business term."
    },
    {
    "Relationship": {
    "entityTypes": [
    "glossaryNode"
    ],
    "name": "IsPartOf"
    },
    "Searchable": {
    "fieldName": "parentNode",
    "fieldType": "URN",
    "hasValuesFieldName": "hasParentNode"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryNodeUrn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "parentNode",
    "default": null,
    "doc": "Parent node of the glossary term"
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "type": "string",
    "name": "termSource",
    "doc": "Source of the Business Term (INTERNAL or EXTERNAL) with default value as INTERNAL"
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "sourceRef",
    "default": null,
    "doc": "External Reference to the business-term"
    },
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "sourceUrl",
    "default": null,
    "doc": "The abstracted URL such as https://spec.edmcouncil.org/fibo/ontology/FBC/FinancialInstruments/FinancialInstruments/CashInstrument."
    },
    {
    "deprecated": true,
    "type": [
    "null",
    "string"
    ],
    "name": "rawSchema",
    "default": null,
    "doc": "Schema definition of the glossary term"
    }
    ],
    "doc": "Properties associated with a GlossaryTerm"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that oversees the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "A person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    browsePaths

    Shared aspect containing Browse Paths to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePaths"
    },
    "name": "BrowsePaths",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "fieldName": "browsePaths",
    "fieldType": "BROWSE_PATH"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "paths",
    "doc": "A list of valid browse paths for the entity.\n\nBrowse paths are expected to be forward slash-separated strings. For example: 'prod/snowflake/datasetName'"
    }
    ],
    "doc": "Shared aspect containing Browse Paths to be indexed for an entity."
    }

    glossaryRelatedTerms

    Has A / Is A lineage information about a glossary Term reporting the lineage

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryRelatedTerms"
    },
    "name": "GlossaryRelatedTerms",
    "namespace": "com.linkedin.glossary",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "IsA"
    }
    },
    "Searchable": {
    "/*": {
    "boostScore": 2.0,
    "fieldName": "isRelatedTerms",
    "fieldType": "URN"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "isRelatedTerms",
    "default": null,
    "doc": "The relationship Is A with glossary term"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "HasA"
    }
    },
    "Searchable": {
    "/*": {
    "boostScore": 2.0,
    "fieldName": "hasRelatedTerms",
    "fieldType": "URN"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "hasRelatedTerms",
    "default": null,
    "doc": "The relationship Has A with glossary term"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "HasValue"
    }
    },
    "Searchable": {
    "/*": {
    "fieldName": "values",
    "fieldType": "URN"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "values",
    "default": null,
    "doc": "The relationship Has Value with glossary term.\nThese are fixed value a term has. For example a ColorEnum where RED, GREEN and YELLOW are fixed values."
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "IsRelatedTo"
    }
    },
    "Searchable": {
    "/*": {
    "fieldName": "relatedTerms",
    "fieldType": "URN"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "relatedTerms",
    "default": null,
    "doc": "The relationship isRelatedTo with glossary term"
    }
    ],
    "doc": "Has A / Is A lineage information about a glossary Term reporting the lineage"
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    schemaMetadata

    SchemaMetadata to describe metadata related to store schema

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "schemaMetadata"
    },
    "name": "SchemaMetadata",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "validate": {
    "strlen": {
    "max": 500,
    "min": 1
    }
    },
    "type": "string",
    "name": "schemaName",
    "doc": "Schema name e.g. PageViewEvent, identity.Profile, ams.account_management_tracking"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.DataPlatformUrn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Standardized platform urn where schema is defined. The data platform Urn (urn:li:platform:{platform_name})"
    },
    {
    "type": "long",
    "name": "version",
    "doc": "Every change to SchemaMetadata in the resource results in a new version. Version is server assigned. This version differs from the platform-native schema version."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "created",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "An AuditStamp corresponding to the creation of this resource/association/sub-resource. A value of 0 for time indicates missing data."
    },
    {
    "type": "com.linkedin.common.AuditStamp",
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "An AuditStamp corresponding to the last modification of this resource/association/sub-resource. If no modification has happened since creation, lastModified should be the same as created. A value of 0 for time indicates missing data."
    },
    {
    "type": [
    "null",
    "com.linkedin.common.AuditStamp"
    ],
    "name": "deleted",
    "default": null,
    "doc": "An AuditStamp corresponding to the deletion of this resource/association/sub-resource. Logically, deleted MUST have a later timestamp than creation. It may or may not have the same time as lastModified depending upon the resource/association/sub-resource semantics."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.DatasetUrn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "dataset",
    "default": null,
    "doc": "Dataset this schema metadata is associated with."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "cluster",
    "default": null,
    "doc": "The cluster this schema metadata resides from"
    },
    {
    "type": "string",
    "name": "hash",
    "doc": "the SHA1 hash of the schema content"
    },
    {
    "type": [
    {
    "type": "record",
    "name": "EspressoSchema",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": "string",
    "name": "documentSchema",
    "doc": "The native espresso document schema."
    },
    {
    "type": "string",
    "name": "tableSchema",
    "doc": "The espresso table schema definition."
    }
    ],
    "doc": "Schema text of an espresso table schema."
    },
    {
    "type": "record",
    "name": "OracleDDL",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": "string",
    "name": "tableSchema",
    "doc": "The native schema in the dataset's platform. This is a human readable (json blob) table schema."
    }
    ],
    "doc": "Schema holder for oracle data definition language that describes an oracle table."
    },
    {
    "type": "record",
    "name": "MySqlDDL",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": "string",
    "name": "tableSchema",
    "doc": "The native schema in the dataset's platform. This is a human readable (json blob) table schema."
    }
    ],
    "doc": "Schema holder for MySql data definition language that describes an MySql table."
    },
    {
    "type": "record",
    "name": "PrestoDDL",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": "string",
    "name": "rawSchema",
    "doc": "The raw schema in the dataset's platform. This includes the DDL and the columns extracted from DDL."
    }
    ],
    "doc": "Schema holder for presto data definition language that describes a presto view."
    },
    {
    "type": "record",
    "name": "KafkaSchema",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": "string",
    "name": "documentSchema",
    "doc": "The native kafka document schema. This is a human readable avro document schema."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "documentSchemaType",
    "default": null,
    "doc": "The native kafka document schema type. This can be AVRO/PROTOBUF/JSON."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "keySchema",
    "default": null,
    "doc": "The native kafka key schema as retrieved from Schema Registry"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "keySchemaType",
    "default": null,
    "doc": "The native kafka key schema type. This can be AVRO/PROTOBUF/JSON."
    }
    ],
    "doc": "Schema holder for kafka schema."
    },
    {
    "type": "record",
    "name": "BinaryJsonSchema",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": "string",
    "name": "schema",
    "doc": "The native schema text for binary JSON file format."
    }
    ],
    "doc": "Schema text of binary JSON schema."
    },
    {
    "type": "record",
    "name": "OrcSchema",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": "string",
    "name": "schema",
    "doc": "The native schema for ORC file format."
    }
    ],
    "doc": "Schema text of an ORC schema."
    },
    {
    "type": "record",
    "name": "Schemaless",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "The dataset has no specific schema associated with it"
    },
    {
    "type": "record",
    "name": "KeyValueSchema",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": "string",
    "name": "keySchema",
    "doc": "The raw schema for the key in the key-value store."
    },
    {
    "type": "string",
    "name": "valueSchema",
    "doc": "The raw schema for the value in the key-value store."
    }
    ],
    "doc": "Schema text of a key-value store schema."
    },
    {
    "type": "record",
    "name": "OtherSchema",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": "string",
    "name": "rawSchema",
    "doc": "The native schema in the dataset's platform."
    }
    ],
    "doc": "Schema holder for undefined schema types."
    }
    ],
    "name": "platformSchema",
    "doc": "The native schema in the dataset's platform."
    },
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "SchemaField",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "Searchable": {
    "boostScore": 5.0,
    "fieldName": "fieldPaths",
    "fieldType": "TEXT",
    "queryByDefault": "true"
    },
    "type": "string",
    "name": "fieldPath",
    "doc": "Flattened name of the field. Field is computed from jsonPath field."
    },
    {
    "Deprecated": true,
    "type": [
    "null",
    "string"
    ],
    "name": "jsonPath",
    "default": null,
    "doc": "Flattened name of a field in JSON Path notation."
    },
    {
    "type": "boolean",
    "name": "nullable",
    "default": false,
    "doc": "Indicates if this field is optional or nullable"
    },
    {
    "Searchable": {
    "boostScore": 0.1,
    "fieldName": "fieldDescriptions",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Description"
    },
    {
    "Searchable": {
    "boostScore": 0.2,
    "fieldName": "fieldLabels",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "label",
    "default": null,
    "doc": "Label of the field. Provides a more human-readable name for the field than field path. Some sources will\nprovide this metadata but not all sources have the concept of a label. If just one string is associated with\na field in a source, that is most likely a description."
    },
    {
    "type": [
    "null",
    "com.linkedin.common.AuditStamp"
    ],
    "name": "created",
    "default": null,
    "doc": "An AuditStamp corresponding to the creation of this schema field."
    },
    {
    "type": [
    "null",
    "com.linkedin.common.AuditStamp"
    ],
    "name": "lastModified",
    "default": null,
    "doc": "An AuditStamp corresponding to the last modification of this schema field."
    },
    {
    "type": {
    "type": "record",
    "name": "SchemaFieldDataType",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": [
    {
    "type": "record",
    "name": "BooleanType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Boolean field type."
    },
    {
    "type": "record",
    "name": "FixedType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Fixed field type."
    },
    {
    "type": "record",
    "name": "StringType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "String field type."
    },
    {
    "type": "record",
    "name": "BytesType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Bytes field type."
    },
    {
    "type": "record",
    "name": "NumberType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Number data type: long, integer, short, etc.."
    },
    {
    "type": "record",
    "name": "DateType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Date field type."
    },
    {
    "type": "record",
    "name": "TimeType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Time field type. This should also be used for datetimes."
    },
    {
    "type": "record",
    "name": "EnumType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Enum field type."
    },
    {
    "type": "record",
    "name": "NullType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Null field type."
    },
    {
    "type": "record",
    "name": "MapType",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "keyType",
    "default": null,
    "doc": "Key type in a map"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "valueType",
    "default": null,
    "doc": "Type of the value in a map"
    }
    ],
    "doc": "Map field type."
    },
    {
    "type": "record",
    "name": "ArrayType",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "nestedType",
    "default": null,
    "doc": "List of types this array holds."
    }
    ],
    "doc": "Array field type."
    },
    {
    "type": "record",
    "name": "UnionType",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "nestedTypes",
    "default": null,
    "doc": "List of types in union type."
    }
    ],
    "doc": "Union field type."
    },
    {
    "type": "record",
    "name": "RecordType",
    "namespace": "com.linkedin.schema",
    "fields": [],
    "doc": "Record field type."
    }
    ],
    "name": "type",
    "doc": "Data platform specific types"
    }
    ],
    "doc": "Schema field data types"
    },
    "name": "type",
    "doc": "Platform independent field type of the field."
    },
    {
    "type": "string",
    "name": "nativeDataType",
    "doc": "The native type of the field in the dataset's platform as declared by platform schema."
    },
    {
    "type": "boolean",
    "name": "recursive",
    "default": false,
    "doc": "There are use cases when a field in type B references type A. A field in A references field of type B. In such cases, we will mark the first field as recursive."
    },
    {
    "Relationship": {
    "/tags/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "SchemaFieldTaggedWith"
    }
    },
    "Searchable": {
    "/tags/*/tag": {
    "boostScore": 0.5,
    "fieldName": "fieldTags",
    "fieldType": "URN"
    }
    },
    "type": [
    "null",
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }
    ],
    "name": "globalTags",
    "default": null,
    "doc": "Tags associated with the field"
    },
    {
    "Relationship": {
    "/terms/*/urn": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "SchemaFieldWithGlossaryTerm"
    }
    },
    "Searchable": {
    "/terms/*/urn": {
    "boostScore": 0.5,
    "fieldName": "fieldGlossaryTerms",
    "fieldType": "URN"
    }
    },
    "type": [
    "null",
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": "com.linkedin.common.AuditStamp",
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }
    ],
    "name": "glossaryTerms",
    "default": null,
    "doc": "Glossary terms associated with the field"
    },
    {
    "type": "boolean",
    "name": "isPartOfKey",
    "default": false,
    "doc": "For schema fields that are part of complex keys, set this field to true\nWe do this to easily distinguish between value and key fields"
    },
    {
    "type": [
    "null",
    "boolean"
    ],
    "name": "isPartitioningKey",
    "default": null,
    "doc": "For Datasets which are partitioned, this determines the partitioning key."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "jsonProps",
    "default": null,
    "doc": "For schema fields that have other properties that are not modeled explicitly,\nuse this field to serialize those properties into a JSON string"
    }
    ],
    "doc": "SchemaField to describe metadata related to dataset schema."
    }
    },
    "name": "fields",
    "doc": "Client provided a list of fields from document schema."
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "primaryKeys",
    "default": null,
    "doc": "Client provided list of fields that define primary keys to access record. Field order defines hierarchical espresso keys. Empty list indicates absence of primary key access pattern. Value is a SchemaField@fieldPath."
    },
    {
    "deprecated": "Use foreignKeys instead.",
    "type": [
    "null",
    {
    "type": "map",
    "values": {
    "type": "record",
    "name": "ForeignKeySpec",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": [
    {
    "type": "record",
    "name": "DatasetFieldForeignKey",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.DatasetUrn"
    },
    "type": "string",
    "name": "parentDataset",
    "doc": "dataset that stores the resource."
    },
    {
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "currentFieldPaths",
    "doc": "List of fields in hosting(current) SchemaMetadata that conform a foreign key. List can contain a single entry or multiple entries if several entries in hosting schema conform a foreign key in a single parent dataset."
    },
    {
    "type": "string",
    "name": "parentField",
    "doc": "SchemaField@fieldPath that uniquely identify field in parent dataset that this field references."
    }
    ],
    "doc": "For non-urn based foreign keys."
    },
    {
    "type": "record",
    "name": "UrnForeignKey",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": "string",
    "name": "currentFieldPath",
    "doc": "Field in hosting(current) SchemaMetadata."
    }
    ],
    "doc": "If SchemaMetadata fields make any external references and references are of type com.linkedin.common.Urn or any children, this models can be used to mark it."
    }
    ],
    "name": "foreignKey",
    "doc": "Foreign key definition in metadata schema."
    }
    ],
    "doc": "Description of a foreign key in a schema."
    }
    }
    ],
    "name": "foreignKeysSpecs",
    "default": null,
    "doc": "Map captures all the references schema makes to external datasets. Map key is ForeignKeySpecName typeref."
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "ForeignKeyConstraint",
    "namespace": "com.linkedin.schema",
    "fields": [
    {
    "type": "string",
    "name": "name",
    "doc": "Name of the constraint, likely provided from the source"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "schemaField"
    ],
    "name": "ForeignKeyTo"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "foreignFields",
    "doc": "Fields the constraint maps to on the foreign dataset"
    },
    {
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "sourceFields",
    "doc": "Fields the constraint maps to on the source dataset"
    },
    {
    "Relationship": {
    "entityTypes": [
    "dataset"
    ],
    "name": "ForeignKeyToDataset"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "foreignDataset",
    "doc": "Reference to the foreign dataset for ease of lookup"
    }
    ],
    "doc": "Description of a foreign key constraint in a schema."
    }
    }
    ],
    "name": "foreignKeys",
    "default": null,
    "doc": "List of foreign key constraints for the schema"
    }
    ],
    "doc": "SchemaMetadata to describe metadata related to store schema"
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    Relationships

    Self

    These are the relationships from this entity to itself, stored in its own aspects

    • IsA (via glossaryRelatedTerms.isRelatedTerms)
    • HasA (via glossaryRelatedTerms.hasRelatedTerms)
    • HasValue (via glossaryRelatedTerms.values)
    • IsRelatedTo (via glossaryRelatedTerms.relatedTerms)
    • SchemaFieldWithGlossaryTerm (via schemaMetadata.fields.glossaryTerms)
    • TermedWith (via schemaMetadata.fields.glossaryTerms.terms.urn)

    Outgoing

    These are the relationships stored in this entity's aspects

    • IsPartOf

      • GlossaryNode via glossaryTermInfo.parentNode
    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • SchemaFieldTaggedWith

      • Tag via schemaMetadata.fields.globalTags
    • TaggedWith

      • Tag via schemaMetadata.fields.globalTags.tags
    • ForeignKeyTo

      • SchemaField via schemaMetadata.foreignKeys.foreignFields
    • ForeignKeyToDataset

      • Dataset via schemaMetadata.foreignKeys.foreignDataset
    • AssociatedWith

      • Domain via domains.domains

    Incoming

    These are the relationships stored in other entities' aspects

    • SchemaFieldWithGlossaryTerm

      • Dataset via schemaMetadata.fields.glossaryTerms
      • Chart via inputFields.fields.schemaField.glossaryTerms
      • Dashboard via inputFields.fields.schemaField.glossaryTerms
    • TermedWith

      • Dataset via schemaMetadata.fields.glossaryTerms.terms.urn
      • DataJob via glossaryTerms.terms.urn
      • DataFlow via glossaryTerms.terms.urn
      • Chart via glossaryTerms.terms.urn
      • Dashboard via glossaryTerms.terms.urn
      • Notebook via glossaryTerms.terms.urn
      • Container via glossaryTerms.terms.urn
    • EditableSchemaFieldWithGlossaryTerm

      • Dataset via editableSchemaMetadata.editableSchemaFieldInfo.glossaryTerms

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/invitetoken/index.html b/docs/generated/metamodel/entities/invitetoken/index.html index 00bc61af07b9c..403f20400029b 100644 --- a/docs/generated/metamodel/entities/invitetoken/index.html +++ b/docs/generated/metamodel/entities/invitetoken/index.html @@ -8,13 +8,13 @@ - +

    InviteToken

    Aspects

    inviteToken

    Aspect used to store invite tokens.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "inviteToken"
    },
    "name": "InviteToken",
    "namespace": "com.linkedin.identity",
    "fields": [
    {
    "type": "string",
    "name": "token",
    "doc": "The encrypted invite token."
    },
    {
    "Searchable": {
    "fieldName": "role",
    "fieldType": "KEYWORD",
    "hasValuesFieldName": "hasRole"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "role",
    "default": null,
    "doc": "The role that this invite token may be associated with"
    }
    ],
    "doc": "Aspect used to store invite tokens."
    }

    Relationships

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/mlfeature/index.html b/docs/generated/metamodel/entities/mlfeature/index.html index ea1fcf12b56ba..43f52d617001b 100644 --- a/docs/generated/metamodel/entities/mlfeature/index.html +++ b/docs/generated/metamodel/entities/mlfeature/index.html @@ -8,14 +8,14 @@ - +

    MlFeature

    Aspects

    mlFeatureKey

    Key for an MLFeature

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlFeatureKey"
    },
    "name": "MLFeatureKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": "string",
    "name": "featureNamespace",
    "doc": "Namespace for the feature"
    },
    {
    "Searchable": {
    "boostScore": 8.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Name of the feature"
    }
    ],
    "doc": "Key for an MLFeature"
    }

    mlFeatureProperties

    Properties associated with a MLFeature

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlFeatureProperties"
    },
    "name": "MLFeatureProperties",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "Searchable": {
    "fieldType": "TEXT",
    "hasValuesFieldName": "hasDescription"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the MLFeature"
    },
    {
    "type": [
    "null",
    {
    "type": "enum",
    "symbolDocs": {
    "AUDIO": "Audio Data",
    "BINARY": "Binary data is discrete data that can be in only one of two categories - either yes or no, 1 or 0, off or on, etc",
    "BYTE": "Bytes data are binary-encoded values that can represent complex objects.",
    "CONTINUOUS": "Continuous data are made of uncountable values, often the result of a measurement such as height, weight, age etc.",
    "COUNT": "Count data is discrete whole number data - no negative numbers here.\nCount data often has many small values, such as zero and one.",
    "IMAGE": "Image Data",
    "INTERVAL": "Interval data has equal spaces between the numbers and does not represent a temporal pattern.\nExamples include percentages, temperatures, and income.",
    "MAP": "Mapping Data Type ex: dict, map",
    "NOMINAL": "Nominal data is made of discrete values with no numerical relationship between the different categories - mean and median are meaningless.\nAnimal species is one example. For example, pig is not higher than bird and lower than fish.",
    "ORDINAL": "Ordinal data are discrete integers that can be ranked or sorted.\nFor example, the distance between first and second may not be the same as the distance between second and third.",
    "SEQUENCE": "Sequence Data Type ex: list, tuple, range",
    "SET": "Set Data Type ex: set, frozenset",
    "TEXT": "Text Data",
    "TIME": "Time data is a cyclical, repeating continuous form of data.\nThe relevant time features can be any period- daily, weekly, monthly, annual, etc.",
    "UNKNOWN": "Unknown data are data that we don't know the type for.",
    "USELESS": "Useless data is unique, discrete data with no potential relationship with the outcome variable.\nA useless feature has high cardinality. An example would be bank account numbers that were generated randomly.",
    "VIDEO": "Video Data"
    },
    "name": "MLFeatureDataType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "USELESS",
    "NOMINAL",
    "ORDINAL",
    "BINARY",
    "COUNT",
    "TIME",
    "INTERVAL",
    "IMAGE",
    "VIDEO",
    "AUDIO",
    "TEXT",
    "MAP",
    "SEQUENCE",
    "SET",
    "CONTINUOUS",
    "BYTE",
    "UNKNOWN"
    ],
    "doc": "MLFeature Data Type"
    }
    ],
    "name": "dataType",
    "default": null,
    "doc": "Data Type of the MLFeature"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "VersionTag",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "versionTag",
    "default": null
    }
    ],
    "doc": "A resource-defined string representing the resource state for the purpose of concurrency control"
    }
    ],
    "name": "version",
    "default": null,
    "doc": "Version of the MLFeature"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataset"
    ],
    "isLineage": true,
    "name": "DerivedFrom"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "sources",
    "default": null,
    "doc": "Source of the MLFeature"
    }
    ],
    "doc": "Properties associated with a MLFeature"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that oversees the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    browsePaths

    Shared aspect containing Browse Paths to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePaths"
    },
    "name": "BrowsePaths",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "fieldName": "browsePaths",
    "fieldType": "BROWSE_PATH"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "paths",
    "doc": "A list of valid browse paths for the entity.\n\nBrowse paths are expected to be forward slash-separated strings. For example: 'prod/snowflake/datasetName'"
    }
    ],
    "doc": "Shared aspect containing Browse Paths to be indexed for an entity."
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    dataPlatformInstance

    The specific instance of the data platform that this entity belongs to

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstance"
    },
    "name": "DataPlatformInstance",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "URN",
    "filterNameOverride": "Platform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data Platform"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "platformInstance",
    "fieldType": "URN",
    "filterNameOverride": "Platform Instance"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "instance",
    "default": null,
    "doc": "Instance of the data platform (e.g. db instance)"
    }
    ],
    "doc": "The specific instance of the data platform that this entity belongs to"
    }

    browsePathsV2

    Shared aspect containing a Browse Path to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePathsV2"
    },
    "name": "BrowsePathsV2",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*/id": {
    "fieldName": "browsePathV2",
    "fieldType": "BROWSE_PATH_V2"
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "BrowsePathEntry",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "string",
    "name": "id",
    "doc": "The ID of the browse path entry. This is what gets stored in the index.\nIf there's an urn associated with this entry, id and urn will be the same"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "urn",
    "default": null,
    "doc": "Optional urn pointing to some entity in DataHub"
    }
    ],
    "doc": "Represents a single level in an entity's browsePathV2"
    }
    },
    "name": "path",
    "doc": "A valid browse path for the entity. This field is provided by DataHub by default.\nThis aspect is a newer version of browsePaths where we can encode more information in the path.\nThis path is also based on containers for a given entity if it has containers.\n\nThis is stored in elasticsearch as unit-separator delimited strings and only includes platform specific folders or containers.\nThese paths should not include high level info captured elsewhere ie. Platform and Environment."
    }
    ],
    "doc": "Shared aspect containing a Browse Path to be indexed for an entity."
    }

    glossaryTerms

    Related business terms information

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }

    editableMlFeatureProperties

    Properties associated with a MLFeature editable from the UI

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "editableMlFeatureProperties"
    },
    "name": "EditableMLFeatureProperties",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "Searchable": {
    "fieldName": "editedDescription",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the MLFeature"
    }
    ],
    "doc": "Properties associated with a MLFeature editable from the UI"
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • DerivedFrom

      • Dataset via mlFeatureProperties.sources
    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • TaggedWith

      • Tag via globalTags.tags
    • TermedWith

      • GlossaryTerm via glossaryTerms.terms.urn
    • AssociatedWith

      • Domain via domains.domains

    Incoming

    These are the relationships stored in other entities' aspects

    • Consumes

      • MlModel via mlModelProperties.mlFeatures
    • Contains

      • MlFeatureTable via mlFeatureTableProperties.mlFeatures

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/mlfeaturetable/index.html b/docs/generated/metamodel/entities/mlfeaturetable/index.html index 6b4a03b421ba6..800f37a931042 100644 --- a/docs/generated/metamodel/entities/mlfeaturetable/index.html +++ b/docs/generated/metamodel/entities/mlfeaturetable/index.html @@ -8,14 +8,14 @@ - +

    MlFeatureTable

    Aspects

    mlFeatureTableKey

    Key for an MLFeatureTable

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlFeatureTableKey"
    },
    "name": "MLFeatureTableKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "dataPlatform"
    ],
    "name": "SourcePlatform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data platform urn associated with the feature table"
    },
    {
    "Searchable": {
    "boostScore": 8.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Name of the feature table"
    }
    ],
    "doc": "Key for an MLFeatureTable"
    }

    mlFeatureTableProperties

    Properties associated with a MLFeatureTable

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlFeatureTableProperties"
    },
    "name": "MLFeatureTableProperties",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "TEXT",
    "hasValuesFieldName": "hasDescription"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the MLFeatureTable"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "mlFeature"
    ],
    "name": "Contains"
    }
    },
    "Searchable": {
    "/*": {
    "fieldName": "features",
    "fieldType": "URN"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "mlFeatures",
    "default": null,
    "doc": "List of features contained in the feature table"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "mlPrimaryKey"
    ],
    "name": "KeyedBy"
    }
    },
    "Searchable": {
    "/*": {
    "fieldName": "primaryKeys",
    "fieldType": "URN"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "mlPrimaryKeys",
    "default": null,
    "doc": "List of primary keys in the feature table (if multiple, assumed to act as a composite key)"
    }
    ],
    "doc": "Properties associated with a MLFeatureTable"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that overseas the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    browsePaths

    Shared aspect containing Browse Paths to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePaths"
    },
    "name": "BrowsePaths",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "fieldName": "browsePaths",
    "fieldType": "BROWSE_PATH"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "paths",
    "doc": "A list of valid browse paths for the entity.\n\nBrowse paths are expected to be forward slash-separated strings. For example: 'prod/snowflake/datasetName'"
    }
    ],
    "doc": "Shared aspect containing Browse Paths to be indexed for an entity."
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    dataPlatformInstance

    The specific instance of the data platform that this entity belongs to

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstance"
    },
    "name": "DataPlatformInstance",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "URN",
    "filterNameOverride": "Platform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data Platform"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "platformInstance",
    "fieldType": "URN",
    "filterNameOverride": "Platform Instance"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "instance",
    "default": null,
    "doc": "Instance of the data platform (e.g. db instance)"
    }
    ],
    "doc": "The specific instance of the data platform that this entity belongs to"
    }

    browsePathsV2

    Shared aspect containing a Browse Path to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePathsV2"
    },
    "name": "BrowsePathsV2",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*/id": {
    "fieldName": "browsePathV2",
    "fieldType": "BROWSE_PATH_V2"
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "BrowsePathEntry",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "string",
    "name": "id",
    "doc": "The ID of the browse path entry. This is what gets stored in the index.\nIf there's an urn associated with this entry, id and urn will be the same"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "urn",
    "default": null,
    "doc": "Optional urn pointing to some entity in DataHub"
    }
    ],
    "doc": "Represents a single level in an entity's browsePathV2"
    }
    },
    "name": "path",
    "doc": "A valid browse path for the entity. This field is provided by DataHub by default.\nThis aspect is a newer version of browsePaths where we can encode more information in the path.\nThis path is also based on containers for a given entity if it has containers.\n\nThis is stored in elasticsearch as unit-separator delimited strings and only includes platform specific folders or containers.\nThese paths should not include high level info captured elsewhere ie. Platform and Environment."
    }
    ],
    "doc": "Shared aspect containing a Browse Path to be indexed for an entity."
    }

    glossaryTerms

    Related business terms information

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }

    editableMlFeatureTableProperties

    Properties associated with a MLFeatureTable editable from the ui

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "editableMlFeatureTableProperties"
    },
    "name": "EditableMLFeatureTableProperties",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "Searchable": {
    "fieldName": "editedDescription",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the MLFeatureTable"
    }
    ],
    "doc": "Properties associated with a MLFeatureTable editable from the ui"
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • SourcePlatform

      • DataPlatform via mlFeatureTableKey.platform
    • Contains

      • MlFeature via mlFeatureTableProperties.mlFeatures
    • KeyedBy

      • MlPrimaryKey via mlFeatureTableProperties.mlPrimaryKeys
    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • TaggedWith

      • Tag via globalTags.tags
    • TermedWith

      • GlossaryTerm via glossaryTerms.terms.urn
    • AssociatedWith

      • Domain via domains.domains

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/mlmodel/index.html b/docs/generated/metamodel/entities/mlmodel/index.html index 163892d2ed7b3..350e6a5d4d930 100644 --- a/docs/generated/metamodel/entities/mlmodel/index.html +++ b/docs/generated/metamodel/entities/mlmodel/index.html @@ -8,14 +8,14 @@ - +

    MlModel

    Aspects

    mlModelKey

    Key for an ML model

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlModelKey"
    },
    "name": "MLModelKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Standardized platform urn for the model"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Name of the MLModel"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "TEXT_PARTIAL",
    "filterNameOverride": "Environment",
    "queryByDefault": false
    },
    "type": {
    "type": "enum",
    "symbolDocs": {
    "CORP": "Designates corporation fabrics",
    "DEV": "Designates development fabrics",
    "EI": "Designates early-integration fabrics",
    "NON_PROD": "Designates non-production fabrics",
    "PRE": "Designates pre-production fabrics",
    "PROD": "Designates production fabrics",
    "QA": "Designates quality assurance fabrics",
    "STG": "Designates staging fabrics",
    "TEST": "Designates testing fabrics",
    "UAT": "Designates user acceptance testing fabrics"
    },
    "name": "FabricType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "DEV",
    "TEST",
    "QA",
    "UAT",
    "EI",
    "PRE",
    "STG",
    "NON_PROD",
    "PROD",
    "CORP"
    ],
    "doc": "Fabric group type"
    },
    "name": "origin",
    "doc": "Fabric type where model belongs to or where it was generated"
    }
    ],
    "doc": "Key for an ML model"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that overseas the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    mlModelProperties

    Properties associated with a ML Model

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlModelProperties"
    },
    "name": "MLModelProperties",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "externalUrl",
    "default": null,
    "doc": "URL where the reference exist"
    },
    {
    "Searchable": {
    "fieldType": "TEXT",
    "hasValuesFieldName": "hasDescription"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the MLModel"
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "date",
    "default": null,
    "doc": "Date when the MLModel was developed"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "VersionTag",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "versionTag",
    "default": null
    }
    ],
    "doc": "A resource-defined string representing the resource state for the purpose of concurrency control"
    }
    ],
    "name": "version",
    "default": null,
    "doc": "Version of the MLModel"
    },
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "type",
    "default": null,
    "doc": "Type of Algorithm or MLModel such as whether it is a Naive Bayes classifier, Convolutional Neural Network, etc"
    },
    {
    "type": [
    "null",
    {
    "type": "map",
    "values": [
    "string",
    "int",
    "float",
    "double",
    "boolean"
    ]
    }
    ],
    "name": "hyperParameters",
    "default": null,
    "doc": "Hyper Parameters of the MLModel\n\nNOTE: these are deprecated in favor of hyperParams"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "Aspect": {
    "name": "mlHyperParam"
    },
    "name": "MLHyperParam",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": "string",
    "name": "name",
    "doc": "Name of the MLHyperParam"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the MLHyperParam"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "value",
    "default": null,
    "doc": "The value of the MLHyperParam"
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "createdAt",
    "default": null,
    "doc": "Date when the MLHyperParam was developed"
    }
    ],
    "doc": "Properties associated with an ML Hyper Param"
    }
    }
    ],
    "name": "hyperParams",
    "default": null,
    "doc": "Hyperparameters of the MLModel"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "Aspect": {
    "name": "mlMetric"
    },
    "name": "MLMetric",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": "string",
    "name": "name",
    "doc": "Name of the mlMetric"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the mlMetric"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "value",
    "default": null,
    "doc": "The value of the mlMetric"
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "createdAt",
    "default": null,
    "doc": "Date when the mlMetric was developed"
    }
    ],
    "doc": "Properties associated with an ML Metric"
    }
    }
    ],
    "name": "trainingMetrics",
    "default": null,
    "doc": "Metrics of the MLModel used in training"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "com.linkedin.ml.metadata.MLMetric"
    }
    ],
    "name": "onlineMetrics",
    "default": null,
    "doc": "Metrics of the MLModel used in production"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "mlFeature"
    ],
    "isLineage": true,
    "name": "Consumes"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "mlFeatures",
    "default": null,
    "doc": "List of features used for MLModel training"
    },
    {
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "tags",
    "default": [],
    "doc": "Tags for the MLModel"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "mlModelDeployment"
    ],
    "name": "DeployedTo"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "deployments",
    "default": null,
    "doc": "Deployments for the MLModel"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataJob"
    ],
    "isLineage": true,
    "name": "TrainedBy"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "trainingJobs",
    "default": null,
    "doc": "List of jobs (if any) used to train the model"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataJob"
    ],
    "isLineage": true,
    "isUpstream": false,
    "name": "UsedBy"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "downstreamJobs",
    "default": null,
    "doc": "List of jobs (if any) that use the model"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "mlModelGroup"
    ],
    "isLineage": true,
    "isUpstream": false,
    "name": "MemberOf"
    }
    },
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "groups",
    "default": null,
    "doc": "Groups the model belongs to"
    }
    ],
    "doc": "Properties associated with a ML Model"
    }

    intendedUse

    Intended Use for the ML Model

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "intendedUse"
    },
    "name": "IntendedUse",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "primaryUses",
    "default": null,
    "doc": "Primary Use cases for the MLModel."
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "enum",
    "name": "IntendedUserType",
    "namespace": "com.linkedin.ml.metadata",
    "symbols": [
    "ENTERPRISE",
    "HOBBY",
    "ENTERTAINMENT"
    ]
    }
    }
    ],
    "name": "primaryUsers",
    "default": null,
    "doc": "Primary Intended Users - For example, was the MLModel developed for entertainment purposes, for hobbyists, or enterprise solutions?"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "outOfScopeUses",
    "default": null,
    "doc": "Highlight technology that the MLModel might easily be confused with, or related contexts that users could try to apply the MLModel to."
    }
    ],
    "doc": "Intended Use for the ML Model"
    }

    mlModelFactorPrompts

    Prompts which affect the performance of the MLModel

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlModelFactorPrompts"
    },
    "name": "MLModelFactorPrompts",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "MLModelFactors",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "groups",
    "default": null,
    "doc": "Groups refers to distinct categories with similar characteristics that are present in the evaluation data instances.\nFor human-centric machine learning MLModels, groups are people who share one or multiple characteristics."
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "instrumentation",
    "default": null,
    "doc": "The performance of a MLModel can vary depending on what instruments were used to capture the input to the MLModel.\nFor example, a face detection model may perform differently depending on the camera's hardware and software,\nincluding lens, image stabilization, high dynamic range techniques, and background blurring for portrait mode."
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "environment",
    "default": null,
    "doc": "A further factor affecting MLModel performance is the environment in which it is deployed."
    }
    ],
    "doc": "Factors affecting the performance of the MLModel."
    }
    }
    ],
    "name": "relevantFactors",
    "default": null,
    "doc": "What are foreseeable salient factors for which MLModel performance may vary, and how were these determined?"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "com.linkedin.ml.metadata.MLModelFactors"
    }
    ],
    "name": "evaluationFactors",
    "default": null,
    "doc": "Which factors are being reported, and why were these chosen?"
    }
    ],
    "doc": "Prompts which affect the performance of the MLModel"
    }

    mlModelMetrics

    Metrics to be featured for the MLModel.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlModelMetrics"
    },
    "name": "Metrics",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "performanceMeasures",
    "default": null,
    "doc": "Measures of MLModel performance"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "decisionThreshold",
    "default": null,
    "doc": "Decision Thresholds used (if any)?"
    }
    ],
    "doc": "Metrics to be featured for the MLModel."
    }

    mlModelEvaluationData

    All referenced datasets would ideally point to any set of documents that provide visibility into the source and composition of the dataset.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlModelEvaluationData"
    },
    "name": "EvaluationData",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "BaseData",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.DatasetUrn"
    },
    "type": "string",
    "name": "dataset",
    "doc": "What dataset were used in the MLModel?"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "motivation",
    "default": null,
    "doc": "Why was this dataset chosen?"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "preProcessing",
    "default": null,
    "doc": "How was the data preprocessed (e.g., tokenization of sentences, cropping of images, any filtering such as dropping images without faces)?"
    }
    ],
    "doc": "BaseData record"
    }
    },
    "name": "evaluationData",
    "doc": "Details on the dataset(s) used for the quantitative analyses in the MLModel"
    }
    ],
    "doc": "All referenced datasets would ideally point to any set of documents that provide visibility into the source and composition of the dataset."
    }

    mlModelTrainingData

    Ideally, the MLModel card would contain as much information about the training data as the evaluation data. However, there might be cases where it is not feasible to provide this level of detailed information about the training data. For example, the data may be proprietary, or require a non-disclosure agreement. In these cases, we advocate for basic details about the distributions over groups in the data, as well as any other details that could inform stakeholders on the kinds of biases the model may have encoded.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlModelTrainingData"
    },
    "name": "TrainingData",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "BaseData",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.DatasetUrn"
    },
    "type": "string",
    "name": "dataset",
    "doc": "What dataset were used in the MLModel?"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "motivation",
    "default": null,
    "doc": "Why was this dataset chosen?"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "preProcessing",
    "default": null,
    "doc": "How was the data preprocessed (e.g., tokenization of sentences, cropping of images, any filtering such as dropping images without faces)?"
    }
    ],
    "doc": "BaseData record"
    }
    },
    "name": "trainingData",
    "doc": "Details on the dataset(s) used for training the MLModel"
    }
    ],
    "doc": "Ideally, the MLModel card would contain as much information about the training data as the evaluation data. However, there might be cases where it is not feasible to provide this level of detailed information about the training data. For example, the data may be proprietary, or require a non-disclosure agreement. In these cases, we advocate for basic details about the distributions over groups in the data, as well as any other details that could inform stakeholders on the kinds of biases the model may have encoded."
    }

    mlModelQuantitativeAnalyses

    Quantitative analyses should be disaggregated, that is, broken down by the chosen factors. Quantitative analyses should provide the results of evaluating the MLModel according to the chosen metrics, providing confidence interval values when possible.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlModelQuantitativeAnalyses"
    },
    "name": "QuantitativeAnalyses",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "unitaryResults",
    "default": null,
    "doc": "Link to a dashboard with results showing how the MLModel performed with respect to each factor"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "intersectionalResults",
    "default": null,
    "doc": "Link to a dashboard with results showing how the MLModel performed with respect to the intersection of evaluated factors?"
    }
    ],
    "doc": "Quantitative analyses should be disaggregated, that is, broken down by the chosen factors. Quantitative analyses should provide the results of evaluating the MLModel according to the chosen metrics, providing confidence interval values when possible."
    }

    mlModelEthicalConsiderations

    This section is intended to demonstrate the ethical considerations that went into MLModel development, surfacing ethical challenges and solutions to stakeholders.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlModelEthicalConsiderations"
    },
    "name": "EthicalConsiderations",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "data",
    "default": null,
    "doc": "Does the MLModel use any sensitive data (e.g., protected classes)?"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "humanLife",
    "default": null,
    "doc": " Is the MLModel intended to inform decisions about matters central to human life or flourishing - e.g., health or safety? Or could it be used in such a way?"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "mitigations",
    "default": null,
    "doc": "What risk mitigation strategies were used during MLModel development?"
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "risksAndHarms",
    "default": null,
    "doc": "What risks may be present in MLModel usage? Try to identify the potential recipients, likelihood, and magnitude of harms. If these cannot be determined, note that they were considered but remain unknown."
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "useCases",
    "default": null,
    "doc": "Are there any known MLModel use cases that are especially fraught? This may connect directly to the intended use section"
    }
    ],
    "doc": "This section is intended to demonstrate the ethical considerations that went into MLModel development, surfacing ethical challenges and solutions to stakeholders."
    }

    mlModelCaveatsAndRecommendations

    This section should list additional concerns that were not covered in the previous sections. For example, did the results suggest any further testing? Were there any relevant groups that were not represented in the evaluation dataset? Are there additional recommendations for model use?

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlModelCaveatsAndRecommendations"
    },
    "name": "CaveatsAndRecommendations",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "CaveatDetails",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": [
    "null",
    "boolean"
    ],
    "name": "needsFurtherTesting",
    "default": null,
    "doc": "Did the results suggest any further testing?"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "caveatDescription",
    "default": null,
    "doc": "Caveat Description\nFor ex: Given gender classes are binary (male/not male), which we include as male/female. Further work needed to evaluate across a spectrum of genders."
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "groupsNotRepresented",
    "default": null,
    "doc": "Relevant groups that were not represented in the evaluation dataset?"
    }
    ],
    "doc": "This section should list additional concerns that were not covered in the previous sections. For example, did the results suggest any further testing? Were there any relevant groups that were not represented in the evaluation dataset? Are there additional recommendations for model use?"
    }
    ],
    "name": "caveats",
    "default": null,
    "doc": "This section should list additional concerns that were not covered in the previous sections. For example, did the results suggest any further testing? Were there any relevant groups that were not represented in the evaluation dataset?"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "recommendations",
    "default": null,
    "doc": "Recommendations on where this MLModel should be used."
    },
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": "string"
    }
    ],
    "name": "idealDatasetCharacteristics",
    "default": null,
    "doc": "Ideal characteristics of an evaluation dataset for this MLModel"
    }
    ],
    "doc": "This section should list additional concerns that were not covered in the previous sections. For example, did the results suggest any further testing? Were there any relevant groups that were not represented in the evaluation dataset? Are there additional recommendations for model use?"
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    sourceCode

    Source Code

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "sourceCode"
    },
    "name": "SourceCode",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "SourceCodeUrl",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": {
    "type": "enum",
    "name": "SourceCodeUrlType",
    "namespace": "com.linkedin.ml.metadata",
    "symbols": [
    "ML_MODEL_SOURCE_CODE",
    "TRAINING_PIPELINE_SOURCE_CODE",
    "EVALUATION_PIPELINE_SOURCE_CODE"
    ]
    },
    "name": "type",
    "doc": "Source Code Url Types"
    },
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "sourceCodeUrl",
    "doc": "Source Code Url"
    }
    ],
    "doc": "Source Code Url Entity"
    }
    },
    "name": "sourceCode",
    "doc": "Source Code along with types"
    }
    ],
    "doc": "Source Code"
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    cost

    None

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "cost"
    },
    "name": "Cost",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "ORG_COST_TYPE": "Org Cost Type to which the Cost of this entity should be attributed to"
    },
    "name": "CostType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "ORG_COST_TYPE"
    ],
    "doc": "Type of Cost Code"
    },
    "name": "costType"
    },
    {
    "type": {
    "type": "record",
    "name": "CostCost",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": [
    "null",
    "double"
    ],
    "name": "costId",
    "default": null
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "costCode",
    "default": null
    },
    {
    "type": {
    "type": "enum",
    "name": "CostCostDiscriminator",
    "namespace": "com.linkedin.common",
    "symbols": [
    "costId",
    "costCode"
    ]
    },
    "name": "fieldDiscriminator",
    "doc": "Contains the name of the field that has its value set."
    }
    ]
    },
    "name": "cost"
    }
    ]
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    browsePaths

    Shared aspect containing Browse Paths to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePaths"
    },
    "name": "BrowsePaths",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "fieldName": "browsePaths",
    "fieldType": "BROWSE_PATH"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "paths",
    "doc": "A list of valid browse paths for the entity.\n\nBrowse paths are expected to be forward slash-separated strings. For example: 'prod/snowflake/datasetName'"
    }
    ],
    "doc": "Shared aspect containing Browse Paths to be indexed for an entity."
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    dataPlatformInstance

    The specific instance of the data platform that this entity belongs to

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstance"
    },
    "name": "DataPlatformInstance",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "URN",
    "filterNameOverride": "Platform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data Platform"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "platformInstance",
    "fieldType": "URN",
    "filterNameOverride": "Platform Instance"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "instance",
    "default": null,
    "doc": "Instance of the data platform (e.g. db instance)"
    }
    ],
    "doc": "The specific instance of the data platform that this entity belongs to"
    }

    browsePathsV2

    Shared aspect containing a Browse Path to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePathsV2"
    },
    "name": "BrowsePathsV2",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*/id": {
    "fieldName": "browsePathV2",
    "fieldType": "BROWSE_PATH_V2"
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "BrowsePathEntry",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "string",
    "name": "id",
    "doc": "The ID of the browse path entry. This is what gets stored in the index.\nIf there's an urn associated with this entry, id and urn will be the same"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "urn",
    "default": null,
    "doc": "Optional urn pointing to some entity in DataHub"
    }
    ],
    "doc": "Represents a single level in an entity's browsePathV2"
    }
    },
    "name": "path",
    "doc": "A valid browse path for the entity. This field is provided by DataHub by default.\nThis aspect is a newer version of browsePaths where we can encode more information in the path.\nThis path is also based on containers for a given entity if it has containers.\n\nThis is stored in elasticsearch as unit-separator delimited strings and only includes platform specific folders or containers.\nThese paths should not include high level info captured elsewhere ie. Platform and Environment."
    }
    ],
    "doc": "Shared aspect containing a Browse Path to be indexed for an entity."
    }

    glossaryTerms

    Related business terms information

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }

    editableMlModelProperties

    Properties associated with a ML Model editable from the UI

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "editableMlModelProperties"
    },
    "name": "EditableMLModelProperties",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "Searchable": {
    "fieldName": "editedDescription",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the ml model"
    }
    ],
    "doc": "Properties associated with a ML Model editable from the UI"
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • Consumes

      • MlFeature via mlModelProperties.mlFeatures
    • DeployedTo

      • MlModelDeployment via mlModelProperties.deployments
    • TrainedBy

      • DataJob via mlModelProperties.trainingJobs
    • UsedBy

      • DataJob via mlModelProperties.downstreamJobs
    • MemberOf

      • MlModelGroup via mlModelProperties.groups
    • TaggedWith

      • Tag via globalTags.tags
    • TermedWith

      • GlossaryTerm via glossaryTerms.terms.urn
    • AssociatedWith

      • Domain via domains.domains

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/mlmodeldeployment/index.html b/docs/generated/metamodel/entities/mlmodeldeployment/index.html index e7ff8769487ab..31d968cea3bba 100644 --- a/docs/generated/metamodel/entities/mlmodeldeployment/index.html +++ b/docs/generated/metamodel/entities/mlmodeldeployment/index.html @@ -8,14 +8,14 @@ - +

    MlModelDeployment

    Aspects

    mlModelDeploymentKey

    Key for an ML model deployment

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlModelDeploymentKey"
    },
    "name": "MLModelDeploymentKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Standardized platform urn for the model Deployment"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Name of the MLModelDeployment"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "TEXT_PARTIAL",
    "filterNameOverride": "Environment",
    "queryByDefault": false
    },
    "type": {
    "type": "enum",
    "symbolDocs": {
    "CORP": "Designates corporation fabrics",
    "DEV": "Designates development fabrics",
    "EI": "Designates early-integration fabrics",
    "NON_PROD": "Designates non-production fabrics",
    "PRE": "Designates pre-production fabrics",
    "PROD": "Designates production fabrics",
    "QA": "Designates quality assurance fabrics",
    "STG": "Designates staging fabrics",
    "TEST": "Designates testing fabrics",
    "UAT": "Designates user acceptance testing fabrics"
    },
    "name": "FabricType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "DEV",
    "TEST",
    "QA",
    "UAT",
    "EI",
    "PRE",
    "STG",
    "NON_PROD",
    "PROD",
    "CORP"
    ],
    "doc": "Fabric group type"
    },
    "name": "origin",
    "doc": "Fabric type where model Deployment belongs to or where it was generated"
    }
    ],
    "doc": "Key for an ML model deployment"
    }

    mlModelDeploymentProperties

    Properties associated with an ML Model Deployment

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlModelDeploymentProperties"
    },
    "name": "MLModelDeploymentProperties",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "externalUrl",
    "default": null,
    "doc": "URL where the reference exist"
    },
    {
    "Searchable": {
    "fieldType": "TEXT",
    "hasValuesFieldName": "hasDescription"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the MLModelDeployment"
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "createdAt",
    "default": null,
    "doc": "Date when the MLModelDeployment was developed"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "VersionTag",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "versionTag",
    "default": null
    }
    ],
    "doc": "A resource-defined string representing the resource state for the purpose of concurrency control"
    }
    ],
    "name": "version",
    "default": null,
    "doc": "Version of the MLModelDeployment"
    },
    {
    "type": [
    "null",
    {
    "type": "enum",
    "symbolDocs": {
    "CREATING": "Deployments being created.",
    "DELETING": "Deployments being deleted.",
    "FAILED": "Deployments with an error state.",
    "IN_SERVICE": "Deployments that are active.",
    "OUT_OF_SERVICE": "Deployments out of service.",
    "ROLLING_BACK": "Deployments being reverted to a previous version.",
    "UNKNOWN": "Deployments with unknown/unmappable state.",
    "UPDATING": "Deployments being updated."
    },
    "name": "DeploymentStatus",
    "namespace": "com.linkedin.ml.metadata",
    "symbols": [
    "OUT_OF_SERVICE",
    "CREATING",
    "UPDATING",
    "ROLLING_BACK",
    "IN_SERVICE",
    "DELETING",
    "FAILED",
    "UNKNOWN"
    ],
    "doc": "Model endpoint statuses"
    }
    ],
    "name": "status",
    "default": null,
    "doc": "Status of the deployment"
    }
    ],
    "doc": "Properties associated with an ML Model Deployment"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that overseas the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    dataPlatformInstance

    The specific instance of the data platform that this entity belongs to

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstance"
    },
    "name": "DataPlatformInstance",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "URN",
    "filterNameOverride": "Platform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data Platform"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "platformInstance",
    "fieldType": "URN",
    "filterNameOverride": "Platform Instance"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "instance",
    "default": null,
    "doc": "Instance of the data platform (e.g. db instance)"
    }
    ],
    "doc": "The specific instance of the data platform that this entity belongs to"
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • TaggedWith

      • Tag via globalTags.tags

    Incoming

    These are the relationships stored in other entity's aspects

    • DeployedTo

      • MlModel via mlModelProperties.deployments

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/mlmodelgroup/index.html b/docs/generated/metamodel/entities/mlmodelgroup/index.html index 14873b0467ea4..e32d4c5de27b3 100644 --- a/docs/generated/metamodel/entities/mlmodelgroup/index.html +++ b/docs/generated/metamodel/entities/mlmodelgroup/index.html @@ -8,14 +8,14 @@ - +

    MlModelGroup

    Aspects

    mlModelGroupKey

    Key for an ML model group

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlModelGroupKey"
    },
    "name": "MLModelGroupKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Standardized platform urn for the model group"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Name of the MLModelGroup"
    },
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL",
    "queryByDefault": false
    },
    "type": {
    "type": "enum",
    "symbolDocs": {
    "CORP": "Designates corporation fabrics",
    "DEV": "Designates development fabrics",
    "EI": "Designates early-integration fabrics",
    "NON_PROD": "Designates non-production fabrics",
    "PRE": "Designates pre-production fabrics",
    "PROD": "Designates production fabrics",
    "QA": "Designates quality assurance fabrics",
    "STG": "Designates staging fabrics",
    "TEST": "Designates testing fabrics",
    "UAT": "Designates user acceptance testing fabrics"
    },
    "name": "FabricType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "DEV",
    "TEST",
    "QA",
    "UAT",
    "EI",
    "PRE",
    "STG",
    "NON_PROD",
    "PROD",
    "CORP"
    ],
    "doc": "Fabric group type"
    },
    "name": "origin",
    "doc": "Fabric type where model group belongs to or where it was generated"
    }
    ],
    "doc": "Key for an ML model group"
    }

    mlModelGroupProperties

    Properties associated with an ML Model Group

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlModelGroupProperties"
    },
    "name": "MLModelGroupProperties",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "queryByDefault": true
    }
    },
    "type": {
    "type": "map",
    "values": "string"
    },
    "name": "customProperties",
    "default": {},
    "doc": "Custom property bag."
    },
    {
    "Searchable": {
    "fieldType": "TEXT",
    "hasValuesFieldName": "hasDescription"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the MLModelGroup"
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "createdAt",
    "default": null,
    "doc": "Date when the MLModelGroup was developed"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "VersionTag",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "versionTag",
    "default": null
    }
    ],
    "doc": "A resource-defined string representing the resource state for the purpose of concurrency control"
    }
    ],
    "name": "version",
    "default": null,
    "doc": "Version of the MLModelGroup"
    }
    ],
    "doc": "Properties associated with an ML Model Group"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that overseas the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    browsePaths

    Shared aspect containing Browse Paths to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePaths"
    },
    "name": "BrowsePaths",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "fieldName": "browsePaths",
    "fieldType": "BROWSE_PATH"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "paths",
    "doc": "A list of valid browse paths for the entity.\n\nBrowse paths are expected to be forward slash-separated strings. For example: 'prod/snowflake/datasetName'"
    }
    ],
    "doc": "Shared aspect containing Browse Paths to be indexed for an entity."
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    dataPlatformInstance

    The specific instance of the data platform that this entity belongs to

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstance"
    },
    "name": "DataPlatformInstance",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "URN",
    "filterNameOverride": "Platform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data Platform"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "platformInstance",
    "fieldType": "URN",
    "filterNameOverride": "Platform Instance"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "instance",
    "default": null,
    "doc": "Instance of the data platform (e.g. db instance)"
    }
    ],
    "doc": "The specific instance of the data platform that this entity belongs to"
    }

    browsePathsV2

    Shared aspect containing a Browse Path to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePathsV2"
    },
    "name": "BrowsePathsV2",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*/id": {
    "fieldName": "browsePathV2",
    "fieldType": "BROWSE_PATH_V2"
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "BrowsePathEntry",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "string",
    "name": "id",
    "doc": "The ID of the browse path entry. This is what gets stored in the index.\nIf there's an urn associated with this entry, id and urn will be the same"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "urn",
    "default": null,
    "doc": "Optional urn pointing to some entity in DataHub"
    }
    ],
    "doc": "Represents a single level in an entity's browsePathV2"
    }
    },
    "name": "path",
    "doc": "A valid browse path for the entity. This field is provided by DataHub by default.\nThis aspect is a newer version of browsePaths where we can encode more information in the path.\nThis path is also based on containers for a given entity if it has containers.\n\nThis is stored in elasticsearch as unit-separator delimited strings and only includes platform specific folders or containers.\nThese paths should not include high level info captured elsewhere ie. Platform and Environment."
    }
    ],
    "doc": "Shared aspect containing a Browse Path to be indexed for an entity."
    }

    glossaryTerms

    Related business terms information

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }

    editableMlModelGroupProperties

    Properties associated with an ML Model Group editable from the UI

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "editableMlModelGroupProperties"
    },
    "name": "EditableMLModelGroupProperties",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "Searchable": {
    "fieldName": "editedDescription",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the ml model group"
    }
    ],
    "doc": "Properties associated with an ML Model Group editable from the UI"
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • TaggedWith

      • Tag via globalTags.tags
    • TermedWith

      • GlossaryTerm via glossaryTerms.terms.urn
    • AssociatedWith

      • Domain via domains.domains

    Incoming

    These are the relationships stored in other entity's aspects

    • MemberOf

      • MlModel via mlModelProperties.groups

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/mlprimarykey/index.html b/docs/generated/metamodel/entities/mlprimarykey/index.html index 8fb68f1346c63..a0b5c52a5706d 100644 --- a/docs/generated/metamodel/entities/mlprimarykey/index.html +++ b/docs/generated/metamodel/entities/mlprimarykey/index.html @@ -8,14 +8,14 @@ - +

    MlPrimaryKey

    Aspects

    mlPrimaryKeyKey

    Key for an MLPrimaryKey

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlPrimaryKeyKey"
    },
    "name": "MLPrimaryKeyKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": "string",
    "name": "featureNamespace",
    "doc": "Namespace for the primary key"
    },
    {
    "Searchable": {
    "boostScore": 8.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Name of the primary key"
    }
    ],
    "doc": "Key for an MLPrimaryKey"
    }

    mlPrimaryKeyProperties

    Properties associated with a MLPrimaryKey

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "mlPrimaryKeyProperties"
    },
    "name": "MLPrimaryKeyProperties",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the MLPrimaryKey"
    },
    {
    "type": [
    "null",
    {
    "type": "enum",
    "symbolDocs": {
    "AUDIO": "Audio Data",
    "BINARY": "Binary data is discrete data that can be in only one of two categories - either yes or no, 1 or 0, off or on, etc",
    "BYTE": "Bytes data are binary-encoded values that can represent complex objects.",
    "CONTINUOUS": "Continuous data are made of uncountable values, often the result of a measurement such as height, weight, age etc.",
    "COUNT": "Count data is discrete whole number data - no negative numbers here.\nCount data often has many small values, such as zero and one.",
    "IMAGE": "Image Data",
    "INTERVAL": "Interval data has equal spaces between the numbers and does not represent a temporal pattern.\nExamples include percentages, temperatures, and income.",
    "MAP": "Mapping Data Type ex: dict, map",
    "NOMINAL": "Nominal data is made of discrete values with no numerical relationship between the different categories - mean and median are meaningless.\nAnimal species is one example. For example, pig is not higher than bird and lower than fish.",
    "ORDINAL": "Ordinal data are discrete integers that can be ranked or sorted.\nFor example, the distance between first and second may not be the same as the distance between second and third.",
    "SEQUENCE": "Sequence Data Type ex: list, tuple, range",
    "SET": "Set Data Type ex: set, frozenset",
    "TEXT": "Text Data",
    "TIME": "Time data is a cyclical, repeating continuous form of data.\nThe relevant time features can be any period- daily, weekly, monthly, annual, etc.",
    "UNKNOWN": "Unknown data are data that we don't know the type for.",
    "USELESS": "Useless data is unique, discrete data with no potential relationship with the outcome variable.\nA useless feature has high cardinality. An example would be bank account numbers that were generated randomly.",
    "VIDEO": "Video Data"
    },
    "name": "MLFeatureDataType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "USELESS",
    "NOMINAL",
    "ORDINAL",
    "BINARY",
    "COUNT",
    "TIME",
    "INTERVAL",
    "IMAGE",
    "VIDEO",
    "AUDIO",
    "TEXT",
    "MAP",
    "SEQUENCE",
    "SET",
    "CONTINUOUS",
    "BYTE",
    "UNKNOWN"
    ],
    "doc": "MLFeature Data Type"
    }
    ],
    "name": "dataType",
    "default": null,
    "doc": "Data Type of the MLPrimaryKey"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "VersionTag",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "versionTag",
    "default": null
    }
    ],
    "doc": "A resource-defined string representing the resource state for the purpose of concurrency control"
    }
    ],
    "name": "version",
    "default": null,
    "doc": "Version of the MLPrimaryKey"
    },
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "dataset"
    ],
    "isLineage": true,
    "name": "DerivedFrom"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "sources",
    "doc": "Source of the MLPrimaryKey"
    }
    ],
    "doc": "Properties associated with a MLPrimaryKey"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that overseas the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    dataPlatformInstance

    The specific instance of the data platform that this entity belongs to

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstance"
    },
    "name": "DataPlatformInstance",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "URN",
    "filterNameOverride": "Platform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data Platform"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "platformInstance",
    "fieldType": "URN",
    "filterNameOverride": "Platform Instance"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "instance",
    "default": null,
    "doc": "Instance of the data platform (e.g. db instance)"
    }
    ],
    "doc": "The specific instance of the data platform that this entity belongs to"
    }

    glossaryTerms

    Related business terms information

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }

    editableMlPrimaryKeyProperties

    Properties associated with a MLPrimaryKey editable from the UI

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "editableMlPrimaryKeyProperties"
    },
    "name": "EditableMLPrimaryKeyProperties",
    "namespace": "com.linkedin.ml.metadata",
    "fields": [
    {
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the MLPrimaryKey"
    }
    ],
    "doc": "Properties associated with a MLPrimaryKey editable from the UI"
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • DerivedFrom

      • Dataset via mlPrimaryKeyProperties.sources
    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • TaggedWith

      • Tag via globalTags.tags
    • TermedWith

      • GlossaryTerm via glossaryTerms.terms.urn
    • AssociatedWith

      • Domain via domains.domains

    Incoming

    These are the relationships stored in other entities' aspects

    • KeyedBy

      • MlFeatureTable via mlFeatureTableProperties.mlPrimaryKeys

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/notebook/index.html b/docs/generated/metamodel/entities/notebook/index.html index d4011168ef078..ddf6acda414df 100644 --- a/docs/generated/metamodel/entities/notebook/index.html +++ b/docs/generated/metamodel/entities/notebook/index.html @@ -8,7 +8,7 @@ - + @@ -22,7 +22,7 @@ Note: This is IN BETA version

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "editableNotebookProperties"
    },
    "name": "EditableNotebookProperties",
    "namespace": "com.linkedin.notebook",
    "fields": [
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "created",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "An AuditStamp corresponding to the creation of this resource/association/sub-resource. A value of 0 for time indicates missing data."
    },
    {
    "type": "com.linkedin.common.AuditStamp",
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "An AuditStamp corresponding to the last modification of this resource/association/sub-resource. If no modification has happened since creation, lastModified should be the same as created. A value of 0 for time indicates missing data."
    },
    {
    "type": [
    "null",
    "com.linkedin.common.AuditStamp"
    ],
    "name": "deleted",
    "default": null,
    "doc": "An AuditStamp corresponding to the deletion of this resource/association/sub-resource. Logically, deleted MUST have a later timestamp than creation. It may or may not have the same time as lastModified depending upon the resource/association/sub-resource semantics."
    },
    {
    "Searchable": {
    "fieldName": "editedDescription",
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Edited documentation of the Notebook"
    }
    ],
    "doc": "Stores editable changes made to properties. This separates changes made from\ningestion pipelines and edits in the UI to avoid accidental overwrites of user-provided data by ingestion pipelines\nNote: This is IN BETA version"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that overseas the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    globalTags

    Tag aspect used for applying tags to an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "globalTags"
    },
    "name": "GlobalTags",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "/*/tag": {
    "entityTypes": [
    "tag"
    ],
    "name": "TaggedWith"
    }
    },
    "Searchable": {
    "/*/tag": {
    "addToFilters": true,
    "boostScore": 0.5,
    "fieldName": "tags",
    "fieldType": "URN",
    "filterNameOverride": "Tag",
    "hasValuesFieldName": "hasTags",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "TagAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.urn.TagUrn"
    },
    "type": "string",
    "name": "tag",
    "doc": "Urn of the applied tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied tag. For now, just an Urn. In the future we can extend this with other properties, e.g.\npropagation parameters."
    }
    },
    "name": "tags",
    "doc": "Tags associated with a given entity"
    }
    ],
    "doc": "Tag aspect used for applying tags to an entity"
    }

    glossaryTerms

    Related business terms information

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "glossaryTerms"
    },
    "name": "GlossaryTerms",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "GlossaryTermAssociation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "glossaryTerm"
    ],
    "name": "TermedWith"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "glossaryTerms",
    "fieldType": "URN",
    "filterNameOverride": "Glossary Term",
    "hasValuesFieldName": "hasGlossaryTerms"
    },
    "java": {
    "class": "com.linkedin.common.urn.GlossaryTermUrn"
    },
    "type": "string",
    "name": "urn",
    "doc": "Urn of the applied glossary term"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "context",
    "default": null,
    "doc": "Additional context about the association"
    }
    ],
    "doc": "Properties of an applied glossary term."
    }
    },
    "name": "terms",
    "doc": "The related business terms"
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "auditStamp",
    "doc": "Audit stamp containing who reported the related business term"
    }
    ],
    "doc": "Related business terms information"
    }

    browsePaths

    Shared aspect containing Browse Paths to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePaths"
    },
    "name": "BrowsePaths",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "fieldName": "browsePaths",
    "fieldType": "BROWSE_PATH"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "paths",
    "doc": "A list of valid browse paths for the entity.\n\nBrowse paths are expected to be forward slash-separated strings. For example: 'prod/snowflake/datasetName'"
    }
    ],
    "doc": "Shared aspect containing Browse Paths to be indexed for an entity."
    }

    institutionalMemory

    Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "institutionalMemory"
    },
    "name": "InstitutionalMemory",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "InstitutionalMemoryMetadata",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "url",
    "doc": "Link to an engineering design document or a wiki page."
    },
    {
    "type": "string",
    "name": "description",
    "doc": "Description of the link."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "createStamp",
    "doc": "Audit stamp associated with creation of this record"
    }
    ],
    "doc": "Metadata corresponding to a record of institutional memory."
    }
    },
    "name": "elements",
    "doc": "List of records that represent institutional memory of an entity. Each record consists of a link, description, creator and timestamps associated with that record."
    }
    ],
    "doc": "Institutional memory of an entity. This is a way to link to relevant documentation and provide description of the documentation. Institutional or tribal knowledge is very important for users to leverage the entity."
    }

    domains

    Links from an Asset to its Domains

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "domains"
    },
    "name": "Domains",
    "namespace": "com.linkedin.domain",
    "fields": [
    {
    "Relationship": {
    "/*": {
    "entityTypes": [
    "domain"
    ],
    "name": "AssociatedWith"
    }
    },
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldName": "domains",
    "fieldType": "URN",
    "filterNameOverride": "Domain",
    "hasValuesFieldName": "hasDomain"
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "domains",
    "doc": "The Domains attached to an Asset"
    }
    ],
    "doc": "Links from an Asset to its Domains"
    }

    subTypes

    Sub Types. Use this aspect to specialize a generic Entity e.g. Making a Dataset also be a View or also be a LookerExplore

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "subTypes"
    },
    "name": "SubTypes",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*": {
    "addToFilters": true,
    "fieldType": "KEYWORD",
    "filterNameOverride": "Sub Type",
    "queryByDefault": true
    }
    },
    "type": {
    "type": "array",
    "items": "string"
    },
    "name": "typeNames",
    "doc": "The names of the specific types."
    }
    ],
    "doc": "Sub Types. Use this aspect to specialize a generic Entity\ne.g. Making a Dataset also be a View or also be a LookerExplore"
    }

    dataPlatformInstance

    The specific instance of the data platform that this entity belongs to

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "dataPlatformInstance"
    },
    "name": "DataPlatformInstance",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "addToFilters": true,
    "fieldType": "URN",
    "filterNameOverride": "Platform"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "platform",
    "doc": "Data Platform"
    },
    {
    "Searchable": {
    "addToFilters": true,
    "fieldName": "platformInstance",
    "fieldType": "URN",
    "filterNameOverride": "Platform Instance"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "instance",
    "default": null,
    "doc": "Instance of the data platform (e.g. db instance)"
    }
    ],
    "doc": "The specific instance of the data platform that this entity belongs to"
    }

    browsePathsV2

    Shared aspect containing a Browse Path to be indexed for an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "browsePathsV2"
    },
    "name": "BrowsePathsV2",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "/*/id": {
    "fieldName": "browsePathV2",
    "fieldType": "BROWSE_PATH_V2"
    }
    },
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "BrowsePathEntry",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "string",
    "name": "id",
    "doc": "The ID of the browse path entry. This is what gets stored in the index.\nIf there's an urn associated with this entry, id and urn will be the same"
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "urn",
    "default": null,
    "doc": "Optional urn pointing to some entity in DataHub"
    }
    ],
    "doc": "Represents a single level in an entity's browsePathV2"
    }
    },
    "name": "path",
    "doc": "A valid browse path for the entity. This field is provided by DataHub by default.\nThis aspect is a newer version of browsePaths where we can encode more information in the path.\nThis path is also based on containers for a given entity if it has containers.\n\nThis is stored in elasticsearch as unit-separator delimited strings and only includes platform specific folders or containers.\nThese paths should not include high level info captured elsewhere ie. Platform and Environment."
    }
    ],
    "doc": "Shared aspect containing a Browse Path to be indexed for an entity."
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn
    • TaggedWith

      • Tag via globalTags.tags
    • TermedWith

      • GlossaryTerm via glossaryTerms.terms.urn
    • AssociatedWith

      • Domain via domains.domains

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/ownershiptype/index.html b/docs/generated/metamodel/entities/ownershiptype/index.html index 44b1fc0a4e494..dc8740b858d88 100644 --- a/docs/generated/metamodel/entities/ownershiptype/index.html +++ b/docs/generated/metamodel/entities/ownershiptype/index.html @@ -8,14 +8,14 @@ - +

    OwnershipType

    Ownership Type represents a user-created ownership category for a person or group who is responsible for an asset.

    Aspects

    ownershipTypeInfo

    Information about an ownership type

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownershipTypeInfo"
    },
    "name": "OwnershipTypeInfo",
    "namespace": "com.linkedin.ownership",
    "fields": [
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Display name of the Ownership Type"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Description of the Ownership Type"
    },
    {
    "Searchable": {
    "/actor": {
    "fieldName": "createdBy",
    "fieldType": "URN"
    },
    "/time": {
    "fieldName": "createdAt",
    "fieldType": "DATETIME"
    }
    },
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "created",
    "doc": "Audit stamp capturing the time and actor who created the Ownership Type."
    },
    {
    "Searchable": {
    "/actor": {
    "fieldName": "lastModifiedBy",
    "fieldType": "URN"
    },
    "/time": {
    "fieldName": "lastModifiedAt",
    "fieldType": "DATETIME"
    }
    },
    "type": "com.linkedin.common.AuditStamp",
    "name": "lastModified",
    "doc": "Audit stamp capturing the time and actor who last modified the Ownership Type."
    }
    ],
    "doc": "Information about an ownership type"
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    Relationships

    Incoming

    These are the relationships stored in other entity's aspects

    • ownershipType

      • Dataset via ownership.owners.typeUrn
      • DataJob via ownership.owners.typeUrn
      • DataFlow via ownership.owners.typeUrn
      • DataProcess via ownership.owners.typeUrn
      • Chart via ownership.owners.typeUrn
      • Dashboard via ownership.owners.typeUrn
      • Notebook via ownership.owners.typeUrn
      • CorpGroup via ownership.owners.typeUrn
      • Domain via ownership.owners.typeUrn
      • Container via ownership.owners.typeUrn
      • Tag via ownership.owners.typeUrn
      • GlossaryTerm via ownership.owners.typeUrn
      • GlossaryNode via ownership.owners.typeUrn
      • DataPlatformInstance via ownership.owners.typeUrn
      • MlModel via ownership.owners.typeUrn
      • MlModelGroup via ownership.owners.typeUrn
      • MlModelDeployment via ownership.owners.typeUrn
      • MlFeatureTable via ownership.owners.typeUrn
      • MlFeature via ownership.owners.typeUrn
      • MlPrimaryKey via ownership.owners.typeUrn
      • DataProduct via ownership.owners.typeUrn

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/post/index.html b/docs/generated/metamodel/entities/post/index.html index 23174c351ae3b..90cb44134c222 100644 --- a/docs/generated/metamodel/entities/post/index.html +++ b/docs/generated/metamodel/entities/post/index.html @@ -8,13 +8,13 @@ - +

    Post

    Aspects

    postInfo

    Information about a DataHub Post.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "postInfo"
    },
    "name": "PostInfo",
    "namespace": "com.linkedin.post",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "HOME_PAGE_ANNOUNCEMENT": "The Post is an Home Page announcement."
    },
    "name": "PostType",
    "namespace": "com.linkedin.post",
    "symbols": [
    "HOME_PAGE_ANNOUNCEMENT"
    ],
    "doc": "Enum defining types of Posts."
    },
    "name": "type",
    "doc": "Type of the Post."
    },
    {
    "type": {
    "type": "record",
    "name": "PostContent",
    "namespace": "com.linkedin.post",
    "fields": [
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": "string",
    "name": "title",
    "doc": "Title of the post."
    },
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "LINK": "Link content",
    "TEXT": "Text content"
    },
    "name": "PostContentType",
    "namespace": "com.linkedin.post",
    "symbols": [
    "TEXT",
    "LINK"
    ],
    "doc": "Enum defining the type of content held in a Post."
    },
    "name": "type",
    "doc": "Type of content held in the post."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Optional description of the post."
    },
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "link",
    "default": null,
    "doc": "Optional link that the post is associated with."
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "Media",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "IMAGE": "The Media holds an image."
    },
    "name": "MediaType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "IMAGE"
    ],
    "doc": "Enum defining the type of content a Media object holds."
    },
    "name": "type",
    "doc": "Type of content the Media is storing, e.g. image, video, etc."
    },
    {
    "java": {
    "class": "com.linkedin.common.url.Url",
    "coercerClass": "com.linkedin.common.url.UrlCoercer"
    },
    "type": "string",
    "name": "location",
    "doc": "Where the media content is stored."
    }
    ],
    "doc": "Carries information about a piece of media content, e.g. an image, including its type and where it is stored."
    }
    ],
    "name": "media",
    "default": null,
    "doc": "Optional media that the post is storing"
    }
    ],
    "doc": "Content stored inside a Post."
    },
    "name": "content",
    "doc": "Content stored in the post."
    },
    {
    "Searchable": {
    "fieldType": "COUNT"
    },
    "type": "long",
    "name": "created",
    "doc": "The time at which the post was initially created"
    },
    {
    "Searchable": {
    "fieldType": "COUNT"
    },
    "type": "long",
    "name": "lastModified",
    "doc": "The time at which the post was last modified"
    }
    ],
    "doc": "Information about a DataHub Post."
    }

    Relationships

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/query/index.html b/docs/generated/metamodel/entities/query/index.html index 6963dcfc8ac36..e200a307190e2 100644 --- a/docs/generated/metamodel/entities/query/index.html +++ b/docs/generated/metamodel/entities/query/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    Query

    Aspects

    queryProperties

    Information about a Query against one or more data assets (e.g. Tables or Views).

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "queryProperties"
    },
    "name": "QueryProperties",
    "namespace": "com.linkedin.query",
    "fields": [
    {
    "type": {
    "type": "record",
    "name": "QueryStatement",
    "namespace": "com.linkedin.query",
    "fields": [
    {
    "type": "string",
    "name": "value",
    "doc": "The query text"
    },
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "SQL": "A SQL Query"
    },
    "name": "QueryLanguage",
    "namespace": "com.linkedin.query",
    "symbols": [
    "SQL"
    ]
    },
    "name": "language",
    "default": "SQL",
    "doc": "The language of the Query, e.g. SQL."
    }
    ],
    "doc": "A query statement against one or more data assets."
    },
    "name": "statement",
    "doc": "The Query Statement."
    },
    {
    "Searchable": {},
    "type": {
    "type": "enum",
    "symbolDocs": {
    "MANUAL": "The query was entered manually by a user (via the UI)."
    },
    "name": "QuerySource",
    "namespace": "com.linkedin.query",
    "symbols": [
    "MANUAL"
    ]
    },
    "name": "source",
    "doc": "The source of the Query"
    },
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldType": "WORD_GRAM"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "name",
    "default": null,
    "doc": "Optional display name to identify the query."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "The Query description."
    },
    {
    "Searchable": {
    "/actor": {
    "fieldName": "createdBy",
    "fieldType": "URN"
    },
    "/time": {
    "fieldName": "createdAt",
    "fieldType": "DATETIME"
    }
    },
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "created",
    "doc": "Audit stamp capturing the time and actor who created the Query."
    },
    {
    "Searchable": {
    "/actor": {
    "fieldName": "lastModifiedBy",
    "fieldType": "URN"
    },
    "/time": {
    "fieldName": "lastModifiedAt",
    "fieldType": "DATETIME"
    }
    },
    "type": "com.linkedin.common.AuditStamp",
    "name": "lastModified",
    "doc": "Audit stamp capturing the time and actor who last modified the Query."
    }
    ],
    "doc": "Information about a Query against one or more data assets (e.g. Tables or Views)."
    }

    querySubjects

    Information about the subjects of a particular Query, i.e. the assets being queried.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "querySubjects"
    },
    "name": "QuerySubjects",
    "namespace": "com.linkedin.query",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "QuerySubject",
    "namespace": "com.linkedin.query",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "dataset",
    "schemaField"
    ],
    "name": "IsAssociatedWith"
    },
    "Searchable": {
    "fieldName": "entities",
    "fieldType": "URN"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "entity",
    "doc": "An entity which is the subject of a query."
    }
    ],
    "doc": "A single subject of a particular query.\nIn the future, we may evolve this model to include richer details\nabout the Query Subject in relation to the query."
    }
    },
    "name": "subjects",
    "doc": "One or more subjects of the query.\n\nIn single-asset queries (e.g. table select), this will contain the Table reference\nand optionally schema field references.\n\nIn multi-asset queries (e.g. table joins), this may contain multiple Table references\nand optionally schema field references."
    }
    ],
    "doc": "Information about the subjects of a particular Query, i.e. the assets\nbeing queried."
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • IsAssociatedWith

      • Dataset via querySubjects.subjects.entity
      • SchemaField via querySubjects.subjects.entity

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/role/index.html b/docs/generated/metamodel/entities/role/index.html index b44aa26ff2228..b9c66a12b87af 100644 --- a/docs/generated/metamodel/entities/role/index.html +++ b/docs/generated/metamodel/entities/role/index.html @@ -8,13 +8,13 @@ - +

    Role

    Aspects

    roleProperties

    Information about an ExternalRoleProperties

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "roleProperties"
    },
    "name": "RoleProperties",
    "namespace": "com.linkedin.role",
    "fields": [
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Display name of the IAM Role in the external system"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Description of the IAM Role"
    },
    {
    "type": "string",
    "name": "type",
    "doc": "Can be READ, ADMIN, WRITE"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "requestUrl",
    "default": null,
    "doc": "Link to access external access management"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    }
    ],
    "name": "created",
    "default": null,
    "doc": "Created Audit stamp"
    }
    ],
    "doc": "Information about an ExternalRoleProperties"
    }

    actors

    Provisioned users of a role

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "actors"
    },
    "name": "Actors",
    "namespace": "com.linkedin.role",
    "fields": [
    {
    "type": [
    "null",
    {
    "type": "array",
    "items": {
    "type": "record",
    "name": "RoleUser",
    "namespace": "com.linkedin.role",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser"
    ],
    "name": "Has"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "user",
    "doc": "Link provisioned corp user for a role"
    }
    ],
    "doc": "Provisioned users of a role"
    }
    }
    ],
    "name": "users",
    "default": null,
    "doc": "List of provisioned users of a role"
    }
    ],
    "doc": "Provisioned users of a role"
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • Has

      • Corpuser via actors.users.user

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/schemafield/index.html b/docs/generated/metamodel/entities/schemafield/index.html index 6af0f2413fc1b..3b875ca6d6e4f 100644 --- a/docs/generated/metamodel/entities/schemafield/index.html +++ b/docs/generated/metamodel/entities/schemafield/index.html @@ -8,13 +8,13 @@ - +

    SchemaField

    Aspects

    schemaFieldKey

    Key for a SchemaField

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "schemaFieldKey"
    },
    "name": "SchemaFieldKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "Searchable": {
    "fieldType": "URN"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "parent",
    "doc": "Parent associated with the schema field"
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "type": "string",
    "name": "fieldPath",
    "doc": "fieldPath identifying the schema field"
    }
    ],
    "doc": "Key for a SchemaField"
    }

    Relationships

    Incoming

    These are the relationships stored in other entity's aspects

    • DownstreamOf

      • Dataset via upstreamLineage.fineGrainedLineages
    • ForeignKeyTo

      • Dataset via schemaMetadata.foreignKeys.foreignFields
      • GlossaryTerm via schemaMetadata.foreignKeys.foreignFields
    • Consumes

      • DataJob via dataJobInputOutput.inputDatasetFields
    • Produces

      • DataJob via dataJobInputOutput.outputDatasetFields
    • consumesField

      • Chart via inputFields.fields.schemaFieldUrn
      • Dashboard via inputFields.fields.schemaFieldUrn
    • Asserts

      • Assertion via assertionInfo.datasetAssertion.fields

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/tag/index.html b/docs/generated/metamodel/entities/tag/index.html index 8503c87437ce9..f4dbc492be0bc 100644 --- a/docs/generated/metamodel/entities/tag/index.html +++ b/docs/generated/metamodel/entities/tag/index.html @@ -8,14 +8,14 @@ - +

    Tag

    Aspects

    tagKey

    Key for a Tag

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "tagKey"
    },
    "name": "TagKey",
    "namespace": "com.linkedin.metadata.key",
    "fields": [
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldName": "id",
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "The tag name, which serves as a unique id"
    }
    ],
    "doc": "Key for a Tag"
    }

    ownership

    Ownership information of an entity.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "ownership"
    },
    "name": "Ownership",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "array",
    "items": {
    "type": "record",
    "name": "Owner",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Relationship": {
    "entityTypes": [
    "corpuser",
    "corpGroup"
    ],
    "name": "OwnedBy"
    },
    "Searchable": {
    "addToFilters": true,
    "fieldName": "owners",
    "fieldType": "URN",
    "filterNameOverride": "Owned By",
    "hasValuesFieldName": "hasOwners",
    "queryByDefault": false
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "owner",
    "doc": "Owner URN, e.g. urn:li:corpuser:ldap, urn:li:corpGroup:group_name, and urn:li:multiProduct:mp_name\n(Caveat: only corpuser is currently supported in the frontend.)"
    },
    {
    "deprecated": true,
    "type": {
    "type": "enum",
    "symbolDocs": {
    "BUSINESS_OWNER": "A person or group who is responsible for logical, or business related, aspects of the asset.",
    "CONSUMER": "A person, group, or service that consumes the data\nDeprecated! Use TECHNICAL_OWNER or BUSINESS_OWNER instead.",
    "CUSTOM": "Set when ownership type is unknown or a when new one is specified as an ownership type entity for which we have no\nenum value for. This is used for backwards compatibility",
    "DATAOWNER": "A person or group that is owning the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DATA_STEWARD": "A steward, expert, or delegate responsible for the asset.",
    "DELEGATE": "A person or a group that oversees the operation, e.g. a DBA or SRE.\nDeprecated! Use TECHNICAL_OWNER instead.",
    "DEVELOPER": "A person or group that is in charge of developing the code\nDeprecated! Use TECHNICAL_OWNER instead.",
    "NONE": "No specific type associated to the owner.",
    "PRODUCER": "A person, group, or service that produces/generates the data\nDeprecated! Use TECHNICAL_OWNER instead.",
    "STAKEHOLDER": "A person or a group that has direct business interest\nDeprecated! Use TECHNICAL_OWNER, BUSINESS_OWNER, or STEWARD instead.",
    "TECHNICAL_OWNER": "A person or group who is responsible for technical aspects of the asset."
    },
    "deprecatedSymbols": {
    "CONSUMER": true,
    "DATAOWNER": true,
    "DELEGATE": true,
    "DEVELOPER": true,
    "PRODUCER": true,
    "STAKEHOLDER": true
    },
    "name": "OwnershipType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "CUSTOM",
    "TECHNICAL_OWNER",
    "BUSINESS_OWNER",
    "DATA_STEWARD",
    "NONE",
    "DEVELOPER",
    "DATAOWNER",
    "DELEGATE",
    "PRODUCER",
    "CONSUMER",
    "STAKEHOLDER"
    ],
    "doc": "Asset owner types"
    },
    "name": "type",
    "doc": "The type of the ownership"
    },
    {
    "Relationship": {
    "entityTypes": [
    "ownershipType"
    ],
    "name": "ownershipType"
    },
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "typeUrn",
    "default": null,
    "doc": "The type of the ownership\nUrn of type O"
    },
    {
    "type": [
    "null",
    {
    "type": "record",
    "name": "OwnershipSource",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "AUDIT": "Auditing system or audit logs",
    "DATABASE": "Database, e.g. GRANTS table",
    "FILE_SYSTEM": "File system, e.g. file/directory owner",
    "ISSUE_TRACKING_SYSTEM": "Issue tracking system, e.g. Jira",
    "MANUAL": "Manually provided by a user",
    "OTHER": "Other sources",
    "SERVICE": "Other ownership-like service, e.g. Nuage, ACL service etc",
    "SOURCE_CONTROL": "SCM system, e.g. GIT, SVN"
    },
    "name": "OwnershipSourceType",
    "namespace": "com.linkedin.common",
    "symbols": [
    "AUDIT",
    "DATABASE",
    "FILE_SYSTEM",
    "ISSUE_TRACKING_SYSTEM",
    "MANUAL",
    "SERVICE",
    "SOURCE_CONTROL",
    "OTHER"
    ]
    },
    "name": "type",
    "doc": "The type of the source"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "url",
    "default": null,
    "doc": "A reference URL for the source"
    }
    ],
    "doc": "Source/provider of the ownership information"
    }
    ],
    "name": "source",
    "default": null,
    "doc": "Source information for the ownership"
    }
    ],
    "doc": "Ownership information"
    }
    },
    "name": "owners",
    "doc": "List of owners of the entity."
    },
    {
    "type": {
    "type": "record",
    "name": "AuditStamp",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "type": "long",
    "name": "time",
    "doc": "When did the resource/association/sub-resource move into the specific lifecycle stage represented by this AuditEvent."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The entity (e.g. a member URN) which will be credited for moving the resource/association/sub-resource into the specific lifecycle stage. It is also the one used to authorize the change."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "impersonator",
    "default": null,
    "doc": "The entity (e.g. a service URN) which performs the change on behalf of the Actor and must be authorized to act as the Actor."
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "message",
    "default": null,
    "doc": "Additional context around how DataHub was informed of the particular change. For example: was the change created by an automated process, or manually."
    }
    ],
    "doc": "Data captured on a resource/association/sub-resource level giving insight into when that resource/association/sub-resource moved into a particular lifecycle stage, and who acted to move it into that specific lifecycle stage."
    },
    "name": "lastModified",
    "default": {
    "actor": "urn:li:corpuser:unknown",
    "impersonator": null,
    "time": 0,
    "message": null
    },
    "doc": "Audit stamp containing who last modified the record and when. A value of 0 in the time field indicates missing data."
    }
    ],
    "doc": "Ownership information of an entity."
    }

    tagProperties

    Properties associated with a Tag

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "tagProperties"
    },
    "name": "TagProperties",
    "namespace": "com.linkedin.tag",
    "fields": [
    {
    "Searchable": {
    "boostScore": 10.0,
    "enableAutocomplete": true,
    "fieldNameAliases": [
    "_entityName"
    ],
    "fieldType": "WORD_GRAM"
    },
    "type": "string",
    "name": "name",
    "doc": "Display name of the tag"
    },
    {
    "Searchable": {},
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Documentation of the tag"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "colorHex",
    "default": null,
    "doc": "The color associated with the Tag in Hex. For example #FFFFFF."
    }
    ],
    "doc": "Properties associated with a Tag"
    }

    status

    The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc. This aspect is used to represent soft deletes conventionally.

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "status"
    },
    "name": "Status",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN"
    },
    "type": "boolean",
    "name": "removed",
    "default": false,
    "doc": "Whether the entity has been removed (soft-deleted)."
    }
    ],
    "doc": "The lifecycle status metadata of an entity, e.g. dataset, metric, feature, etc.\nThis aspect is used to represent soft deletes conventionally."
    }

    deprecation

    Deprecation status of an entity

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "deprecation"
    },
    "name": "Deprecation",
    "namespace": "com.linkedin.common",
    "fields": [
    {
    "Searchable": {
    "fieldType": "BOOLEAN",
    "weightsPerFieldValue": {
    "true": 0.5
    }
    },
    "type": "boolean",
    "name": "deprecated",
    "doc": "Whether the entity is deprecated."
    },
    {
    "type": [
    "null",
    "long"
    ],
    "name": "decommissionTime",
    "default": null,
    "doc": "The time user plan to decommission this entity."
    },
    {
    "type": "string",
    "name": "note",
    "doc": "Additional information about the entity deprecation plan, such as the wiki, doc, RB."
    },
    {
    "java": {
    "class": "com.linkedin.common.urn.Urn"
    },
    "type": "string",
    "name": "actor",
    "doc": "The user URN which will be credited for modifying this deprecation content."
    }
    ],
    "doc": "Deprecation status of an entity"
    }

    Relationships

    Outgoing

    These are the relationships stored in this entity's aspects

    • OwnedBy

      • Corpuser via ownership.owners.owner
      • CorpGroup via ownership.owners.owner
    • ownershipType

      • OwnershipType via ownership.owners.typeUrn

    Incoming

    These are the relationships stored in other entities' aspects

    • SchemaFieldTaggedWith

      • Dataset via schemaMetadata.fields.globalTags
      • Chart via inputFields.fields.schemaField.globalTags
      • Dashboard via inputFields.fields.schemaField.globalTags
    • TaggedWith

      • Dataset via schemaMetadata.fields.globalTags.tags
      • Dataset via editableSchemaMetadata.editableSchemaFieldInfo.globalTags.tags
      • Dataset via globalTags.tags
      • DataJob via globalTags.tags
      • DataFlow via globalTags.tags
      • Chart via globalTags.tags
      • Chart via inputFields.fields.schemaField.globalTags.tags
      • Dashboard via globalTags.tags
      • Dashboard via inputFields.fields.schemaField.globalTags.tags
      • Notebook via globalTags.tags
      • Corpuser via globalTags.tags
      • CorpGroup via globalTags.tags
      • Container via globalTags.tags
    • EditableSchemaFieldTaggedWith

      • Dataset via editableSchemaMetadata.editableSchemaFieldInfo.globalTags

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/telemetry/index.html b/docs/generated/metamodel/entities/telemetry/index.html index d7ca14a62971f..9b992b311079d 100644 --- a/docs/generated/metamodel/entities/telemetry/index.html +++ b/docs/generated/metamodel/entities/telemetry/index.html @@ -8,13 +8,13 @@ - +
    - + \ No newline at end of file diff --git a/docs/generated/metamodel/entities/test/index.html b/docs/generated/metamodel/entities/test/index.html index 9eb8bf3cdd7f0..040f64ffe490c 100644 --- a/docs/generated/metamodel/entities/test/index.html +++ b/docs/generated/metamodel/entities/test/index.html @@ -8,13 +8,13 @@ - +

    Test

    A DataHub test

    Aspects

    testInfo

    Information about a DataHub Test

    Schema
    {
    "type": "record",
    "Aspect": {
    "name": "testInfo"
    },
    "name": "TestInfo",
    "namespace": "com.linkedin.test",
    "fields": [
    {
    "Searchable": {
    "fieldType": "TEXT_PARTIAL"
    },
    "type": "string",
    "name": "name",
    "doc": "The name of the test"
    },
    {
    "Searchable": {
    "fieldType": "KEYWORD"
    },
    "type": "string",
    "name": "category",
    "doc": "Category of the test"
    },
    {
    "Searchable": {
    "fieldType": "TEXT"
    },
    "type": [
    "null",
    "string"
    ],
    "name": "description",
    "default": null,
    "doc": "Description of the test"
    },
    {
    "type": {
    "type": "record",
    "name": "TestDefinition",
    "namespace": "com.linkedin.test",
    "fields": [
    {
    "type": {
    "type": "enum",
    "symbolDocs": {
    "JSON": "JSON / YAML test def"
    },
    "name": "TestDefinitionType",
    "namespace": "com.linkedin.test",
    "symbols": [
    "JSON"
    ]
    },
    "name": "type",
    "doc": "The Test Definition Type"
    },
    {
    "type": [
    "null",
    "string"
    ],
    "name": "json",
    "default": null,
    "doc": "JSON format configuration for the test"
    }
    ]
    },
    "name": "definition",
    "doc": "Configuration for the Test"
    }
    ],
    "doc": "Information about a DataHub Test"
    }

    Relationships

    Incoming

    These are the relationships stored in other entities' aspects

    • IsFailing

      • Dataset via testResults.failing
    • IsPassing

      • Dataset via testResults.passing

    Global Metadata Model

    Global Graph

    - + \ No newline at end of file diff --git a/docs/glossary/business-glossary/index.html b/docs/glossary/business-glossary/index.html index af59884048d2e..d639a885ebb45 100644 --- a/docs/glossary/business-glossary/index.html +++ b/docs/glossary/business-glossary/index.html @@ -8,7 +8,7 @@ - + @@ -24,7 +24,7 @@ data type, and then associate this with a higher-level PII Glossary Term via an Inheritance relationship. This allows you to easily maintain a set of all Data Assets that contain or process PII, while keeping it easy to add and remove new Terms from the PII Classification, e.g. without requiring re-annotation of individual Data Assets or Columns.

    Demo

    Check out our demo site to see an example Glossary and how it works!

    GraphQL

    You can easily fetch the Glossary Terms for an entity given its URN using the glossaryTerms property. Check out Working with Metadata Entities for an example.

    Resources

    Feedback / Questions / Concerns

    We want to hear from you! For any inquiries, including Feedback, Questions, or Concerns, reach out on Slack!

    - + \ No newline at end of file diff --git a/docs/graphql/enums/index.html b/docs/graphql/enums/index.html index 9ab018b36bbe3..55cca8ad484da 100644 --- a/docs/graphql/enums/index.html +++ b/docs/graphql/enums/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ Note that this field will soon become deprecated due to low usage

    Values

    ValueDescription
    TECHNICAL_OWNER

    A person or group who is responsible for technical aspects of the asset.

    BUSINESS_OWNER

    A person or group who is responsible for logical, or business related, aspects of the asset.

    DATA_STEWARD

    A steward, expert, or delegate responsible for the asset.

    NONE

    No specific type associated with the owner.

    CUSTOM

    Associated ownership type is a custom ownership type. Please check OwnershipTypeEntity urn for custom value.

    DATAOWNER

    A person or group that owns the data. Deprecated! This ownership type is no longer supported. Use TECHNICAL_OWNER instead.

    DEVELOPER

    A person or group that is in charge of developing the code Deprecated! This ownership type is no longer supported. Use TECHNICAL_OWNER instead.

    DELEGATE

    A person or a group that oversees the operation, e.g. a DBA or SRE. Deprecated! This ownership type is no longer supported. Use TECHNICAL_OWNER instead.

    PRODUCER

    A person, group, or service that produces or generates the data Deprecated! This ownership type is no longer supported. Use TECHNICAL_OWNER instead.

    STAKEHOLDER

    A person or a group that has direct business interest Deprecated! Use BUSINESS_OWNER instead.

    CONSUMER

    A person, group, or service that consumes the data Deprecated! This ownership type is no longer supported.

    PartitionType

    Values

    ValueDescription
    FULL_TABLE
    QUERY
    PARTITION

    PlatformNativeType

    Deprecated, do not use this type. The logical type associated with an individual Dataset.

    Values

    ValueDescription
    TABLE

    Table

    VIEW

    View

    DIRECTORY

    Directory in file system

    STREAM

    Stream

    BUCKET

    Bucket in key value store

    PlatformType

    The category of a specific Data Platform

    Values

    ValueDescription
    FILE_SYSTEM

    Value for a file system

    KEY_VALUE_STORE

    Value for a key value store

    MESSAGE_BROKER

    Value for a message broker

    OBJECT_STORE

    Value for an object store

    OLAP_DATASTORE

    Value for an OLAP datastore

    QUERY_ENGINE

    Value for a query engine

    RELATIONAL_DB

    Value for a relational database

    SEARCH_ENGINE

    Value for a search engine

    OTHERS

    Value for other platforms

    PolicyMatchCondition

    Match condition

    Values

    ValueDescription
    EQUALS

    Whether the field matches the value

    PolicyState

    The state of an Access Policy

    Values

    ValueDescription
    DRAFT

    A Policy that has not been officially created, but is in progress. Currently unused.

    ACTIVE

    A Policy that is active and being enforced

    INACTIVE

    A Policy that is not active or being enforced

    PolicyType

    The type of the Access Policy

    Values

    ValueDescription
    METADATA

    An access policy that grants privileges pertaining to Metadata Entities

    PLATFORM

    An access policy that grants top level administrative privileges pertaining to the DataHub Platform itself

    PostContentType

    The type of post

    Values

    ValueDescription
    TEXT

    Text content

    LINK

    Link content

    PostType

    The type of post

    Values

    ValueDescription
    HOME_PAGE_ANNOUNCEMENT

    Posts on the home page

    QueryLanguage

    A query language / dialect.

    Values

    ValueDescription
    SQL

    Standard ANSI SQL

    QuerySource

    The source of the query

    Values

    ValueDescription
    MANUAL

    The query was provided manually, e.g. from the UI.

    RecommendationRenderType

    Enum that defines how the modules should be rendered. There should be two frontend implementations, of large and small modules, per type.

    Values

    ValueDescription
    ENTITY_NAME_LIST

    Simple list of entities

    PLATFORM_SEARCH_LIST

    List of platforms

    TAG_SEARCH_LIST

    Tag search list

    SEARCH_QUERY_LIST

    A list of recommended search queries

    GLOSSARY_TERM_SEARCH_LIST

    Glossary Term search list

    DOMAIN_SEARCH_LIST

    Domain Search List

    RelationshipDirection

    Direction between a source and destination node

    Values

    ValueDescription
    INCOMING

    A directed edge pointing at the source Entity

    OUTGOING

    A directed edge pointing at the destination Entity

    ScenarioType

    Type of the scenario requesting recommendation

    Values

    ValueDescription
    HOME

    Recommendations to show on the users home page

    SEARCH_RESULTS

    Recommendations to show on the search results page

    ENTITY_PROFILE

    Recommendations to show on an Entity Profile page

    SEARCH_BAR

    Recommendations to show on the search bar when clicked

    SchemaFieldDataType

    The type associated with a single Dataset schema field

    Values

    ValueDescription
    BOOLEAN

    A boolean type

    FIXED

    A fixed bytestring type

    STRING

    A string type

    BYTES

    A string of bytes

    NUMBER

    A number, including integers, floats, and doubles

    DATE

    A date string type

    TIME

    A timestamp type

    ENUM

    An enum type

    NULL

    A NULL type

    MAP

    A map collection type

    ARRAY

    An array collection type

    UNION

    A union type

    STRUCT

    A complex struct type

    SortOrder

    Order for sorting

    Values

    ValueDescription
    ASCENDING
    DESCENDING

    SourceCodeUrlType

    Values

    ValueDescription
    ML_MODEL_SOURCE_CODE

    MLModel Source Code

    TRAINING_PIPELINE_SOURCE_CODE

    Training Pipeline Source Code

    EVALUATION_PIPELINE_SOURCE_CODE

    Evaluation Pipeline Source Code

    SubResourceType

    A type of Metadata Entity sub resource

    Values

    ValueDescription
    DATASET_FIELD

    A Dataset field or column

    TermRelationshipType

    A type of Metadata Entity sub resource

    Values

    ValueDescription
    isA

    When a Term inherits from, or has an 'Is A' relationship with another Term

    hasA

    When a Term contains, or has a 'Has A' relationship with another Term

    TestResultType

    The result type of a test that has been run

    Values

    ValueDescription
    SUCCESS

    The test succeeded.

    FAILURE

    The test failed.

    TimeRange

    A time range used in fetching Usage statistics

    Values

    ValueDescription
    DAY

    Last day

    WEEK

    Last week

    MONTH

    Last month

    QUARTER

    Last quarter

    YEAR

    Last year

    ALL

    All time

    UserSetting

    An individual setting type for a Corp User.

    Values

    ValueDescription
    SHOW_SIMPLIFIED_HOMEPAGE

    Show simplified homepage

    WindowDuration

    The duration of a fixed window of time

    Values

    ValueDescription
    DAY

    A one day window

    WEEK

    A one week window

    MONTH

    A one month window

    YEAR

    A one year window

    - + \ No newline at end of file diff --git a/docs/graphql/inputObjects/index.html b/docs/graphql/inputObjects/index.html index e60323751a203..5d6c5fa8b71b0 100644 --- a/docs/graphql/inputObjects/index.html +++ b/docs/graphql/inputObjects/index.html @@ -8,7 +8,7 @@ - + @@ -17,10 +17,10 @@ Update to the Tags associated with a Metadata Entity

    Arguments

    NameDescription
    tags
    [TagAssociationUpdate!]

    The new set of tags

    InstitutionalMemoryMetadataUpdate

    An institutional memory to add to a Metadata Entity TODO Add a USER or GROUP actor enum

    Arguments

    NameDescription
    url
    String!

    Link to a document or wiki page or another internal resource

    description
    String

    Description of the resource

    author
    String!

    The corp user urn of the author of the metadata

    createdAt
    Long

    The time at which this metadata was created

    InstitutionalMemoryUpdate

    An update for the institutional memory information for a Metadata Entity

    Arguments

    NameDescription
    elements
    [InstitutionalMemoryMetadataUpdate!]!

    The individual references in the institutional memory

    LineageEdge

    Arguments

    NameDescription
    downstreamUrn
    String!

    Urn of the source entity. This urn is downstream of the destinationUrn.

    upstreamUrn
    String!

    Urn of the destination entity. This urn is upstream of the destinationUrn

    LineageInput

    Input for the list lineage property of an Entity

    Arguments

    NameDescription
    direction
    LineageDirection!

    The direction of the relationship, either incoming or outgoing from the source entity

    start
    Int

    The starting offset of the result set

    count
    Int

    The number of results to be returned

    separateSiblings
    Boolean

    Optional flag to not merge siblings in the response. They are merged by default.

    startTimeMillis
    Long

    An optional starting time to filter on

    endTimeMillis
    Long

    An optional ending time to filter on

    ListAccessTokenInput

    Input arguments for listing access tokens

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set

    count
    Int

    The number of results to be returned

    filters
    [FacetFilterInput!]

    Facet filters to apply to search results

    ListDomainsInput

    Input required when listing DataHub Domains

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set returned

    count
    Int

    The maximum number of Domains to be returned in the result set

    query
    String

    Optional search query

    ListGlobalViewsInput

    Input provided when listing DataHub Global Views

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set returned

    count
    Int

    The maximum number of Views to be returned in the result set

    query
    String

    Optional search query

    ListGroupsInput

    Input required when listing DataHub Groups

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set returned

    count
    Int

    The maximum number of Policies to be returned in the result set

    query
    String

    Optional search query

    ListIngestionSourcesInput

    Input arguments for listing Ingestion Sources

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set

    count
    Int

    The number of results to be returned

    query
    String

    An optional search query

    filters
    [FacetFilterInput!]

    Optional Facet filters to apply to the result set

    ListMyViewsInput

    Input provided when listing DataHub Views

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set returned

    count
    Int

    The maximum number of Views to be returned in the result set

    query
    String

    Optional search query

    viewType
    DataHubViewType

    Optional - List the type of View to filter for.

    ListOwnershipTypesInput

    Input required for listing custom ownership types entities

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set returned, default is 0

    count
    Int

    The maximum number of Custom Ownership Types to be returned in the result set, default is 20

    query
    String

    Optional search query

    filters
    [FacetFilterInput!]

    Optional Facet filters to apply to the result set

    ListPoliciesInput

    Input required when listing DataHub Access Policies

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set returned

    count
    Int

    The maximum number of Policies to be returned in the result set

    query
    String

    Optional search query

    ListPostsInput

    Input provided when listing existing posts

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set returned

    count
    Int

    The maximum number of Roles to be returned in the result set

    query
    String

    Optional search query

    ListQueriesInput

    Input required for listing query entities

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set returned

    count
    Int

    The maximum number of Queries to be returned in the result set

    query
    String

    A raw search query

    source
    QuerySource

    An optional source for the query

    datasetUrn
    String

    An optional Urn for the parent dataset that the query is associated with.

    ListRecommendationsInput

    Input arguments for fetching UI recommendations

    Arguments

    NameDescription
    userUrn
    String!

    Urn of the actor requesting recommendations

    requestContext
    RecommendationRequestContext

    Context provider by the caller requesting recommendations

    limit
    Int

    Max number of modules to return

    ListRolesInput

    Input provided when listing existing roles

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set returned

    count
    Int

    The maximum number of Roles to be returned in the result set

    query
    String

    Optional search query

    ListSecretsInput

    Input for listing DataHub Secrets

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set

    count
    Int

    The number of results to be returned

    query
    String

    An optional search query

    ListTestsInput

    Input required when listing DataHub Tests

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set returned

    count
    Int

    The maximum number of Domains to be returned in the result set

    query
    String

    Optional query string to match on

    ListUsersInput

    Input required when listing DataHub Users

    Arguments

    NameDescription
    start
    Int

    The starting offset of the result set returned

    count
    Int

    The maximum number of Policies to be returned in the result set

    query
    String

    Optional search query

    MetadataAnalyticsInput

    Input to fetch metadata analytics charts

    Arguments

    NameDescription
    entityType
    EntityType

    Entity type to fetch analytics for (If empty, queries across all entities)

    domain
    String

    Urn of the domain to fetch analytics for (If empty or GLOBAL, queries across all domains)

    query
    String

    Search query to filter down result (If empty, does not apply any search query)

    NotebookEditablePropertiesUpdate

    Update to writable Notebook fields

    Arguments

    NameDescription
    description
    String!

    Writable description aka documentation for a Notebook

    NotebookUpdateInput

    Arguments provided to update a Notebook Entity

    Arguments

    NameDescription
    ownership
    OwnershipUpdate

    Update to ownership

    tags
    GlobalTagsUpdate

    Update to tags

    editableProperties
    NotebookEditablePropertiesUpdate

    Update to editable properties

    OwnerInput

    Input provided when adding an owner to an asset

    Arguments

    NameDescription
    ownerUrn
    String!

    The primary key of the Owner to add or remove

    ownerEntityType
    OwnerEntityType!

    The owner type, either a user or group

    type
    OwnershipType
    Deprecated: No longer supported

    The ownership type for the new owner. If none is provided, then a new NONE will be added. Deprecated - Use ownershipTypeUrn field instead.

    ownershipTypeUrn
    String

    The urn of the ownership type entity.

    OwnershipUpdate

    An update for the ownership information for a Metadata Entity

    Arguments

    NameDescription
    owners
    [OwnerUpdate!]!

    The updated list of owners

    OwnerUpdate

    An owner to add to a Metadata Entity TODO Add a USER or GROUP actor enum

    Arguments

    NameDescription
    owner
    String!

    The owner URN, either a corpGroup or corpuser

    type
    OwnershipType
    Deprecated: No longer supported

    The owner type. Deprecated - Use ownershipTypeUrn field instead.

    ownershipTypeUrn
    String

    The urn of the ownership type entity.

    PolicyMatchCriterionInput

    Criterion to define relationship between field and values

    Arguments

    NameDescription
    field
    String!

    The name of the field that the criterion refers to e.g. entity_type, entity_urn, domain

    values
    [String!]!

    Values. Matches criterion if any one of the values matches condition (OR-relationship)

    condition
    PolicyMatchCondition!

    The name of the field that the criterion refers to

    PolicyMatchFilterInput

    Filter object that encodes a complex filter logic with OR + AND

    Arguments

    NameDescription
    criteria
    [PolicyMatchCriterionInput!]

    List of criteria to apply

    PolicyUpdateInput

    Input provided when creating or updating an Access Policy

    Arguments

    NameDescription
    type
    PolicyType!

    The Policy Type

    name
    String!

    The Policy name

    state
    PolicyState!

    The Policy state

    description
    String

    A Policy description

    resources
    ResourceFilterInput

    The set of resources that the Policy privileges apply to

    privileges
    [String!]!

    The set of privileges that the Policy grants

    actors
    ActorFilterInput!

    The set of actors that the Policy privileges are granted to

    QueryStatementInput

    Input required for creating a Query Statement

    Arguments

    NameDescription
    value
    String!

    The query text

    language
    QueryLanguage!

    The query language

    RecommendationRequestContext

    Context that defines the page requesting recommendations -i.e. for search pages, the query/filters. for entity pages, the entity urn and tab

    Arguments

    NameDescription
    scenario
    ScenarioType!

    Scenario in which the recommendations will be displayed

    searchRequestContext
    SearchRequestContext

    Additional context for defining the search page requesting recommendations

    entityRequestContext
    EntityRequestContext

    Additional context for defining the entity page requesting recommendations

    RelatedTermsInput

    Input provided when adding Terms to an asset

    Arguments

    NameDescription
    urn
    String!

    The Glossary Term urn to add or remove this relationship to/from

    termUrns
    [String!]!

    The primary key of the Glossary Term to add or remove

    relationshipType
    TermRelationshipType!

    The type of relationship we're adding or removing to/from for a Glossary Term

    RelationshipsInput

    Input for the list relationships field of an Entity

    Arguments

    NameDescription
    types
    [String!]!

    The types of relationships to query, representing an OR

    direction
    RelationshipDirection!

    The direction of the relationship, either incoming or outgoing from the source entity

    start
    Int

    The starting offset of the result set

    count
    Int

    The number of results to be returned

    RemoveGroupMembersInput

    Input required to remove members from an external DataHub group

    Arguments

    NameDescription
    groupUrn
    String!

    The group to remove members from

    userUrns
    [String!]!

    The members to remove from the group

    RemoveLinkInput

    Input provided when removing the association between a Metadata Entity and a Link

    Arguments

    NameDescription
    linkUrl
    String!

    The url of the link to add or remove, which uniquely identifies the Link

    resourceUrn
    String!

    The urn of the resource or entity to attach the link to, for example a dataset urn

    RemoveNativeGroupMembersInput

    Input required to remove members from a native DataHub group

    Arguments

    NameDescription
    groupUrn
    String!

    The group to remove members from

    userUrns
    [String!]!

    The members to remove from the group

    RemoveOwnerInput

    Input provided when removing the association between a Metadata Entity and an user or group owner

    Arguments

    NameDescription
    ownerUrn
    String!

    The primary key of the Owner to add or remove

    ownershipTypeUrn
    String

    The ownership type to remove, optional. By default will remove regardless of ownership type.

    resourceUrn
    String!

    The urn of the resource or entity to attach or remove the owner from, for example a dataset urn

    ReportOperationInput

    Input provided to report an asset operation

    Arguments

    NameDescription
    urn
    String!

    The urn of the asset (e.g. dataset) to report the operation for

    operationType
    OperationType!

    The type of operation that was performed. Required

    customOperationType
    String

    A custom type of operation. Required if operation type is CUSTOM.

    sourceType
    OperationSourceType!

    The source or reporter of the operation

    customProperties
    [StringMapEntryInput!]

    A list of key-value parameters to include

    partition
    String

    An optional partition identifier

    numAffectedRows
    Long

    Optional: The number of affected rows

    timestampMillis
    Long

    Optional: Provide a timestamp associated with the operation. If not provided, one will be generated for you based on the current time.

    ResourceFilterInput

    Input required when creating or updating an Access Policies Determines which resources the Policy applies to

    Arguments

    NameDescription
    type
    String

    The type of the resource the policy should apply to Not required because in the future we want to support filtering by type OR by domain

    resources
    [String!]

    A list of specific resource urns to apply the filter to

    allResources
    Boolean

    Whether or not to apply the filter to all resources of the type

    filter
    PolicyMatchFilterInput

    Whether or not to apply the filter to all resources of the type

    ResourceRefInput

    Reference to a resource to apply an action to

    Arguments

    NameDescription
    resourceUrn
    String!

    The urn of the resource being referenced

    subResourceType
    SubResourceType

    An optional type of a sub resource to attach the Tag to

    subResource
    String

    An optional sub resource identifier to attach the Tag to

    ResourceSpec

    Spec to identify resource

    Arguments

    NameDescription
    resourceType
    EntityType!

    Resource type

    resourceUrn
    String!

    Resource urn

    RollbackIngestionInput

    Input for rolling back an ingestion execution

    Arguments

    NameDescription
    runId
    String!

    An ingestion run ID

    ScrollAcrossEntitiesInput

    Input arguments for a full text search query across entities, specifying a starting pointer. Allows paging beyond 10k results

    Arguments

    NameDescription
    types
    [EntityType!]

    Entity types to be searched. If this is not provided, all entities will be searched.

    query
    String!

    The query string

    scrollId
    String

    The starting point of paginated results, an opaque ID the backend understands as a pointer

    keepAlive
    String

    The amount of time to keep the point in time snapshot alive, takes a time unit based string ex: 5m or 30s

    count
    Int

    The number of elements included in the results

    orFilters
    [AndFilterInput!]

    A list of disjunctive criterion for the filter. (or operation to combine filters)

    viewUrn
    String

    Optional - A View to apply when generating results

    searchFlags
    SearchFlags

    Flags controlling search options

    ScrollAcrossLineageInput

    Input arguments for a search query over the results of a multi-hop graph query, uses scroll API

    Arguments

    NameDescription
    urn
    String

    Urn of the source node

    direction
    LineageDirection!

    The direction of the relationship, either incoming or outgoing from the source entity

    types
    [EntityType!]

    Entity types to be searched. If this is not provided, all entities will be searched.

    query
    String

    The query string

    scrollId
    String

    The starting point of paginated results, an opaque ID the backend understands as a pointer

    keepAlive
    String

    The amount of time to keep the point in time snapshot alive, takes a time unit based string ex: 5m or 30s

    count
    Int

    The number of elements included in the results

    orFilters
    [AndFilterInput!]

    A list of disjunctive criterion for the filter. (or operation to combine filters)

    startTimeMillis
    Long

    An optional starting time to filter on

    endTimeMillis
    Long

    An optional ending time to filter on

    searchFlags
    SearchFlags

    Flags controlling search options

    SearchAcrossEntitiesInput

    Input arguments for a full text search query across entities

    Arguments

    NameDescription
    types
    [EntityType!]

    Entity types to be searched. If this is not provided, all entities will be searched.

    query
    String!

    The query string

    start
    Int

    The starting point of paginated results

    count
    Int

    The number of elements included in the results

    filters
    [FacetFilterInput!]
    Deprecated: Use `orFilters`- they are more expressive

    Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together.

    orFilters
    [AndFilterInput!]

    A list of disjunctive criterion for the filter. (or operation to combine filters)

    viewUrn
    String

    Optional - A View to apply when generating results

    searchFlags
    SearchFlags

    Flags controlling search options

    sortInput
    SearchSortInput

    Optional - Information on how to sort this search result

    SearchAcrossLineageInput

    Input arguments for a search query over the results of a multi-hop graph query

    Arguments

    NameDescription
    urn
    String

    Urn of the source node

    direction
    LineageDirection!

    The direction of the relationship, either incoming or outgoing from the source entity

    types
    [EntityType!]

    Entity types to be searched. If this is not provided, all entities will be searched.

    query
    String

    The query string

    start
    Int

    The starting point of paginated results

    count
    Int

    The number of elements included in the results

    filters
    [FacetFilterInput!]
    Deprecated: Use `orFilters`- they are more expressive

    Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together.

    orFilters
    [AndFilterInput!]

    A list of disjunctive criterion for the filter. (or operation to combine filters)

    startTimeMillis
    Long

    An optional starting time to filter on

    endTimeMillis
    Long

    An optional ending time to filter on

    searchFlags
    SearchFlags

    Flags controlling search options

    SearchFlags

    Set of flags to control search behavior

    Arguments

    NameDescription
    skipCache
    Boolean

    Whether to skip cache

    maxAggValues
    Int

    The maximum number of values in a facet aggregation

    fulltext
    Boolean

    Structured or unstructured fulltext query

    skipHighlighting
    Boolean

    Whether to skip highlighting

    skipAggregates
    Boolean

    Whether to skip aggregates/facets

    SearchInput

    Input arguments for a full text search query

    Arguments

    NameDescription
    type
    EntityType!

    The Metadata Entity type to be searched against

    query
    String!

    The raw query string

    start
    Int

    The offset of the result set

    count
    Int

    The number of entities to include in result set

    filters
    [FacetFilterInput!]
    Deprecated: Use `orFilters`- they are more expressive

    Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together.

    orFilters
    [AndFilterInput!]

    A list of disjunctive criterion for the filter. (or operation to combine filters)

    searchFlags
    SearchFlags

    Flags controlling search options

    SearchRequestContext

    Context that defines a search page requesting recommendations

    Arguments

    NameDescription
    query
    String!

    Search query

    filters
    [FacetFilterInput!]

    Faceted filters applied to search results

    SearchSortInput

    Input required in order to sort search results

    Arguments

    NameDescription
    sortCriterion
    SortCriterion!

    A criterion to sort search results on

    SortCriterion

    A single sorting criterion for sorting search.

    Arguments

    NameDescription
    field
    String!

    A field upon which we'll do sorting on.

    sortOrder
    SortOrder!

    The order in which we will be sorting

    StepStateInput

    The input required to update the state of a step

    Arguments

    NameDescription
    id
    String!

    The globally unique id for the step

    properties
    [StringMapEntryInput]!

    The new properties for the step

    StringMapEntryInput

    String map entry input

    Arguments

    NameDescription
    key
    String!

    The key of the map entry

    value
    String

    The value of the map entry

    TagAssociationInput

    Input provided when updating the association between a Metadata Entity and a Tag

    Arguments

    NameDescription
    tagUrn
    String!

    The primary key of the Tag to add or remove

    resourceUrn
    String!

    The target Metadata Entity to add or remove the Tag to

    subResourceType
    SubResourceType

    An optional type of a sub resource to attach the Tag to

    subResource
    String

    An optional sub resource identifier to attach the Tag to

    TagAssociationUpdate

    Deprecated, use addTag or removeTag mutation instead A tag update to be applied

    Arguments

    NameDescription
    scenario
    ScenarioType!

    Scenario in which the recommendations will be displayed

    searchRequestContext
    SearchRequestContext

    Additional context for defining the search page requesting recommendations

    entityRequestContext
    EntityRequestContext

    Additional context for defining the entity page requesting recommendations

    RelatedTermsInput

    Input provided when adding Terms to an asset

    Arguments

    NameDescription
    urn
    String!

    The Glossary Term urn to add or remove this relationship to/from

    termUrns
    [String!]!

    The primary key of the Glossary Term to add or remove

    relationshipType
    TermRelationshipType!

    The type of relationship we're adding or removing to/from for a Glossary Term

    RelationshipsInput

    Input for the list relationships field of an Entity

    Arguments

    NameDescription
    types
    [String!]!

    The types of relationships to query, representing an OR

    direction
    RelationshipDirection!

    The direction of the relationship, either incoming or outgoing from the source entity

    start
    Int

    The starting offset of the result set

    count
    Int

    The number of results to be returned

    RemoveGroupMembersInput

    Input required to remove members from an external DataHub group

    Arguments

    NameDescription
    groupUrn
    String!

    The group to remove members from

    userUrns
    [String!]!

    The members to remove from the group

    RemoveLinkInput

    Input provided when removing the association between a Metadata Entity and a Link

    Arguments

    NameDescription
    linkUrl
    String!

    The url of the link to add or remove, which uniquely identifies the Link

    resourceUrn
    String!

    The urn of the resource or entity to attach the link to, for example a dataset urn

    RemoveNativeGroupMembersInput

    Input required to remove members from a native DataHub group

    Arguments

    NameDescription
    groupUrn
    String!

    The group to remove members from

    userUrns
    [String!]!

    The members to remove from the group

    RemoveOwnerInput

    Input provided when removing the association between a Metadata Entity and an user or group owner

    Arguments

    NameDescription
    ownerUrn
    String!

    The primary key of the Owner to add or remove

    ownershipTypeUrn
    String

    The ownership type to remove, optional. By default will remove regardless of ownership type.

    resourceUrn
    String!

    The urn of the resource or entity to attach or remove the owner from, for example a dataset urn

    ReportOperationInput

    Input provided to report an asset operation

    Arguments

    NameDescription
    urn
    String!

    The urn of the asset (e.g. dataset) to report the operation for

    operationType
    OperationType!

    The type of operation that was performed. Required

    customOperationType
    String

    A custom type of operation. Required if operation type is CUSTOM.

    sourceType
    OperationSourceType!

    The source or reporter of the operation

    customProperties
    [StringMapEntryInput!]

    A list of key-value parameters to include

    partition
    String

    An optional partition identifier

    numAffectedRows
    Long

    Optional: The number of affected rows

    timestampMillis
    Long

    Optional: Provide a timestamp associated with the operation. If not provided, one will be generated for you based on the current time.

    ResourceFilterInput

    Input required when creating or updating an Access Policies Determines which resources the Policy applies to

    Arguments

    NameDescription
    type
    String

    The type of the resource the policy should apply to Not required because in the future we want to support filtering by type OR by domain

    resources
    [String!]

    A list of specific resource urns to apply the filter to

    allResources
    Boolean

    Whether or not to apply the filter to all resources of the type

    filter
    PolicyMatchFilterInput

    Whether or not to apply the filter to all resources of the type

    ResourceRefInput

    Reference to a resource to apply an action to

    Arguments

    NameDescription
    resourceUrn
    String!

    The urn of the resource being referenced

    subResourceType
    SubResourceType

    An optional type of a sub resource to attach the Tag to

    subResource
    String

    An optional sub resource identifier to attach the Tag to

    ResourceSpec

    Spec to identify resource

    Arguments

    NameDescription
    resourceType
    EntityType!

    Resource type

    resourceUrn
    String!

    Resource urn

    RollbackIngestionInput

    Input for rolling back an ingestion execution

    Arguments

    NameDescription
    runId
    String!

    An ingestion run ID

    ScrollAcrossEntitiesInput

    Input arguments for a full text search query across entities, specifying a starting pointer. Allows paging beyond 10k results

    Arguments

    NameDescription
    types
    [EntityType!]

    Entity types to be searched. If this is not provided, all entities will be searched.

    query
    String!

    The query string

    scrollId
    String

    The starting point of paginated results, an opaque ID the backend understands as a pointer

    keepAlive
    String

    The amount of time to keep the point in time snapshot alive, takes a time unit based string ex: 5m or 30s

    count
    Int

    The number of elements included in the results

    orFilters
    [AndFilterInput!]

    A list of disjunctive criterion for the filter. (or operation to combine filters)

    viewUrn
    String

    Optional - A View to apply when generating results

    searchFlags
    SearchFlags

    Flags controlling search options

    ScrollAcrossLineageInput

    Input arguments for a search query over the results of a multi-hop graph query, uses scroll API

    Arguments

    NameDescription
    urn
    String

    Urn of the source node

    direction
    LineageDirection!

    The direction of the relationship, either incoming or outgoing from the source entity

    types
    [EntityType!]

    Entity types to be searched. If this is not provided, all entities will be searched.

    query
    String

    The query string

    scrollId
    String

    The starting point of paginated results, an opaque ID the backend understands as a pointer

    keepAlive
    String

    The amount of time to keep the point in time snapshot alive, takes a time unit based string ex: 5m or 30s

    count
    Int

    The number of elements included in the results

    orFilters
    [AndFilterInput!]

    A list of disjunctive criterion for the filter. (or operation to combine filters)

    startTimeMillis
    Long

    An optional starting time to filter on

    endTimeMillis
    Long

    An optional ending time to filter on

    searchFlags
    SearchFlags

    Flags controlling search options

    SearchAcrossEntitiesInput

    Input arguments for a full text search query across entities

    Arguments

    NameDescription
    types
    [EntityType!]

    Entity types to be searched. If this is not provided, all entities will be searched.

    query
    String!

    The query string

    start
    Int

    The starting point of paginated results

    count
    Int

    The number of elements included in the results

    filters
    [FacetFilterInput!]
    Deprecated: Use `orFilters`- they are more expressive

    Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together.

    orFilters
    [AndFilterInput!]

    A list of disjunctive criterion for the filter. (or operation to combine filters)

    viewUrn
    String

    Optional - A View to apply when generating results

    searchFlags
    SearchFlags

    Flags controlling search options

    sortInput
    SearchSortInput

    Optional - Information on how to sort this search result

    SearchAcrossLineageInput

    Input arguments for a search query over the results of a multi-hop graph query

    Arguments

    NameDescription
    urn
    String

    Urn of the source node

    direction
    LineageDirection!

    The direction of the relationship, either incoming or outgoing from the source entity

    types
    [EntityType!]

    Entity types to be searched. If this is not provided, all entities will be searched.

    query
    String

    The query string

    start
    Int

    The starting point of paginated results

    count
    Int

    The number of elements included in the results

    filters
    [FacetFilterInput!]
    Deprecated: Use `orFilters`- they are more expressive

    Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together.

    orFilters
    [AndFilterInput!]

    A list of disjunctive criterion for the filter. (or operation to combine filters)

    startTimeMillis
    Long

    An optional starting time to filter on

    endTimeMillis
    Long

    An optional ending time to filter on

    searchFlags
    SearchFlags

    Flags controlling search options

    SearchFlags

    Set of flags to control search behavior

    Arguments

    NameDescription
    skipCache
    Boolean

    Whether to skip cache

    maxAggValues
    Int

    The maximum number of values in a facet aggregation

    fulltext
    Boolean

    Structured or unstructured fulltext query

    skipHighlighting
    Boolean

    Whether to skip highlighting

    skipAggregates
    Boolean

    Whether to skip aggregates/facets

    getSuggestions
    Boolean

    Whether to request for search suggestions on the _entityName virtualized field

    SearchInput

    Input arguments for a full text search query

    Arguments

    NameDescription
    type
    EntityType!

    The Metadata Entity type to be searched against

    query
    String!

    The raw query string

    start
    Int

    The offset of the result set

    count
    Int

    The number of entities to include in result set

    filters
    [FacetFilterInput!]
    Deprecated: Use `orFilters`- they are more expressive

    Deprecated in favor of the more expressive orFilters field Facet filters to apply to search results. These will be 'AND'-ed together.

    orFilters
    [AndFilterInput!]

    A list of disjunctive criterion for the filter. (or operation to combine filters)

    searchFlags
    SearchFlags

    Flags controlling search options

    SearchRequestContext

    Context that defines a search page requesting recommendations

    Arguments

    NameDescription
    query
    String!

    Search query

    filters
    [FacetFilterInput!]

    Faceted filters applied to search results

    SearchSortInput

    Input required in order to sort search results

    Arguments

    NameDescription
    sortCriterion
    SortCriterion!

    A criterion to sort search results on

    SortCriterion

    A single sorting criterion for sorting search.

    Arguments

    NameDescription
    field
    String!

    A field upon which we'll do sorting on.

    sortOrder
    SortOrder!

    The order in which we will be sorting

    StepStateInput

    The input required to update the state of a step

    Arguments

    NameDescription
    id
    String!

    The globally unique id for the step

    properties
    [StringMapEntryInput]!

    The new properties for the step

    StringMapEntryInput

    String map entry input

    Arguments

    NameDescription
    key
    String!

    The key of the map entry

    value
    String

    The value of the map entry

    TagAssociationInput

    Input provided when updating the association between a Metadata Entity and a Tag

    Arguments

    NameDescription
    tagUrn
    String!

    The primary key of the Tag to add or remove

    resourceUrn
    String!

    The target Metadata Entity to add or remove the Tag to

    subResourceType
    SubResourceType

    An optional type of a sub resource to attach the Tag to

    subResource
    String

    An optional sub resource identifier to attach the Tag to

    TagAssociationUpdate

    Deprecated, use addTag or removeTag mutation instead A tag update to be applied

    Arguments

    NameDescription
    tag
    TagUpdateInput!

    The tag being applied

    TagUpdateInput

    Deprecated, use addTag or removeTag mutations instead An update for a particular Tag entity

    Arguments

    NameDescription
    urn
    String!

    The primary key of the Tag

    name
    String!

    The display name of a Tag

    description
    String

    Description of the tag

    ownership
    OwnershipUpdate

    Ownership metadata of the tag

    TermAssociationInput

    Input provided when updating the association between a Metadata Entity and a Glossary Term

    Arguments

    NameDescription
    termUrn
    String!

    The primary key of the Glossary Term to add or remove

    resourceUrn
    String!

    The target Metadata Entity to add or remove the Glossary Term from

    subResourceType
    SubResourceType

    An optional type of a sub resource to attach the Glossary Term to

    subResource
    String

    An optional sub resource identifier to attach the Glossary Term to

    TestDefinitionInput

    Arguments

    NameDescription
    json
    String

    The string representation of the Test

    UpdateCorpUserViewsSettingsInput

    Input required to update a users settings.

    Arguments

    NameDescription
    defaultView
    String

    The URN of the View that serves as this user's personal default. If not provided, any existing default view will be removed.

    UpdateDataProductInput

    Input properties required for update a DataProduct

    Arguments

    NameDescription
    name
    String

    A display name for the DataProduct

    description
    String

    An optional description for the DataProduct

    UpdateDeprecationInput

    Input provided when setting the Deprecation status for an Entity.

    Arguments

    NameDescription
    urn
    String!

    The urn of the Entity to set deprecation for.

    deprecated
    Boolean!

    Whether the Entity is marked as deprecated.

    decommissionTime
    Long

    Optional - The time user plan to decommission this entity

    note
    String

    Optional - Additional information about the entity deprecation plan

    UpdateEmbedInput

    Input required to set or clear information related to rendering a Data Asset inside of DataHub.

    Arguments

    NameDescription
    urn
    String!

    The URN associated with the Data Asset to update. Only dataset, dashboard, and chart urns are currently supported.

    renderUrl
    String

    Set or clear a URL used to render an embedded asset.

    UpdateGlobalViewsSettingsInput

    Input required to update Global View Settings.

    Arguments

    NameDescription
    defaultView
    String

    The URN of the View that serves as the Global, or organization-wide, default. If this field is not provided, the existing Global Default will be cleared.

    UpdateIngestionSourceConfigInput

    Input parameters for creating / updating an Ingestion Source

    Arguments

    NameDescription
    recipe
    String!

    A JSON-encoded recipe

    version
    String

    The version of DataHub Ingestion Framework to use when executing the recipe.

    executorId
    String!

    The id of the executor to use for executing the recipe

    debugMode
    Boolean

    Whether or not to run ingestion in debug mode

    UpdateIngestionSourceInput

    Input arguments for creating / updating an Ingestion Source

    Arguments

    NameDescription
    name
    String!

    A name associated with the ingestion source

    type
    String!

    The type of the source itself, e.g. mysql, bigquery, bigquery-usage. Should match the recipe.

    description
    String

    An optional description associated with the ingestion source

    schedule
    UpdateIngestionSourceScheduleInput

    An optional schedule for the ingestion source. If not provided, the source is only available for run on-demand.

    config
    UpdateIngestionSourceConfigInput!

    A set of type-specific ingestion source configurations

    UpdateIngestionSourceScheduleInput

    Input arguments for creating / updating the schedule of an Ingestion Source

    Arguments

    NameDescription
    interval
    String!

    The cron-formatted interval describing when the job should be executed

    timezone
    String!

    The name of the timezone in which the cron interval should be scheduled (e.g. America/Los_Angeles)

    UpdateLineageInput

    Input required in order to upsert lineage edges

    Arguments

    NameDescription
    edgesToAdd
    [LineageEdge]!

    New lineage edges to upsert

    edgesToRemove
    [LineageEdge]!

    Lineage edges to remove. Takes precedence over edgesToAdd - so edges existing both edgesToAdd and edgesToRemove will be removed.

    UpdateMediaInput

    Input provided for filling in a post content

    Arguments

    NameDescription
    type
    MediaType!

    The type of media

    location
    String!

    The location of the media (a URL)

    UpdateNameInput

    Input for updating the name of an entity

    Arguments

    NameDescription
    name
    String!

    The new name

    urn
    String!

    The primary key of the resource to update the name for

    UpdateOwnershipTypeInput

    Arguments

    NameDescription
    name
    String

    The name of the Custom Ownership Type

    description
    String

    The description of the Custom Ownership Type

    UpdateParentNodeInput

    Input for updating the parent node of a resource. Currently only GlossaryNodes and GlossaryTerms have parentNodes.

    Arguments

    NameDescription
    parentNode
    String

    The new parent node urn. If parentNode is null, this will remove the parent from this entity

    resourceUrn
    String!

    The primary key of the resource to update the parent node for

    UpdatePostContentInput

    Input provided for filling in a post content

    Arguments

    NameDescription
    contentType
    PostContentType!

    The type of post content

    title
    String!

    The title of the post

    description
    String

    Optional content of the post

    link
    String

    Optional link that the post is associated with

    media
    UpdateMediaInput

    Optional media contained in the post

    UpdateQueryInput

    Input required for updating an existing Query. Requires the 'Edit Queries' privilege for all query subjects.

    Arguments

    NameDescription
    properties
    UpdateQueryPropertiesInput

    Properties about the Query

    subjects
    [UpdateQuerySubjectInput!]

    Subjects for the query

    UpdateQueryPropertiesInput

    Input properties required for creating a Query. Any non-null fields will be updated if provided.

    Arguments

    NameDescription
    name
    String

    An optional display name for the Query

    description
    String

    An optional description for the Query

    statement
    QueryStatementInput

    The Query contents

    UpdateQuerySubjectInput

    Input required for creating a Query. For now, only datasets are supported.

    Arguments

    NameDescription
    datasetUrn
    String!

    The urn of the dataset that is the subject of the query

    UpdateTestInput

    Arguments

    NameDescription
    name
    String!

    The name of the Test

    category
    String!

    The category of the Test (user defined)

    description
    String

    Description of the test

    definition
    TestDefinitionInput!

    The test definition

    UpdateUserSettingInput

    Input for updating a user setting

    Arguments

    NameDescription
    name
    UserSetting!

    The name of the setting

    value
    Boolean!

    The new value of the setting

    UpdateViewInput

    Input provided when updating a DataHub View

    Arguments

    NameDescription
    name
    String

    The name of the View

    description
    String

    An optional description of the View

    definition
    DataHubViewDefinitionInput

    The view definition itself

    - + \ No newline at end of file diff --git a/docs/graphql/interfaces/index.html b/docs/graphql/interfaces/index.html index 2171c1a267cd5..e7a4963a819d4 100644 --- a/docs/graphql/interfaces/index.html +++ b/docs/graphql/interfaces/index.html @@ -8,13 +8,13 @@ - +

    Interfaces

    Aspect

    A versioned aspect, or single group of related metadata, associated with an Entity and having a unique version

    Implemented by

    Fields

    NameDescription
    version
    Long

    The version of the aspect, where zero represents the latest version

    BrowsableEntity

    A Metadata Entity which is browsable, or has browse paths.

    Implemented by

    Fields

    NameDescription
    browsePaths
    [BrowsePath!]

    The browse paths corresponding to an entity. If no Browse Paths have been generated before, this will be null.

    Entity

    A top level Metadata Entity

    Implemented by

    Fields

    NameDescription
    urn
    String!

    A primary key of the Metadata Entity

    type
    EntityType!

    A standard Entity Type

    relationships
    EntityRelationshipsResult

    List of relationships between the source Entity and some destination entities with a given types

    Arguments

    NameDescription
    input
    RelationshipsInput!

    EntityWithRelationships

    Deprecated, use relationships field instead

    Implements

    Implemented by

    Fields

    NameDescription
    urn
    String!

    A primary key associated with the Metadata Entity

    type
    EntityType!

    A standard Entity Type

    relationships
    EntityRelationshipsResult

    Granular API for querying edges extending from this entity

    Arguments

    NameDescription
    input
    RelationshipsInput!
    lineage
    EntityLineageResult

    Edges extending from this entity grouped by direction in the lineage graph

    Arguments

    NameDescription
    input
    LineageInput!

    TimeSeriesAspect

    A time series aspect, or a group of related metadata associated with an Entity and corresponding to a particular timestamp

    Implemented by

    Fields

    NameDescription
    timestampMillis
    Long!

    The timestamp associated with the time series aspect in milliseconds

    - + \ No newline at end of file diff --git a/docs/graphql/mutations/index.html b/docs/graphql/mutations/index.html index d60f83eeab69c..dce02b955b02c 100644 --- a/docs/graphql/mutations/index.html +++ b/docs/graphql/mutations/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ it will be overwritten.

    Arguments

    NameDescription
    input
    CreateTagInput!

    Inputs required to create a new Tag.

    createTest

    Type: String

    Create a new test

    Arguments

    NameDescription
    input
    CreateTestInput!

    createTestConnectionRequest

    Type: String

    Create a request to execute a test ingestion connection job input: Input required for creating a test connection request

    Arguments

    NameDescription
    input
    CreateTestConnectionRequestInput!

    createView

    Type: DataHubView

    Create a new DataHub View (Saved Filter)

    Arguments

    NameDescription
    input
    CreateViewInput!

    Input required to create a new DataHub View

    deleteAssertion

    Type: Boolean

    Remove an assertion associated with an entity. Requires the 'Edit Assertions' privilege on the entity.

    Arguments

    NameDescription
    urn
    String!

    The assertion to remove

    deleteDataProduct

    Type: Boolean

    Delete a DataProduct by urn.

    Arguments

    NameDescription
    urn
    String!

    Urn of the data product to remove.

    deleteDomain

    Type: Boolean

    Delete a Domain

    Arguments

    NameDescription
    urn
    String!

    The urn of the Domain to delete

    deleteGlossaryEntity

    Type: Boolean

    Remove a glossary entity (GlossaryTerm or GlossaryNode). Return boolean whether it was successful or not.

    Arguments

    NameDescription
    urn
    String!

    deleteIngestionSource

    Type: String

    Delete an existing ingestion source

    Arguments

    NameDescription
    urn
    String!

    deleteOwnershipType

    Type: Boolean

    Delete a Custom Ownership Type by urn. This requires the 'Manage Ownership Types' Metadata Privilege.

    Arguments

    NameDescription
    urn
    String!

    Urn of the Custom Ownership Type to remove.

    deleteReferences
    Boolean

    deletePolicy

    Type: String

    Remove an existing policy and returns the policy urn

    Arguments

    NameDescription
    urn
    String!

    deletePost

    Type: Boolean

    Delete a post

    Arguments

    NameDescription
    urn
    String!

    deleteQuery

    Type: Boolean

    Delete a Query by urn. This requires the 'Edit Queries' Metadata Privilege.

    Arguments

    NameDescription
    urn
    String!

    Urn of the query to remove.

    deleteSecret

    Type: String

    Delete a Secret

    Arguments

    NameDescription
    urn
    String!

    deleteTag

    Type: Boolean

    Delete a Tag

    Arguments

    NameDescription
    urn
    String!

    The urn of the Tag to delete

    deleteTest

    Type: Boolean

    Delete an existing test - note that this will NOT delete dangling pointers until the next execution of the test.

    Arguments

    NameDescription
    urn
    String!

    deleteView

    Type: Boolean

    Delete a DataHub View (Saved Filter)

    Arguments

    NameDescription
    urn
    String!

    The urn of the View to delete

    removeGroup

    Type: Boolean

    Remove a group. Requires Manage Users & Groups Platform Privilege

    Arguments

    NameDescription
    urn
    String!

    removeGroupMembers

    Type: Boolean

    Remove members from a group

    Arguments

    NameDescription
    input
    RemoveGroupMembersInput!

    Type: Boolean

    Remove a link, or institutional memory, from a particular Entity

    Arguments

    NameDescription
    input
    RemoveLinkInput!

    removeOwner

    Type: Boolean

    Remove an owner from a particular Entity

    Arguments

    NameDescription
    input
    RemoveOwnerInput!

    removeRelatedTerms

    Type: Boolean

    Remove multiple related Terms for a Glossary Term

    Arguments

    NameDescription
    input
    RelatedTermsInput!

    removeTag

    Type: Boolean

    Remove a tag from a particular Entity or subresource

    Arguments

    NameDescription
    input
    TagAssociationInput!

    removeTerm

    Type: Boolean

    Remove a glossary term from a particular Entity or subresource

    Arguments

    NameDescription
    input
    TermAssociationInput!

    removeUser

    Type: Boolean

    Remove a user. Requires Manage Users & Groups Platform Privilege

    Arguments

    NameDescription
    urn
    String!

    reportOperation

    Type: String

    Report a new operation for an asset

    Arguments

    NameDescription
    input
    ReportOperationInput!

    Input required to report an operation

    revokeAccessToken

    Type: Boolean!

    Revokes access tokens.

    Arguments

    NameDescription
    tokenId
    String!

    rollbackIngestion

    Type: String

    Rollback a specific ingestion execution run based on its runId

    Arguments

    NameDescription
    input
    RollbackIngestionInput!

    setDomain

    Type: Boolean

    Sets the Domain for a Dataset, Chart, Dashboard, Data Flow (Pipeline), or Data Job (Task). Returns true if the Domain was successfully added, or already exists. Requires the Edit Domains privilege for the Entity.

    Arguments

    NameDescription
    entityUrn
    String!
    domainUrn
    String!

    setTagColor

    Type: Boolean

    Set the hex color associated with an existing Tag

    Arguments

    NameDescription
    urn
    String!
    colorHex
    String!

    unsetDomain

    Type: Boolean

    Removes the Domain from a Dataset, Chart, Dashboard, Data Flow (Pipeline), or Data Job (Task). Returns true if the Domain was successfully removed, or was already removed. Requires the Edit Domains privilege for an asset.

    Arguments

    NameDescription
    entityUrn
    String!

    updateChart

    Type: Chart

    Update the metadata about a particular Chart

    Arguments

    NameDescription
    urn
    String!
    input
    ChartUpdateInput!

    updateCorpGroupProperties

    Type: CorpGroup

    Update a particular Corp Group's editable properties

    Arguments

    NameDescription
    urn
    String!
    input
    CorpGroupUpdateInput!

    updateCorpUserProperties

    Type: CorpUser

    Update a particular Corp User's editable properties

    Arguments

    NameDescription
    urn
    String!
    input
    CorpUserUpdateInput!

    updateCorpUserViewsSettings

    Type: Boolean

    Update the View-related settings for a user.

    Arguments

    NameDescription
    input
    UpdateCorpUserViewsSettingsInput!

    updateDashboard

    Type: Dashboard

    Update the metadata about a particular Dashboard

    Arguments

    NameDescription
    urn
    String!
    input
    DashboardUpdateInput!

    updateDataFlow

    Type: DataFlow

    Update the metadata about a particular Data Flow (Pipeline)

    Arguments

    NameDescription
    urn
    String!
    input
    DataFlowUpdateInput!

    updateDataJob

    Type: DataJob

    Update the metadata about a particular Data Job (Task)

    Arguments

    NameDescription
    urn
    String!
    input
    DataJobUpdateInput!

    updateDataProduct

    Type: DataProduct

    Update a Data Product

    Arguments

    NameDescription
    urn
    String!

    The urn identifier for the Data Product to update.

    input
    UpdateDataProductInput!

    Inputs required to create a new DataProduct.

    updateDataset

    Type: Dataset

    Update the metadata about a particular Dataset

    Arguments

    NameDescription
    urn
    String!
    input
    DatasetUpdateInput!

    updateDatasets

    Type: [Dataset]

    Update the metadata about a batch of Datasets

    Arguments

    NameDescription
    input
    [BatchDatasetUpdateInput!]!

    updateDeprecation

    Type: Boolean

    Sets the Deprecation status for a Metadata Entity. Requires the Edit Deprecation status privilege for an entity.

    Arguments

    NameDescription
    input
    UpdateDeprecationInput!

    Input required to set deprecation for an Entity.

    updateDescription

    Type: Boolean

    Incubating. Updates the description of a resource. Currently only supports Dataset Schema Fields, Containers

    Arguments

    NameDescription
    input
    DescriptionUpdateInput!

    updateEmbed

    Type: Boolean

    Update the Embed information for a Dataset, Dashboard, or Chart.

    Arguments

    NameDescription
    input
    UpdateEmbedInput!

    updateGlobalViewsSettings

    Type: Boolean!

    Update the global settings related to the Views feature. Requires the 'Manage Global Views' Platform Privilege.

    Arguments

    NameDescription
    input
    UpdateGlobalViewsSettingsInput!

    updateIngestionSource

    Type: String

    Update an existing ingestion source

    Arguments

    NameDescription
    urn
    String!
    input
    UpdateIngestionSourceInput!

    updateLineage

    Type: Boolean

    Update lineage for an entity

    Arguments

    NameDescription
    input
    UpdateLineageInput!

    updateName

    Type: Boolean

    Updates the name of the entity.

    Arguments

    NameDescription
    input
    UpdateNameInput!

    updateNotebook

    Type: Notebook

    Update the metadata about a particular Notebook

    Arguments

    NameDescription
    urn
    String!
    input
    NotebookUpdateInput!

    updateOwnershipType

    Type: OwnershipTypeEntity

    Update an existing Custom Ownership Type. This requires the 'Manage Ownership Types' Metadata Privilege.

    Arguments

    NameDescription
    urn
    String!

    The urn identifier for the custom ownership type to update.

    input
    UpdateOwnershipTypeInput!

    Inputs required to update an existing Custom Ownership Type.

    updateParentNode

    Type: Boolean

    Updates the parent node of a resource. Currently only GlossaryNodes and GlossaryTerms have parentNodes.

    Arguments

    NameDescription
    input
    UpdateParentNodeInput!

    updatePolicy

    Type: String

    Update an existing policy and returns the resulting urn

    Arguments

    NameDescription
    urn
    String!
    input
    PolicyUpdateInput!

    updateQuery

    Type: QueryEntity

    Update an existing Query

    Arguments

    NameDescription
    urn
    String!

    The urn identifier for the query to update.

    input
    UpdateQueryInput!

    Inputs required to update a Query.

    updateTag

    Type: Tag

    Update the information about a particular Entity Tag

    Arguments

    NameDescription
    urn
    String!
    input
    TagUpdateInput!

    updateTest

    Type: String

    Update an existing test

    Arguments

    NameDescription
    urn
    String!
    input
    UpdateTestInput!

    updateUserSetting

    Type: Boolean

    Update a user setting

    Arguments

    NameDescription
    input
    UpdateUserSettingInput!

    updateUserStatus

    Type: String

    Change the status of a user. Requires Manage Users & Groups Platform Privilege

    Arguments

    NameDescription
    urn
    String!
    status
    CorpUserStatus!

    updateView

    Type: DataHubView

    Update a DataHub View (Saved Filter)

    Arguments

    NameDescription
    urn
    String!

    The urn of the View to update

    input
    UpdateViewInput!

    Input required to update an existing DataHub View

    - + \ No newline at end of file diff --git a/docs/graphql/objects/index.html b/docs/graphql/objects/index.html index 12c5445b04b9c..db74bc4f8b810 100644 --- a/docs/graphql/objects/index.html +++ b/docs/graphql/objects/index.html @@ -8,7 +8,7 @@ - + @@ -48,10 +48,11 @@ An DataHub Platform Access Policy Access Policies determine who can perform what actions against which resources on the platform

    Fields

    NameDescription
    urn
    String!

    The primary key of the Policy

    type
    PolicyType!

    The type of the Policy

    name
    String!

    The name of the Policy

    state
    PolicyState!

    The present state of the Policy

    description
    String

    The description of the Policy

    resources
    ResourceFilter

    The resources that the Policy privileges apply to

    privileges
    [String!]!

    The privileges that the Policy grants

    actors
    ActorFilter!

    The actors that the Policy grants privileges to

    editable
    Boolean!

    Whether the Policy is editable, ie system policies, or not

    PolicyMatchCriterion

    Criterion to define relationship between field and values

    Fields

    NameDescription
    field
    String!

    The name of the field that the criterion refers to e.g. entity_type, entity_urn, domain

    values
    [PolicyMatchCriterionValue!]!

    Values. Matches criterion if any one of the values matches condition (OR-relationship)

    condition
    PolicyMatchCondition!

    The name of the field that the criterion refers to

    PolicyMatchCriterionValue

    Value in PolicyMatchCriterion with hydrated entity if value is urn

    Fields

    NameDescription
    value
    String!

    The value of the field to match

    entity
    Entity

    Hydrated entities of the above values. Only set if the value is an urn

    PolicyMatchFilter

    Filter object that encodes a complex filter logic with OR + AND

    Fields

    NameDescription
    criteria
    [PolicyMatchCriterion!]

    List of criteria to apply

    Post

    Input provided when creating a Post

    Implements

    Fields

    NameDescription
    urn
    String!

    The primary key of the Post

    type
    EntityType!

    The standard Entity Type

    relationships
    EntityRelationshipsResult

    Granular API for querying edges extending from the Post

    Arguments

    NameDescription
    input
    RelationshipsInput!
    postType
    PostType!

    The type of post

    content
    PostContent!

    The content of the post

    lastModified
    AuditStamp!

    When the post was last modified

    PostContent

    Post content

    Fields

    NameDescription
    contentType
    PostContentType!

    The type of post content

    title
    String!

    The title of the post

    description
    String

    Optional content of the post

    link
    String

    Optional link that the post is associated with

    media
    Media

    Optional media contained in the post

    Privilege

    An individual DataHub Access Privilege

    Fields

    NameDescription
    type
    String!

    Standardized privilege type, serving as a unique identifier for a privilege eg EDIT_ENTITY

    displayName
    String

    The name to appear when displaying the privilege, eg Edit Entity

    description
    String

    A description of the privilege to display

    Privileges

    Object that encodes the privileges the actor has for a given resource

    Fields

    NameDescription
    privileges
    [String!]!

    Granted Privileges

    QuantitativeAnalyses

    Fields

    NameDescription
    unitaryResults
    ResultsType

    Link to a dashboard with results showing how the model performed with respect to each factor

    intersectionalResults
    ResultsType

    Link to a dashboard with results showing how the model performed with respect to the intersection of evaluated factors

    QueriesTabConfig

    Configuration for the queries tab

    Fields

    NameDescription
    queriesTabResultSize
    Int

    Number of queries to show in the queries tab

    QueryCell

    A Notebook cell which contains Query as content

    Fields

    NameDescription
    cellTitle
    String!

    Title of the cell

    cellId
    String!

    Unique id for the cell.

    changeAuditStamps
    ChangeAuditStamps

    Captures information about who created/last modified/deleted this TextCell and when

    rawQuery
    String!

    Raw query to explain some specific logic in a Notebook

    lastExecuted
    AuditStamp

    Captures information about who last executed this query cell and when

    QueryEntity

    An individual Query

    Implements

    Fields

    NameDescription
    urn
    String!

    A primary key associated with the Query

    type
    EntityType!

    A standard Entity Type

    properties
    QueryProperties

    Properties about the Query

    subjects
    [QuerySubject!]

    Subjects for the query

    relationships
    EntityRelationshipsResult

    Granular API for querying edges extending from this entity

    Arguments

    NameDescription
    input
    RelationshipsInput!

    QueryProperties

    Properties about an individual Query

    Fields

    NameDescription
    statement
    QueryStatement!

    The Query statement itself

    source
    QuerySource!

    The source of the Query

    name
    String

    The name of the Query

    description
    String

    The description of the Query

    created
    AuditStamp!

    An Audit Stamp corresponding to the creation of this resource

    lastModified
    AuditStamp!

    An Audit Stamp corresponding to the update of this resource

    QueryStatement

    An individual Query Statement

    Fields

    NameDescription
    value
    String!

    The query statement value

    language
    QueryLanguage!

    The language for the Query Statement

    QuerySubject

    The subject for a Query

    Fields

    NameDescription
    dataset
    Dataset!

    The dataset which is the subject of the Query

    QuickFilter

    A quick filter in search and auto-complete

    Fields

    NameDescription
    field
    String!

    Name of field to filter by

    value
    String!

    Value to filter on

    entity
    Entity

    Entity that the value maps to if any

    RawAspect

    Payload representing data about a single aspect

    Fields

    NameDescription
    aspectName
    String!

    The name of the aspect

    payload
    String

    JSON string containing the aspect's payload

    renderSpec
    AspectRenderSpec

    Details for the frontend on how the raw aspect should be rendered

    RecommendationContent

    Content to display within each recommendation module

    Fields

    NameDescription
    value
    String!

    String representation of content

    entity
    Entity

    Entity being recommended. Empty if the content being recommended is not an entity

    params
    RecommendationParams

    Additional context required to generate the recommendation

    RecommendationModule

    Fields

    NameDescription
    title
    String!

    Title of the module to display

    moduleId
    String!

    Unique id of the module being recommended

    renderType
    RecommendationRenderType!

    Type of rendering that defines how the module should be rendered

    content
    [RecommendationContent!]!

    List of content to display inside the module

    RecommendationParams

    Parameters required to render a recommendation of a given type

    Fields

    NameDescription
    searchParams
    SearchParams

    Context to define the search recommendations

    entityProfileParams
    EntityProfileParams

    Context to define the entity profile page

    contentParams
    ContentParams

    Context about the recommendation

    ResetToken

    Token that allows native users to reset their credentials

    Fields

    NameDescription
    resetToken
    String!

    The reset token

    ResourceFilter

    The resources that a DataHub Access Policy applies to

    Fields

    NameDescription
    type
    String

    The type of the resource the policy should apply to Not required because in the future we want to support filtering by type OR by domain

    resources
    [String!]

    A list of specific resource urns to apply the filter to

    allResources
    Boolean

    Whether or not to apply the filter to all resources of the type

    filter
    PolicyMatchFilter

    Whether or not to apply the filter to all resources of the type

    ResourcePrivileges

    A privilege associated with a particular resource type A resource is most commonly a DataHub Metadata Entity

    Fields

    NameDescription
    resourceType
    String!

    Resource type associated with the Access Privilege, eg dataset

    resourceTypeDisplayName
    String

    The name to use for displaying the resourceType

    entityType
    EntityType

    An optional entity type to use when performing search and navigation to the entity

    privileges
    [Privilege!]!

    A list of privileges that are supported against this resource

    Role

    Implements

    Fields

    NameDescription
    urn
    String!

    A primary key of the Metadata Entity

    type
    EntityType!

    A standard Entity Type

    relationships
    EntityRelationshipsResult

    List of relationships between the source Entity and some destination entities with a given types

    Arguments

    NameDescription
    input
    RelationshipsInput!
    id
    String!

    Id of the Role

    properties
    RoleProperties!

    Role properties to include Request Access Url

    actors
    Actor!

    A standard Entity Type

    RoleAssociation

    Fields

    NameDescription
    role
    Role!

    The Role entity itself

    associatedUrn
    String!

    Reference back to the tagged urn for tracking purposes e.g. when sibling nodes are merged together

    RoleProperties

    Fields

    NameDescription
    name
    String!

    Name of the Role in an organisation

    description
    String

    Description about the role

    type
    String

    Role type can be READ, WRITE or ADMIN

    requestUrl
    String

    Url to request a role for a user in an organisation

    RoleUser

    Fields

    NameDescription
    user
    CorpUser!

    Linked corp user of a role

    Row

    For consumption by UI only

    Fields

    NameDescription
    values
    [String!]!
    cells
    [Cell!]

    Schema

    Deprecated, use SchemaMetadata instead Metadata about a Dataset schema

    Fields

    NameDescription
    datasetUrn
    String

    Dataset this schema metadata is associated with

    name
    String!

    Schema name

    platformUrn
    String!

    Platform this schema metadata is associated with

    version
    Long!

    The version of the GMS Schema metadata

    cluster
    String

    The cluster this schema metadata is derived from

    hash
    String!

    The SHA1 hash of the schema content

    platformSchema
    PlatformSchema

    The native schema in the datasets platform, schemaless if it was not provided

    fields
    [SchemaField!]!

    Client provided a list of fields from value schema

    primaryKeys
    [String!]

    Client provided list of fields that define primary keys to access record

    foreignKeys
    [ForeignKeyConstraint]

    Client provided list of foreign key constraints

    createdAt
    Long

    The time at which the schema metadata information was created

    lastObserved
    Long

    The time at which the schema metadata information was last ingested

    SchemaField

    Information about an individual field in a Dataset schema

    Fields

    NameDescription
    fieldPath
    String!

    Flattened name of the field computed from jsonPath field

    jsonPath
    String

    Flattened name of a field in JSON Path notation

    label
    String

    Human readable label for the field. Not supplied by all data sources

    nullable
    Boolean!

    Indicates if this field is optional or nullable

    description
    String

    Description of the field

    type
    SchemaFieldDataType!

    Platform independent field type of the field

    nativeDataType
    String

    The native type of the field in the datasets platform as declared by platform schema

    recursive
    Boolean!

    Whether the field references its own type recursively

    globalTags
    GlobalTags
    Deprecated: No longer supported

    Deprecated, use tags field instead Tags associated with the field

    tags
    GlobalTags

    Tags associated with the field

    glossaryTerms
    GlossaryTerms

    Glossary terms associated with the field

    isPartOfKey
    Boolean

    Whether the field is part of a key schema

    isPartitioningKey
    Boolean

    Whether the field is part of a partitioning key schema

    SchemaFieldBlame

    Blame for a single field

    Fields

    NameDescription
    fieldPath
    String!

    Flattened name of a schema field

    schemaFieldChange
    SchemaFieldChange!

    Attributes identifying a field change

    SchemaFieldChange

    Attributes identifying a field change

    Fields

    NameDescription
    timestampMillis
    Long!

    The time at which the schema was updated

    lastSemanticVersion
    String!

    The last semantic version that this schema was changed in

    versionStamp
    String!

    Version stamp of the change

    changeType
    ChangeOperationType!

    The type of the change

    lastSchemaFieldChange
    String

    Last column update, such as Added/Modified/Removed in v1.2.3.

    SchemaFieldEntity

    Standalone schema field entity. Differs from the SchemaField struct because it is not directly nested inside a schema field

    Implements

    Fields

    NameDescription
    urn
    String!

    Primary key of the schema field

    type
    EntityType!

    A standard Entity Type

    fieldPath
    String!

    Field path identifying the field in its dataset

    parent
    Entity!

    The field's parent.

    relationships
    EntityRelationshipsResult

    Granular API for querying edges extending from this entity

    Arguments

    NameDescription
    input
    RelationshipsInput!

    SchemaFieldRef

    A Dataset schema field (i.e. column)

    Fields

    NameDescription
    urn
    String!

    A schema field urn

    path
    String!

    A schema field path

    SchemaMetadata

    Metadata about a Dataset schema

    Implements

    Fields

    NameDescription
    aspectVersion
    Long

    The logical version of the schema metadata, where zero represents the latest version with otherwise monotonic ordering starting at one

    datasetUrn
    String

    Dataset this schema metadata is associated with

    name
    String!

    Schema name

    platformUrn
    String!

    Platform this schema metadata is associated with

    version
    Long!

    The version of the GMS Schema metadata

    cluster
    String

    The cluster this schema metadata is derived from

    hash
    String!

    The SHA1 hash of the schema content

    platformSchema
    PlatformSchema

    The native schema in the datasets platform, schemaless if it was not provided

    fields
    [SchemaField!]!

    Client provided a list of fields from value schema

    primaryKeys
    [String!]

    Client provided list of fields that define primary keys to access record

    foreignKeys
    [ForeignKeyConstraint]

    Client provided list of foreign key constraints

    createdAt
    Long

    The time at which the schema metadata information was created

    ScrollAcrossLineageResults

    Results returned by issuing a search across relationships query using scroll API

    Fields

    NameDescription
    nextScrollId
    String

    Opaque ID to pass to the next request to the server

    count
    Int!

    The number of entities included in the result set

    total
    Int!

    The total number of search results matching the query and filters

    searchResults
    [SearchAcrossLineageResult!]!

    The search result entities

    facets
    [FacetMetadata!]

    Candidate facet aggregations used for search filtering

    ScrollResults

    Results returned by issuing a search query

    Fields

    NameDescription
    nextScrollId
    String

    Opaque ID to pass to the next request to the server

    count
    Int!

    The number of entities included in the result set

    total
    Int!

    The total number of search results matching the query and filters

    searchResults
    [SearchResult!]!

    The search result entities for a scroll request

    facets
    [FacetMetadata!]

    Candidate facet aggregations used for search filtering

    SearchAcrossLineageResult

    Individual search result from a search across relationships query (has added metadata about the path)

    Fields

    NameDescription
    entity
    Entity!

    The resolved DataHub Metadata Entity matching the search query

    insights
    [SearchInsight!]

    Insights about why the search result was matched

    matchedFields
    [MatchedField!]!

    Matched field hint

    paths
    [EntityPath]

    Optional list of entities between the source and destination node

    degree
    Int!

    Degree of relationship (number of hops to get to entity)

    SearchAcrossLineageResults

    Results returned by issuing a search across relationships query

    Fields

    NameDescription
    start
    Int!

    The offset of the result set

    count
    Int!

    The number of entities included in the result set

    total
    Int!

    The total number of search results matching the query and filters

    searchResults
    [SearchAcrossLineageResult!]!

    The search result entities

    facets
    [FacetMetadata!]

    Candidate facet aggregations used for search filtering

    freshness
    FreshnessStats

    Optional freshness characteristics of this query (cached, staleness etc.)

    SearchInsight

    Insights about why a search result was returned or ranked in the way that it was

    Fields

    NameDescription
    text
    String!

    The insight to display

    icon
    String

    An optional emoji to display in front of the text

    SearchParams

    Context to define the search recommendations

    Fields

    NameDescription
    types
    [EntityType!]

    Entity types to be searched. If this is not provided, all entities will be searched.

    query
    String!

    Search query

    filters
    [FacetFilter!]

    Filters

    SearchResult

    An individual search result hit

    Fields

    NameDescription
    entity
    Entity!

    The resolved DataHub Metadata Entity matching the search query

    insights
    [SearchInsight!]

    Insights about why the search result was matched

    matchedFields
    [MatchedField!]!

    Matched field hint

    SearchResults

    Results returned by issuing a search query

    Fields

    NameDescription
    start
    Int!

    The offset of the result set

    count
    Int!

    The number of entities included in the result set

    total
    Int!

    The total number of search results matching the query and filters

    searchResults
    [SearchResult!]!

    The search result entities

    facets
    [FacetMetadata!]

    Candidate facet aggregations used for search filtering

    SearchResultsVisualConfig

    Configuration for a search result

    Fields

    NameDescription
    enableNameHighlight
    Boolean

    Whether a search result should highlight the name/description if it was matched on those fields.

    Secret

    A referencible secret stored in DataHub's system. Notice that we do not return the actual secret value.

    Fields

    NameDescription
    urn
    String!

    The urn of the secret

    name
    String!

    The name of the secret

    description
    String

    An optional description for the secret

    SecretValue

    A plaintext secret value

    Fields

    NameDescription
    name
    String!

    The name of the secret

    value
    String!

    The plaintext value of the secret.

    SemanticVersionStruct

    Properties identify a semantic version

    Fields

    NameDescription
    semanticVersion
    String

    Semantic version of the change

    semanticVersionTimestamp
    Long

    Semantic version timestamp

    versionStamp
    String

    Version stamp of the change

    SiblingProperties

    Metadata about the entity's siblings

    Fields

    NameDescription
    isPrimary
    Boolean

    If this entity is the primary sibling among the sibling set

    siblings
    [Entity]

    The sibling entities

    SourceCode

    Fields

    NameDescription
    sourceCode
    [SourceCodeUrl!]

    Source Code along with types

    SourceCodeUrl

    Fields

    NameDescription
    type
    SourceCodeUrlType!

    Source Code Url Types

    sourceCodeUrl
    String!

    Source Code Url

    Status

    The status of a particular Metadata Entity

    Fields

    NameDescription
    removed
    Boolean!

    Whether the entity is removed or not

    StepStateResult

    A single step state

    Fields

    NameDescription
    id
    String!

    Unique id of the step

    properties
    [StringMapEntry!]!

    The properties for the step state

    StringBox

    Fields

    NameDescription
    stringValue
    String!

    StringMapEntry

    An entry in a string string map represented as a tuple

    Fields

    NameDescription
    key
    String!

    The key of the map entry

    value
    String

    The value of the map entry

    StructuredReport

    A flexible carrier for structured results of an execution request.

    Fields

    NameDescription
    type
    String!

    The type of the structured report. (e.g. INGESTION_REPORT, TEST_CONNECTION_REPORT, etc.)

    serializedValue
    String!

    The serialized value of the structured report

    contentType
    String!

    The content-type of the serialized value (e.g. application/json, application/json;gzip etc.)

    SubTypes

    Fields

    NameDescription
    typeNames
    [String!]

    The sub-types that this entity implements. e.g. Datasets that are views will implement the "view" subtype

    SystemFreshness

    Fields

    NameDescription
    systemName
    String!

    Name of the system

    freshnessMillis
    Long!

    The latest timestamp in millis of the system that was used to respond to this query In case a cache was consulted, this reflects the freshness of the cache In case an index was consulted, this reflects the freshness of the index

    TableChart

    For consumption by UI only

    Fields

    NameDescription
    title
    String!
    columns
    [String!]!
    rows
    [Row!]!

    TableSchema

    Information about a raw Table Schema

    Fields

    NameDescription
    schema
    String!

    Raw table schema

    Tag

    A Tag Entity, which can be associated with other Metadata Entities and subresources

    Implements

    Fields

    NameDescription
    urn
    String!

    The primary key of the TAG

    type
    EntityType!

    A standard Entity Type

    name
    String!
    Deprecated: No longer supported

    A unique identifier for the Tag. Deprecated - Use properties.name field instead.

    properties
    TagProperties

    Additional properties about the Tag

    editableProperties
    EditableTagProperties
    Deprecated: No longer supported

    Additional read write properties about the Tag Deprecated! Use 'properties' field instead.

    ownership
    Ownership

    Ownership metadata of the dataset

    relationships
    EntityRelationshipsResult

    Granular API for querying edges extending from this entity

    Arguments

    NameDescription
    input
    RelationshipsInput!
    description
    String
    Deprecated: No longer supported

    Deprecated, use properties.description field instead

    TagAssociation

    An edge between a Metadata Entity and a Tag Modeled as a struct to permit additional attributes

    Implements

    Fields

    NameDescription
    urn
    String!

    Primary key of the schema field

    type
    EntityType!

    A standard Entity Type

    fieldPath
    String!

    Field path identifying the field in its dataset

    parent
    Entity!

    The field's parent.

    relationships
    EntityRelationshipsResult

    Granular API for querying edges extending from this entity

    Arguments

    NameDescription
    input
    RelationshipsInput!

    SchemaFieldRef

    A Dataset schema field (i.e. column)

    Fields

    NameDescription
    urn
    String!

    A schema field urn

    path
    String!

    A schema field path

    SchemaMetadata

    Metadata about a Dataset schema

    Implements

    Fields

    NameDescription
    aspectVersion
    Long

    The logical version of the schema metadata, where zero represents the latest version with otherwise monotonic ordering starting at one

    datasetUrn
    String

    Dataset this schema metadata is associated with

    name
    String!

    Schema name

    platformUrn
    String!

    Platform this schema metadata is associated with

    version
    Long!

    The version of the GMS Schema metadata

    cluster
    String

    The cluster this schema metadata is derived from

    hash
    String!

    The SHA1 hash of the schema content

    platformSchema
    PlatformSchema

    The native schema in the datasets platform, schemaless if it was not provided

    fields
    [SchemaField!]!

    Client provided a list of fields from value schema

    primaryKeys
    [String!]

    Client provided list of fields that define primary keys to access record

    foreignKeys
    [ForeignKeyConstraint]

    Client provided list of foreign key constraints

    createdAt
    Long

    The time at which the schema metadata information was created

    ScrollAcrossLineageResults

    Results returned by issuing a search across relationships query using scroll API

    Fields

    NameDescription
    nextScrollId
    String

    Opaque ID to pass to the next request to the server

    count
    Int!

    The number of entities included in the result set

    total
    Int!

    The total number of search results matching the query and filters

    searchResults
    [SearchAcrossLineageResult!]!

    The search result entities

    facets
    [FacetMetadata!]

    Candidate facet aggregations used for search filtering

    ScrollResults

    Results returned by issuing a search query

    Fields

    NameDescription
    nextScrollId
    String

    Opaque ID to pass to the next request to the server

    count
    Int!

    The number of entities included in the result set

    total
    Int!

    The total number of search results matching the query and filters

    searchResults
    [SearchResult!]!

    The search result entities for a scroll request

    facets
    [FacetMetadata!]

    Candidate facet aggregations used for search filtering

    SearchAcrossLineageResult

    Individual search result from a search across relationships query (has added metadata about the path)

    Fields

    NameDescription
    entity
    Entity!

    The resolved DataHub Metadata Entity matching the search query

    insights
    [SearchInsight!]

    Insights about why the search result was matched

    matchedFields
    [MatchedField!]!

    Matched field hint

    paths
    [EntityPath]

    Optional list of entities between the source and destination node

    degree
    Int!

    Degree of relationship (number of hops to get to entity)

    SearchAcrossLineageResults

    Results returned by issuing a search across relationships query

    Fields

    NameDescription
    start
    Int!

    The offset of the result set

    count
    Int!

    The number of entities included in the result set

    total
    Int!

    The total number of search results matching the query and filters

    searchResults
    [SearchAcrossLineageResult!]!

    The search result entities

    facets
    [FacetMetadata!]

    Candidate facet aggregations used for search filtering

    freshness
    FreshnessStats

    Optional freshness characteristics of this query (cached, staleness etc.)

    SearchInsight

    Insights about why a search result was returned or ranked in the way that it was

    Fields

    NameDescription
    text
    String!

    The insight to display

    icon
    String

    An optional emoji to display in front of the text

    SearchParams

    Context to define the search recommendations

    Fields

    NameDescription
    types
    [EntityType!]

    Entity types to be searched. If this is not provided, all entities will be searched.

    query
    String!

    Search query

    filters
    [FacetFilter!]

    Filters

    SearchResult

    An individual search result hit

    Fields

    NameDescription
    entity
    Entity!

    The resolved DataHub Metadata Entity matching the search query

    insights
    [SearchInsight!]

    Insights about why the search result was matched

    matchedFields
    [MatchedField!]!

    Matched field hint

    SearchResults

    Results returned by issuing a search query

    Fields

    NameDescription
    start
    Int!

    The offset of the result set

    count
    Int!

    The number of entities included in the result set

    total
    Int!

    The total number of search results matching the query and filters

    searchResults
    [SearchResult!]!

    The search result entities

    facets
    [FacetMetadata!]

    Candidate facet aggregations used for search filtering

    suggestions
    [SearchSuggestion!]

    Search suggestions based on the query provided for alternate query texts

    SearchResultsVisualConfig

    Configuration for a search result

    Fields

    NameDescription
    enableNameHighlight
    Boolean

    Whether a search result should highlight the name/description if it was matched on those fields.

    SearchSuggestion

    A suggestion for an alternate search query given an original query compared to all of the entity names in our search index.

    Fields

    NameDescription
    text
    String!

    The suggested text based on the provided query text compared to the entity name field in the search index.

    score
    Float

    The "edit distance" for this suggestion. The closer this number is to 1, the closer the suggested text is to the original text. The closer it is to 0, the further from the original text it is.

    frequency
    Int

    The number of entities that would match on the name field given the suggested text

    Secret

    A referencible secret stored in DataHub's system. Notice that we do not return the actual secret value.

    Fields

    NameDescription
    urn
    String!

    The urn of the secret

    name
    String!

    The name of the secret

    description
    String

    An optional description for the secret

    SecretValue

    A plaintext secret value

    Fields

    NameDescription
    name
    String!

    The name of the secret

    value
    String!

    The plaintext value of the secret.

    SemanticVersionStruct

    Properties identify a semantic version

    Fields

    NameDescription
    semanticVersion
    String

    Semantic version of the change

    semanticVersionTimestamp
    Long

    Semantic version timestamp

    versionStamp
    String

    Version stamp of the change

    SiblingProperties

    Metadata about the entity's siblings

    Fields

    NameDescription
    isPrimary
    Boolean

    If this entity is the primary sibling among the sibling set

    siblings
    [Entity]

    The sibling entities

    SourceCode

    Fields

    NameDescription
    sourceCode
    [SourceCodeUrl!]

    Source Code along with types

    SourceCodeUrl

    Fields

    NameDescription
    type
    SourceCodeUrlType!

    Source Code Url Types

    sourceCodeUrl
    String!

    Source Code Url

    Status

    The status of a particular Metadata Entity

    Fields

    NameDescription
    removed
    Boolean!

    Whether the entity is removed or not

    StepStateResult

    A single step state

    Fields

    NameDescription
    id
    String!

    Unique id of the step

    properties
    [StringMapEntry!]!

    The properties for the step state

    StringBox

    Fields

    NameDescription
    stringValue
    String!

    StringMapEntry

    An entry in a string-to-string map represented as a tuple

    Fields

    NameDescription
    key
    String!

    The key of the map entry

    value
    String

    The value of the map entry

    StructuredReport

    A flexible carrier for structured results of an execution request.

    Fields

    NameDescription
    type
    String!

    The type of the structured report. (e.g. INGESTION_REPORT, TEST_CONNECTION_REPORT, etc.)

    serializedValue
    String!

    The serialized value of the structured report

    contentType
    String!

    The content-type of the serialized value (e.g. application/json, application/json;gzip etc.)

    SubTypes

    Fields

    NameDescription
    typeNames
    [String!]

    The sub-types that this entity implements. e.g. Datasets that are views will implement the "view" subtype

    SystemFreshness

    Fields

    NameDescription
    systemName
    String!

    Name of the system

    freshnessMillis
    Long!

    The latest timestamp in millis of the system that was used to respond to this query In case a cache was consulted, this reflects the freshness of the cache In case an index was consulted, this reflects the freshness of the index

    TableChart

    For consumption by UI only

    Fields

    NameDescription
    title
    String!
    columns
    [String!]!
    rows
    [Row!]!

    TableSchema

    Information about a raw Table Schema

    Fields

    NameDescription
    schema
    String!

    Raw table schema

    Tag

    A Tag Entity, which can be associated with other Metadata Entities and subresources

    Implements

    Fields

    NameDescription
    urn
    String!

    The primary key of the TAG

    type
    EntityType!

    A standard Entity Type

    name
    String!
    Deprecated: No longer supported

    A unique identifier for the Tag. Deprecated - Use properties.name field instead.

    properties
    TagProperties

    Additional properties about the Tag

    editableProperties
    EditableTagProperties
    Deprecated: No longer supported

    Additional read write properties about the Tag Deprecated! Use 'properties' field instead.

    ownership
    Ownership

    Ownership metadata of the dataset

    relationships
    EntityRelationshipsResult

    Granular API for querying edges extending from this entity

    Arguments

    NameDescription
    input
    RelationshipsInput!
    description
    String
    Deprecated: No longer supported

    Deprecated, use properties.description field instead

    TagAssociation

    An edge between a Metadata Entity and a Tag Modeled as a struct to permit additional attributes TODO Consider whether this query should be serviced by the relationships field

    Fields

    NameDescription
    tag
    Tag!

    The tag itself

    associatedUrn
    String!

    Reference back to the tagged urn for tracking purposes e.g. when sibling nodes are merged together

    TagProperties

    Properties for a DataHub Tag

    Fields

    NameDescription
    name
    String!

    A display name for the Tag

    description
    String

    A description of the Tag

    colorHex
    String

    An optional RGB hex code for a Tag color, e.g. #FFFFFF

    TelemetryConfig

    Configurations related to tracking users in the app

    Fields

    NameDescription
    enableThirdPartyLogging
    Boolean

    Env variable for whether or not third party logging should be enabled for this instance

    Test

    A metadata entity representing a DataHub Test

    Implements

    Fields

    NameDescription
    urn
    String!

    The primary key of the Test itself

    type
    EntityType!

    The standard Entity Type

    name
    String!

    The name of the Test

    category
    String!

    The category of the Test (user defined)

    description
    String

    Description of the test

    definition
    TestDefinition!

    Definition for the test

    relationships
    EntityRelationshipsResult

    Unused for tests

    Arguments

    NameDescription
    input
    RelationshipsInput!

    TestDefinition

    Definition of the test

    Fields

    NameDescription
    json
    String

    JSON-based def for the test

    TestResult

    The result of running a test

    Fields

    NameDescription
    test
    Test

    The test itself, or null if the test has been deleted

    type
    TestResultType!

    The final result, e.g. either SUCCESS or FAILURE.

    TestResults

    A set of test results

    Fields

    NameDescription
    passing
    [TestResult!]!

    The tests passing

    failing
    [TestResult!]!

    The tests failing

    TestsConfig

    Configurations related to DataHub Tests feature

    Fields

    NameDescription
    enabled
    Boolean!

    Whether Tests feature is enabled

    TextCell

    A Notebook cell which contains text as content

    Fields

    NameDescription
    cellTitle
    String!

    Title of the cell

    cellId
    String!

    Unique id for the cell.

    changeAuditStamps
    ChangeAuditStamps

    Captures information about who created/last modified/deleted this TextCell and when

    text
    String!

    The actual text in a TextCell in a Notebook

    TimeSeriesChart

    For consumption by UI only

    Fields

    NameDescription
    title
    String!
    lines
    [NamedLine!]!
    dateRange
    DateRange!
    interval
    DateInterval!

    TimeWindow

    A time window with a finite start and end time

    Fields

    NameDescription
    startTimeMillis
    Long!

    The start time of the time window

    durationMillis
    Long!

    The end time of the time window

    UpdateStepStateResult

    Result returned when fetching step state

    Fields

    NameDescription
    id
    String!

    Id of the step

    succeeded
    Boolean!

    Whether the update succeeded.

    UpstreamEntityRelationships

    Deprecated, use relationships query instead

    Fields

    NameDescription
    entities
    [EntityRelationshipLegacy]

    UsageAggregation

    An aggregation of Dataset usage statistics

    Fields

    NameDescription
    bucket
    Long

    The time window start time

    duration
    WindowDuration

    The time window span

    resource
    String

    The resource urn associated with the usage information, eg a Dataset urn

    metrics
    UsageAggregationMetrics

    The rolled up usage metrics

    UsageAggregationMetrics

    Rolled up metrics about Dataset usage over time

    Fields

    NameDescription
    uniqueUserCount
    Int

    The unique number of users who have queried the dataset within the time range

    users
    [UserUsageCounts]

    Usage statistics within the time range by user

    totalSqlQueries
    Int

    The total number of queries issued against the dataset within the time range

    topSqlQueries
    [String]

    A set of common queries issued against the dataset within the time range

    fields
    [FieldUsageCounts]

    Per field usage statistics within the time range

    UsageQueryResult

    The result of a Dataset usage query

    Fields

    NameDescription
    buckets
    [UsageAggregation]

    A set of relevant time windows for use in displaying usage statistics

    aggregations
    UsageQueryResultAggregations

    A set of rolled up aggregations about the Dataset usage

    UsageQueryResultAggregations

    A set of rolled up aggregations about the Dataset usage

    Fields

    NameDescription
    uniqueUserCount
    Int

    The count of unique Dataset users within the queried time range

    users
    [UserUsageCounts]

    The specific per user usage counts within the queried time range

    fields
    [FieldUsageCounts]

    The specific per field usage counts within the queried time range

    totalSqlQueries
    Int

    The total number of queries executed within the queried time range Note that this field will likely be deprecated in favor of a totalQueries field

    UserUsageCounts

    Information about individual user usage of a Dataset

    Fields

    NameDescription
    user
    CorpUser

    The user of the Dataset

    count
    Int

    The number of queries issued by the user

    userEmail
    String

    The extracted user email Note that this field will soon be deprecated and merged with user

    VersionedDataset

    A Dataset entity, which encompasses Relational Tables, Document store collections, streaming topics, and other sets of data having an independent lifecycle

    Implements

    Fields

    NameDescription
    urn
    String!

    The primary key of the Dataset

    type
    EntityType!

    The standard Entity Type

    platform
    DataPlatform!

    Standardized platform urn where the dataset is defined

    container
    Container

    The parent container in which the entity resides

    parentContainers
    ParentContainersResult

    Recursively get the lineage of containers for this entity

    name
    String!

    Unique guid for dataset No longer to be used as the Dataset display name. Use properties.name instead

    properties
    DatasetProperties

    An additional set of read only properties

    editableProperties
    DatasetEditableProperties

    An additional set of of read write properties

    ownership
    Ownership

    Ownership metadata of the dataset

    deprecation
    Deprecation

    The deprecation status of the dataset

    institutionalMemory
    InstitutionalMemory

    References to internal resources related to the dataset

    editableSchemaMetadata
    EditableSchemaMetadata

    Editable schema metadata of the dataset

    status
    Status

    Status of the Dataset

    tags
    GlobalTags

    Tags used for searching dataset

    glossaryTerms
    GlossaryTerms

    The structured glossary terms associated with the dataset

    domain
    DomainAssociation

    The Domain associated with the Dataset

    health
    [Health!]

    Experimental! The resolved health status of the Dataset

    schema
    Schema

    Schema metadata of the dataset

    subTypes
    SubTypes

    Sub Types that this entity implements

    viewProperties
    ViewProperties

    View related properties. Only relevant if subtypes field contains view.

    origin
    FabricType!
    Deprecated: No longer supported

    Deprecated, see the properties field instead Environment in which the dataset belongs to or where it was generated Note that this field will soon be deprecated in favor of a more standardized concept of Environment

    relationships
    EntityRelationshipsResult
    Deprecated: No longer supported

    No-op, has to be included due to model

    Arguments

    NameDescription
    input
    RelationshipsInput!

    VersionTag

    The technical version associated with a given Metadata Entity

    Fields

    NameDescription
    versionTag
    String

    ViewProperties

    Properties about a Dataset of type view

    Fields

    NameDescription
    materialized
    Boolean!

    Whether the view is materialized or not

    logic
    String!

    The logic associated with the view, most commonly a SQL statement

    language
    String!

    The language in which the view logic is written, for example SQL

    ViewsConfig

    Configurations related to DataHub Views feature

    Fields

    NameDescription
    enabled
    Boolean!

    Whether Views feature is enabled

    VisualConfig

    Configurations related to visual appearance of the app

    Fields

    NameDescription
    logoUrl
    String

    Custom logo url for the homepage & top banner

    faviconUrl
    String

    Custom favicon url for the homepage & top banner

    queriesTab
    QueriesTabConfig

    Configuration for the queries tab

    entityProfiles
    EntityProfilesConfig

    Configuration for the queries tab

    searchResult
    SearchResultsVisualConfig

    Configuration for search results

    TODO Consider whether this query should be serviced by the relationships field

    Fields

    NameDescription
    tag
    Tag!

    The tag itself

    associatedUrn
    String!

    Reference back to the tagged urn for tracking purposes e.g. when sibling nodes are merged together

    TagProperties

    Properties for a DataHub Tag

    Fields

    NameDescription
    name
    String!

    A display name for the Tag

    description
    String

    A description of the Tag

    colorHex
    String

    An optional RGB hex code for a Tag color, e.g. #FFFFFF

    TelemetryConfig

    Configurations related to tracking users in the app

    Fields

    NameDescription
    enableThirdPartyLogging
    Boolean

    Env variable for whether or not third party logging should be enabled for this instance

    Test

    A metadata entity representing a DataHub Test

    Implements

    Fields

    NameDescription
    urn
    String!

    The primary key of the Test itself

    type
    EntityType!

    The standard Entity Type

    name
    String!

    The name of the Test

    category
    String!

    The category of the Test (user defined)

    description
    String

    Description of the test

    definition
    TestDefinition!

    Definition for the test

    relationships
    EntityRelationshipsResult

    Unused for tests

    Arguments

    NameDescription
    input
    RelationshipsInput!

    TestDefinition

    Definition of the test

    Fields

    NameDescription
    json
    String

    JSON-based def for the test

    TestResult

    The result of running a test

    Fields

    NameDescription
    test
    Test

    The test itself, or null if the test has been deleted

    type
    TestResultType!

    The final result, e.g. either SUCCESS or FAILURE.

    TestResults

    A set of test results

    Fields

    NameDescription
    passing
    [TestResult!]!

    The tests passing

    failing
    [TestResult!]!

    The tests failing

    TestsConfig

    Configurations related to DataHub Tests feature

    Fields

    NameDescription
    enabled
    Boolean!

    Whether Tests feature is enabled

    TextCell

    A Notebook cell which contains text as content

    Fields

    NameDescription
    cellTitle
    String!

    Title of the cell

    cellId
    String!

    Unique id for the cell.

    changeAuditStamps
    ChangeAuditStamps

    Captures information about who created/last modified/deleted this TextCell and when

    text
    String!

    The actual text in a TextCell in a Notebook

    TimeSeriesChart

    For consumption by UI only

    Fields

    NameDescription
    title
    String!
    lines
    [NamedLine!]!
    dateRange
    DateRange!
    interval
    DateInterval!

    TimeWindow

    A time window with a finite start and end time

    Fields

    NameDescription
    startTimeMillis
    Long!

    The start time of the time window

    durationMillis
    Long!

    The end time of the time window

    UpdateStepStateResult

    Result returned when fetching step state

    Fields

    NameDescription
    id
    String!

    Id of the step

    succeeded
    Boolean!

    Whether the update succeeded.

    UpstreamEntityRelationships

    Deprecated, use relationships query instead

    Fields

    NameDescription
    entities
    [EntityRelationshipLegacy]

    UsageAggregation

    An aggregation of Dataset usage statistics

    Fields

    NameDescription
    bucket
    Long

    The time window start time

    duration
    WindowDuration

    The time window span

    resource
    String

    The resource urn associated with the usage information, eg a Dataset urn

    metrics
    UsageAggregationMetrics

    The rolled up usage metrics

    UsageAggregationMetrics

    Rolled up metrics about Dataset usage over time

    Fields

    NameDescription
    uniqueUserCount
    Int

    The unique number of users who have queried the dataset within the time range

    users
    [UserUsageCounts]

    Usage statistics within the time range by user

    totalSqlQueries
    Int

    The total number of queries issued against the dataset within the time range

    topSqlQueries
    [String]

    A set of common queries issued against the dataset within the time range

    fields
    [FieldUsageCounts]

    Per field usage statistics within the time range

    UsageQueryResult

    The result of a Dataset usage query

    Fields

    NameDescription
    buckets
    [UsageAggregation]

    A set of relevant time windows for use in displaying usage statistics

    aggregations
    UsageQueryResultAggregations

    A set of rolled up aggregations about the Dataset usage

    UsageQueryResultAggregations

    A set of rolled up aggregations about the Dataset usage

    Fields

    NameDescription
    uniqueUserCount
    Int

    The count of unique Dataset users within the queried time range

    users
    [UserUsageCounts]

    The specific per user usage counts within the queried time range

    fields
    [FieldUsageCounts]

    The specific per field usage counts within the queried time range

    totalSqlQueries
    Int

    The total number of queries executed within the queried time range Note that this field will likely be deprecated in favor of a totalQueries field

    UserUsageCounts

    Information about individual user usage of a Dataset

    Fields

    NameDescription
    user
    CorpUser

    The user of the Dataset

    count
    Int

    The number of queries issued by the user

    userEmail
    String

    The extracted user email Note that this field will soon be deprecated and merged with user

    VersionedDataset

    A Dataset entity, which encompasses Relational Tables, Document store collections, streaming topics, and other sets of data having an independent lifecycle

    Implements

    Fields

    NameDescription
    urn
    String!

    The primary key of the Dataset

    type
    EntityType!

    The standard Entity Type

    platform
    DataPlatform!

    Standardized platform urn where the dataset is defined

    container
    Container

    The parent container in which the entity resides

    parentContainers
    ParentContainersResult

    Recursively get the lineage of containers for this entity

    name
    String!

    Unique guid for dataset No longer to be used as the Dataset display name. Use properties.name instead

    properties
    DatasetProperties

    An additional set of read only properties

    editableProperties
    DatasetEditableProperties

    An additional set of of read write properties

    ownership
    Ownership

    Ownership metadata of the dataset

    deprecation
    Deprecation

    The deprecation status of the dataset

    institutionalMemory
    InstitutionalMemory

    References to internal resources related to the dataset

    editableSchemaMetadata
    EditableSchemaMetadata

    Editable schema metadata of the dataset

    status
    Status

    Status of the Dataset

    tags
    GlobalTags

    Tags used for searching dataset

    glossaryTerms
    GlossaryTerms

    The structured glossary terms associated with the dataset

    domain
    DomainAssociation

    The Domain associated with the Dataset

    health
    [Health!]

    Experimental! The resolved health status of the Dataset

    schema
    Schema

    Schema metadata of the dataset

    subTypes
    SubTypes

    Sub Types that this entity implements

    viewProperties
    ViewProperties

    View related properties. Only relevant if subtypes field contains view.

    origin
    FabricType!
    Deprecated: No longer supported

    Deprecated, see the properties field instead Environment in which the dataset belongs to or where it was generated Note that this field will soon be deprecated in favor of a more standardized concept of Environment

    relationships
    EntityRelationshipsResult
    Deprecated: No longer supported

    No-op, has to be included due to model

    Arguments

    NameDescription
    input
    RelationshipsInput!

    VersionTag

    The technical version associated with a given Metadata Entity

    Fields

    NameDescription
    versionTag
    String

    ViewProperties

    Properties about a Dataset of type view

    Fields

    NameDescription
    materialized
    Boolean!

    Whether the view is materialized or not

    logic
    String!

    The logic associated with the view, most commonly a SQL statement

    language
    String!

    The language in which the view logic is written, for example SQL

    ViewsConfig

    Configurations related to DataHub Views feature

    Fields

    NameDescription
    enabled
    Boolean!

    Whether Views feature is enabled

    VisualConfig

    Configurations related to visual appearance of the app

    Fields

    NameDescription
    logoUrl
    String

    Custom logo url for the homepage & top banner

    faviconUrl
    String

    Custom favicon url for the homepage & top banner

    queriesTab
    QueriesTabConfig

    Configuration for the queries tab

    entityProfiles
    EntityProfilesConfig

    Configuration for the queries tab

    searchResult
    SearchResultsVisualConfig

    Configuration for search results

    + \ No newline at end of file diff --git a/docs/graphql/queries/index.html b/docs/graphql/queries/index.html index edb088ad3a54b..7b56a7c5b4515 100644 --- a/docs/graphql/queries/index.html +++ b/docs/graphql/queries/index.html @@ -8,7 +8,7 @@ - + @@ -24,7 +24,7 @@ Requires the 'Manage Global Views' Platform Privilege.

    glossaryNode

    Type: GlossaryNode

    Fetch a Glossary Node by primary key (urn)

    Arguments

    NameDescription
    urn
    String!

    glossaryTerm

    Type: GlossaryTerm

    Fetch a Glossary Term by primary key (urn)

    Arguments

    NameDescription
    urn
    String!

    ingestionSource

    Type: IngestionSource

    Fetch a specific ingestion source urn: The primary key associated with the ingestion source.

    Arguments

    NameDescription
    urn
    String!

    isAnalyticsEnabled

    Type: Boolean!

    Deprecated, use appConfig Query instead Whether the analytics feature is enabled in the UI

    listAccessTokens

    Type: ListAccessTokenResult!

    List access tokens stored in DataHub.

    Arguments

    NameDescription
    input
    ListAccessTokenInput!

    listDataProductAssets

    Type: SearchResults

    List Data Product assets for a given urn

    Arguments

    NameDescription
    urn
    String!
    input
    SearchAcrossEntitiesInput!

    listDomains

    Type: ListDomainsResult

    List all DataHub Domains

    Arguments

    NameDescription
    input
    ListDomainsInput!

    listGlobalViews

    Type: ListViewsResult

    List Global DataHub Views

    Arguments

    NameDescription
    input
    ListGlobalViewsInput!

    listGroups

    Type: ListGroupsResult

    List all DataHub Groups

    Arguments

    NameDescription
    input
    ListGroupsInput!

    listIngestionSources

    Type: ListIngestionSourcesResult

    List all ingestion sources

    Arguments

    NameDescription
    input
    ListIngestionSourcesInput!

    listMyViews

    Type: ListViewsResult

    List DataHub Views owned by the current user

    Arguments

    NameDescription
    input
    ListMyViewsInput!

    listOwnershipTypes

    Type: ListOwnershipTypesResult!

    List Custom Ownership Types

    Arguments

    NameDescription
    input
    ListOwnershipTypesInput!

    Input required for listing custom ownership types

    listPolicies

    Type: ListPoliciesResult

    List all DataHub Access Policies

    Arguments

    NameDescription
    input
    ListPoliciesInput!

    listPosts

    Type: ListPostsResult

    List all Posts

    Arguments

    NameDescription
    input
    ListPostsInput!

    listQueries

    Type: ListQueriesResult

    List Dataset Queries

    Arguments

    NameDescription
    input
    ListQueriesInput!

    Input required for listing queries

    listRecommendations

    Type: ListRecommendationsResult

    Fetch recommendations for a particular scenario

    Arguments

    NameDescription
    input
    ListRecommendationsInput!

    listRoles

    Type: ListRolesResult

    List all DataHub Roles

    Arguments

    NameDescription
    input
    ListRolesInput!

    listSecrets

    Type: ListSecretsResult

    List all secrets stored in DataHub (no values)

    Arguments

    NameDescription
    input
    ListSecretsInput!

    listTests

    Type: ListTestsResult

    List all DataHub Tests

    Arguments

    NameDescription
    input
    ListTestsInput!

    listUsers

    Type: ListUsersResult

    List all DataHub Users

    Arguments

    NameDescription
    input
    ListUsersInput!

    me

    Type: AuthenticatedUser

    Fetch details associated with the authenticated user, provided via an auth cookie or header

    mlFeature

    Type: MLFeature

    Incubating: Fetch a ML Feature by primary key (urn)

    Arguments

    NameDescription
    urn
    String!

    mlFeatureTable

    Type: MLFeatureTable

    Incubating: Fetch a ML Feature Table by primary key (urn)

    Arguments

    NameDescription
    urn
    String!

    mlModel

    Type: MLModel

    Incubating: Fetch an ML Model by primary key (urn)

    Arguments

    NameDescription
    urn
    String!

    mlModelGroup

    Type: MLModelGroup

    Incubating: Fetch an ML Model Group by primary key (urn)

    Arguments

    NameDescription
    urn
    String!

    mlPrimaryKey

    Type: MLPrimaryKey

    Incubating: Fetch a ML Primary Key by primary key (urn)

    Arguments

    NameDescription
    urn
    String!

    notebook

    Type: Notebook

    Fetch a Notebook by primary key (urn)

    Arguments

    NameDescription
    urn
    String!

    scrollAcrossEntities

    Type: ScrollResults

    Search DataHub entities by providing a pointer reference for scrolling through results.

    Arguments

    NameDescription
    input
    ScrollAcrossEntitiesInput!

    scrollAcrossLineage

    Type: ScrollAcrossLineageResults

    Search across the results of a graph query on a node, uses scroll API

    Arguments

    NameDescription
    input
    ScrollAcrossLineageInput!

    Type: SearchResults

    Full text search against a specific DataHub Entity Type

    Arguments

    NameDescription
    input
    SearchInput!

    searchAcrossEntities

    Type: SearchResults

    Search DataHub entities

    Arguments

    NameDescription
    input
    SearchAcrossEntitiesInput!

    searchAcrossLineage

    Type: SearchAcrossLineageResults

    Search across the results of a graph query on a node

    Arguments

    NameDescription
    input
    SearchAcrossLineageInput!

    tag

    Type: Tag

    Fetch a Tag by primary key (urn)

    Arguments

    NameDescription
    urn
    String!

    test

    Type: Test

    Fetch a Test by primary key (urn)

    Arguments

    NameDescription
    urn
    String!

    versionedDataset

    Type: VersionedDataset

    Fetch a Dataset by primary key (urn) at a point in time based on aspect versions (versionStamp)

    Arguments

    NameDescription
    urn
    String!
    versionStamp
    String
    - + \ No newline at end of file diff --git a/docs/graphql/scalars/index.html b/docs/graphql/scalars/index.html index b317f3e433ae1..2e54b4f482d5a 100644 --- a/docs/graphql/scalars/index.html +++ b/docs/graphql/scalars/index.html @@ -8,13 +8,13 @@ - +

    Scalars

    Boolean

    The Boolean scalar type represents true or false.

    Float

    The Float scalar type represents signed double-precision fractional values as specified by IEEE 754.

    Int

    The Int scalar type represents non-fractional signed whole numeric values. Int can represent values between -(2^31) and 2^31 - 1.

    Long

    String

    The String scalar type represents textual data, represented as UTF-8 character sequences. The String type is most often used by GraphQL to represent free-form human-readable text.

    - + \ No newline at end of file diff --git a/docs/graphql/unions/index.html b/docs/graphql/unions/index.html index 5580624459f95..eb2c46d40e03a 100644 --- a/docs/graphql/unions/index.html +++ b/docs/graphql/unions/index.html @@ -8,13 +8,13 @@ - +
    - + \ No newline at end of file diff --git a/docs/how/add-custom-data-platform/index.html b/docs/how/add-custom-data-platform/index.html index 0cb66d09d5874..a9a34bffb3f9d 100644 --- a/docs/how/add-custom-data-platform/index.html +++ b/docs/how/add-custom-data-platform/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ your custom Data Platform will persist even between full cleans (nukes) of DataHub.

    Changing Default Data Platforms

    Simply make a change to the data_platforms.json file to add a custom Data Platform:

    [ 
    .....
    {
    "urn": "urn:li:dataPlatform:MyCustomDataPlatform",
    "aspect": {
    "name": "My Custom Data Platform",
    "type": "OTHERS",
    "logoUrl": "https://<your-logo-url>"
    }
    }
    ]

    Ingesting Data Platform at runtime

    You can also ingest a Data Platform at runtime using either a file-based ingestion source, or using a normal curl to the GMS Rest.li APIs.

    Using the cli

    datahub put platform --name MyCustomDataPlatform --display_name "My Custom Data Platform" --logo "https://<your-logo-url>"

    Using File-Based Ingestion Recipe

    Step 1 Define a JSON file containing your custom Data Platform

    // my-custom-data-platform.json 
    [
    {
    "auditHeader": null,
    "proposedSnapshot": {
    "com.linkedin.pegasus2avro.metadata.snapshot.DataPlatformSnapshot": {
    "urn": "urn:li:dataPlatform:MyCustomDataPlatform",
    "aspects": [
    {
    "com.linkedin.pegasus2avro.dataplatform.DataPlatformInfo": {
    "datasetNameDelimiter": "/",
    "name": "My Custom Data Platform",
    "type": "OTHERS",
    "logoUrl": "https://<your-logo-url>"
    }
    }
    ]
    }
    },
    "proposedDelta": null
    }
    ]

    Step 2: Define an ingestion recipe

    ---
    # see https://datahubproject.io/docs/generated/ingestion/sources/file for complete documentation
    source:
    type: "file"
    config:
    filename: "./my-custom-data-platform.json"

    # see https://datahubproject.io/docs/metadata-ingestion/sink_docs/datahub for complete documentation
    sink:
    ...

    Using Rest.li API

    You can also issue a normal curl request to the Rest.li /entities API to add a custom Data Platform.

    curl 'http://localhost:8080/entities?action=ingest' -X POST --data '{
    "entity":{
    "value":{
    "com.linkedin.metadata.snapshot.DataPlatformSnapshot":{
    "aspects":[
    {
    "com.linkedin.dataplatform.DataPlatformInfo":{
    "datasetNameDelimiter": "/",
    "name": "My Custom Data Platform",
    "type": "OTHERS",
    "logoUrl": "https://<your-logo-url>"
    }
    }
    ],
    "urn":"urn:li:dataPlatform:MyCustomDataPlatform"
    }
    }
    }
    }'
    - + \ No newline at end of file diff --git a/docs/how/add-custom-ingestion-source/index.html b/docs/how/add-custom-ingestion-source/index.html index a670df5924201..a249a4f368893 100644 --- a/docs/how/add-custom-ingestion-source/index.html +++ b/docs/how/add-custom-ingestion-source/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ with the custom source class named MySourceClass your YAML recipe would look like the following:

    source:
    type: my-source.custom_ingestion_source.MySourceClass
    config:
    # place for your custom config defined in the configModel

    If you now execute the ingestion the datahub client will pick up your code and call the get_workunits method and do the rest for you. That's it.

    Example code?

    For examples of how this setup could look, and a good starting point for building your first custom source, visit our meta-world example repository.

    - + \ No newline at end of file diff --git a/docs/how/add-new-aspect/index.html b/docs/how/add-new-aspect/index.html index 9537e7b4bd852..4162ad9140586 100644 --- a/docs/how/add-new-aspect/index.html +++ b/docs/how/add-new-aspect/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    How to add a new metadata aspect?

    Adding a new metadata aspect is one of the most common ways to extend an existing entity. We'll use the CorpUserEditableInfo as an example here.

    1. Add the aspect model to the corresponding namespace (e.g. com.linkedin.identity)

    2. Extend the entity's aspect union to include the new aspect (e.g. CorpUserAspect)

    3. Rebuild the rest.li IDL & snapshot by running the following command from the project root

    ./gradlew :metadata-service:restli-servlet-impl:build -Prest.model.compatibility=ignore
    1. To surface the new aspect at the top-level resource endpoint, extend the resource data model (e.g. CorpUser) with an optional field (e.g. editableInfo). You'll also need to extend the toValue & toSnapshot methods of the top-level resource (e.g. CorpUsers) to convert between the snapshot & value models.

    2. (Optional) If there's need to update the aspect via API (instead of/in addition to MCE), add a sub-resource endpoint for the new aspect (e.g. CorpUsersEditableInfoResource). The sub-resource endpoint also allows you to retrieve previous versions of the aspect as well as additional metadata such as the audit stamp.

    3. After rebuilding & restarting gms, mce-consumer-job & mae-consumer-job, you should be able to start emitting MCE with the new aspect and have it automatically ingested & stored in DB.

    - + \ No newline at end of file diff --git a/docs/how/add-user-data/index.html b/docs/how/add-user-data/index.html index 7faabd5d72c2c..deddc78c24826 100644 --- a/docs/how/add-user-data/index.html +++ b/docs/how/add-user-data/index.html @@ -8,13 +8,13 @@ - +

    Adding user metadata in DataHub

    This guide shares how you can add user metadata in DataHub. Usually you would want to use one of our sources for ingesting user metadata. But if there is no connector for your use case then you would want to use this guide.

    note

    This does not allow you to add new users for Authentication. If you want to add a new user in DataHub for Login please refer to Adding Users to DataHub

    You can look at all aspects supported for users in CorpUserAspect

    Using File-Based Ingestion Recipe

    Define a JSON File containing your user

    [
    {
    "auditHeader": null,
    "proposedSnapshot": {
    "com.linkedin.pegasus2avro.metadata.snapshot.CorpUserSnapshot": {
    "urn": "urn:li:corpuser:aseem.bansal",
    "aspects": [
    {
    "com.linkedin.pegasus2avro.identity.CorpUserInfo": {
    "active": true,
    "displayName": {
    "string": "Aseem Bansal"
    },
    "email": "aseem+examples@acryl.io",
    "title": {
    "string": "Software Engineer"
    },
    "managerUrn": null,
    "departmentId": null,
    "departmentName": null,
    "firstName": null,
    "lastName": null,
    "fullName": {
    "string": "Aseem Bansal"
    },
    "countryCode": null
    }
    }
    ]
    }
    }
    }
    ]

    Define an ingestion recipe

    ---
    # see https://datahubproject.io/docs/generated/ingestion/sources/file for complete documentation
    source:
    type: "file"
    config:
    filename: "./my-user.json"

    # see https://datahubproject.io/docs/metadata-ingestion/sink_docs/datahub for complete documentation
    sink:
    ...

    Use DataHub CLI to do the ingestion.

    Using Rest.li API

    curl 'http://localhost:8080/entities?action=ingest' -X POST --data '{
    "entity": {
    "value": {
    "com.linkedin.metadata.snapshot.CorpUserSnapshot": {
    "urn": "urn:li:corpuser:aseem.bansal",
    "aspects": [{
    "com.linkedin.identity.CorpUserInfo": {
    "active": true,
    "displayName": "Aseem Bansal",
    "email": "aseem+example@acryl.io",
    "title": "Software Engineer",
    "fullName": "Aseem Bansal"
    }
    }]
    }
    }
    }
    }'
    - + \ No newline at end of file diff --git a/docs/how/backup-datahub/index.html b/docs/how/backup-datahub/index.html index a54ac1b05006d..d29354fa18225 100644 --- a/docs/how/backup-datahub/index.html +++ b/docs/how/backup-datahub/index.html @@ -8,13 +8,13 @@ - +

    Taking backup of DataHub

    Production

    The recommended backup strategy is to periodically dump the database datahub.metadata_aspect_v2 so it can be recreated from the dump which most managed DB services will support (e.g. AWS RDS). Then run restore indices to recreate the indices.

    In order to back up Time Series Aspects (which power usage and dataset profiles), you'd have to do a backup of Elasticsearch, which is possible via AWS OpenSearch. Otherwise, you'd have to reingest dataset profiles from your sources in the event of a disaster scenario!

    Quickstart

    To take a backup of your quickstart, take a look at this document on how to accomplish it.

    - + \ No newline at end of file diff --git a/docs/how/configuring-authorization-with-apache-ranger/index.html b/docs/how/configuring-authorization-with-apache-ranger/index.html index 211907b3dcf42..410179f3e9f92 100644 --- a/docs/how/configuring-authorization-with-apache-ranger/index.html +++ b/docs/how/configuring-authorization-with-apache-ranger/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ Replace variables with corresponding values in curl command

    • <ranger-admin-username>
    • <ranger-admin-password>
    • <ranger-host>
      curl -u <ranger-admin-username>:<ranger-admin-password> -X POST -H "Accept: application/json" -H "Content-Type: application/json" --data @servicedef.json http://<ranger-host>:6080/service/public/v2/api/servicedef

    Defining a Ranger Policy

    Now, you should have the DataHub plugin registered with Apache Ranger. Next, we'll create a sample user and add them to our first resource policy.

    1. Log in to the Apache Ranger UI (Privacera Portal) to perform the steps below.

    2. Verify datahub-ranger-plugin is registered successfully: The datahub-ranger-plugin should be visible as DATAHUB in Access Management -> Resource Policies.

    3. Create a service under the plugin DATAHUB with name ranger_datahub

      DATAHUB plugin and ranger_datahub service is shown in below screenshot:

      Privacera Portal DATAHUB screenshot

    4. Create a new policy under service ranger_datahub - this will be used to control DataHub authorization.

    5. Create a test user & assign them to a policy. We'll use the datahub user, which is the default root user inside DataHub.

      To do this, perform the steps below

      • Create a user datahub

      • Create a policy under ranger_datahub service. To assign Platform Privileges (e.g. Admin privileges), simply use the "platform" resource type which is defined. To test the flow, we can simply assign the datahub user all platform privileges that are available through the Ranger UI. This will enable the "datahub" user to have full platform admin privileges.

        To define fine-grained resource privileges, e.g. for DataHub Datasets, Dashboards, Charts, and more, you can simply select the appropriate Resource Type in the Ranger policy builder. You should also see a list of privileges that are supported for each resource type, which correspond to the actions that you can perform. To learn more about supported privileges, check out the DataHub Policies Guide.

        DataHub platform access policy screenshot:

        Privacera Portal DATAHUB screenshot

    Once we've created our first policy, we can set up DataHub to start authorizing requests using Ranger policies.

    Configuring your DataHub Deployment

    Perform the following steps to configure DataHub to send incoming requests to Apache Ranger for authorization.

    1. Download Apache Ranger security xml ranger-datahub-security.xml
    2. In ranger-datahub-security.xml edit the value of property ranger.plugin.datahub.policy.rest.url. Sample snippet is shown below
          <property>
      <name>ranger.plugin.datahub.policy.rest.url</name>
      <value>http://199.209.9.70:6080</value>
      <description>
      URL to Ranger Admin
      </description>
      </property>

    As per your deployment follow either Docker or Kubernetes section below

    Docker

    Build Ranger Authorizer Plugin

    1. Clone DataHub Repo: Clone the DataHub repository

          cd ~/
      git clone https://github.com/acryldata/datahub-ranger-auth-plugin.git
    2. Go inside the datahub directory: You should be inside the datahub-ranger-auth-plugin directory to execute build command

          cd ~/datahub-ranger-auth-plugin/
    3. Build plugin: Execute below gradle command to build Ranger Authorizer Plugin jar

        ./gradlew apache-ranger-plugin:shadowJar

      This step will generate a jar file i.e. ./apache-ranger-plugin/build/libs/apache-ranger-plugin-<version>-SNAPSHOT.jar.

      Let's call this jar as ranger-plugin-jar. We need this jar in below step (Configure Ranger Authorizer Plugin)

    Configure Ranger Authorizer Plugin

    On the host where datahub-gms is deployed, follow these steps:

    1. Create directory ~/.datahub/plugins/auth/apache-ranger-authorizer/: Execute the below command
      mkdir -p ~/.datahub/plugins/auth/apache-ranger-authorizer/
    2. Copy ranger-datahub-security.xml file to ~/.datahub/plugins/auth/apache-ranger-authorizer/
    3. Copy ranger-plugin-jar: Copy the apache-ranger-plugin-<version>-SNAPSHOT.jar
      cp ./apache-ranger-plugin/build/libs/apache-ranger-plugin-<version>-SNAPSHOT.jar ~/.datahub/plugins/auth/apache-ranger-authorizer/apache-ranger-authorizer.jar
    4. Create config.yml: Create config.yml if not exist
          touch ~/.datahub/plugins/auth/config.yml 
    5. Set Apache Ranger Plugin config: Add below entry in config.yml file. Set username and password to Apache Ranger user credentials
         plugins:
      - name: "apache-ranger-authorizer"
      type: "authorizer"
      enabled: "true"
      params:
      className: "com.datahub.authorization.ranger.RangerAuthorizer"
      configs:
      username: "<Apache Ranger username>"
      password: "<Apache Ranger password>"
    6. Restart DataHub GMS container (i.e. datahub-gms)

    Kubernetes

    Helm support is coming soon.

    That's it! Now we can test out the integration.

    Validating your Setup

    To verify that things are working as expected, we can test that the root datahub user has all Platform Privileges and is able to perform all operations: managing users & groups, creating domains, and more. To do this, simply log into your DataHub deployment via the root DataHub user.

    Revert the Configuration

    If you want to revert your deployment configuration and don't want Apache Ranger to control the authorization of your DataHub deployment then follow the below sections to undo the configuration steps you have performed in section Configuring Authorization with Apache Ranger

    1. Revert Configuration of your Apache Ranger Deployment
    2. Revert Configuration of your DataHub Deployment

    Revert Configuration of your Apache Ranger Deployment

    For kubernetes example command, please replace the <ranger-pod-name> and <namespace> as per your environment.

    1. Delete ranger_datahub service: Login into the Privacera Portal and delete service ranger_datahub

      ranger_datahub service is shown in below screenshot:

      Privacera Portal DATAHUB screenshot

    2. Delete datahub plugin: Execute the below curl command to delete the datahub plugin. Replace variables with corresponding values in the curl command

      • <ranger-admin-username>
      • <ranger-admin-password>
      • <ranger-host>
      curl -u <ranger-admin-username>:<ranger-admin-password> -X DELETE -H "Accept: application/json" -H "Content-Type: application/json" http://<ranger-host>:6080/service/public/v2/api/servicedef/name/datahub
    3. Delete datahub plugin directory: Execute below command to delete the datahub plugin directory from Apache Ranger

      Docker command:

      docker exec privacera_ranger_1 rm -rf ews/webapp/WEB-INF/classes/ranger-plugins/datahub

      Kubernetes command:

      kubectl exec <ranger-pod-name> -n <namespace> -- sh -c 'rm -rf ews/webapp/WEB-INF/classes/ranger-plugins/datahub'

    Revert Configuration of your DataHub Deployment

    Docker

    1. Remove Apache Ranger Plugin entry: From config.yml file remove the entry which was added for Apache Ranger Plugin
    2. Redeploy DataHub (datahub-gms)

    Kubernetes

    For kubernetes example command, please replace the <namespace> as per your environment.

    1. Open deployment editor: Execute below command
        kubectl edit deployment datahub-datahub-gms -n <namespace>
    2. Remove below environments variables
      1. AUTH_POLICIES_ENABLED
      2. RANGER_AUTHORIZER_ENABLED
      3. RANGER_USERNAME
      4. RANGER_PASSWORD
    3. Remove below volumes related settings
      1. volumes
      2. volumeMounts
    4. Save and quit the editor and use below command to check status of datahub-datahub-gms deployment rollout
      kubectl rollout status deployment/datahub-datahub-gms -n <namespace>
      On successful rollout you should see a message deployment "datahub-datahub-gms" successfully rolled out

    Validating your Setup

    To verify that things are working as expected, we can test that the root datahub user has all Platform Privileges and is able to perform all operations: managing users & groups, creating domains, and more. To do this, simply log into your DataHub deployment via the root DataHub user.

    - + \ No newline at end of file diff --git a/docs/how/delete-metadata/index.html b/docs/how/delete-metadata/index.html index c63fd3abb4340..00ee470282851 100644 --- a/docs/how/delete-metadata/index.html +++ b/docs/how/delete-metadata/index.html @@ -8,14 +8,14 @@ - +

    Removing Metadata from DataHub

    tip

    To follow this guide, you'll need the DataHub CLI.

    There are two ways to delete metadata from DataHub:

    1. Delete metadata attached to entities by providing a specific urn or filters that identify a set of urns (delete CLI).
    2. Delete metadata created by a single ingestion run (rollback).
    Be careful when deleting metadata
    • Always use --dry-run to test your delete command before executing it.
    • Prefer reversible soft deletes (--soft) over irreversible hard deletes (--hard).

    Delete CLI Usage

    info

    Deleting metadata using DataHub's CLI is a simple, systems-level action. If you attempt to delete an entity with children, such as a container, it will not delete those children. Instead, you will need to delete each child by URN in addition to deleting the parent.

    All the commands below support the following options:

    • -n/--dry-run: Execute a dry run instead of the actual delete.
    • --force: Skip confirmation prompts.

    Selecting entities to delete

    You can either provide a single urn to delete, or use filters to select a set of entities to delete.

    # Soft delete a single urn.
    datahub delete --urn "<my urn>"

    # Soft delete using a filter.
    datahub delete --platform snowflake

    # Filters can be combined, which will select entities that match all filters.
    datahub delete --platform looker --entity-type chart
    datahub delete --platform bigquery --env PROD

    When performing hard deletes, you can optionally add the --only-soft-deleted flag to only hard delete entities that were previously soft deleted.

    Performing the delete

    Soft delete an entity (default)

    By default, the delete command will perform a soft delete.

    This will set the status aspect's removed field to true, which will hide the entity from the UI. However, you'll still be able to view the entity's metadata in the UI with a direct link.

    # The `--soft` flag is redundant since it's the default.
    datahub delete --urn "<urn>" --soft
    # or using a filter
    datahub delete --platform snowflake --soft

    Hard delete an entity

    This will physically delete all rows for all aspects of the entity. This action cannot be undone, so execute this only after you are sure you want to delete all data associated with this entity.

    datahub delete --urn "<my urn>" --hard
    # or using a filter
    datahub delete --platform snowflake --hard

    As of datahub v0.10.2.3, hard deleting tags, glossary terms, users, and groups will also remove references to those entities across the metadata graph.

    Hard delete a timeseries aspect

    It's also possible to delete a range of timeseries aspect data for an entity without deleting the entire entity.

    For these deletes, the aspect and time ranges are required. You can delete all data for a timeseries aspect by providing --start-time min --end-time max.

    datahub delete --urn "<my urn>" --aspect <aspect name> --start-time '-30 days' --end-time '-7 days'
    # or using a filter
    datahub delete --platform snowflake --entity-type dataset --aspect datasetProfile --start-time '0' --end-time '2023-01-01'

    The start and end time fields filter on the timestampMillis field of the timeseries aspect. Allowed start and end times formats:

    • YYYY-MM-DD: a specific date
    • YYYY-MM-DD HH:mm:ss: a specific timestamp, assumed to be in UTC unless otherwise specified
    • +/-<number> <unit> (e.g. -7 days): a relative time, where <number> is an integer and <unit> is one of days, hours, minutes, seconds
    • ddddddddd (e.g. 1684384045): a unix timestamp
    • min, max, now: special keywords

    Delete CLI Examples

    note

    Make sure you surround your urn with quotes! If you do not include the quotes, your terminal may misinterpret the command.

    Note: All of the commands below support --dry-run and --force (skips confirmation prompts).

    Soft delete a single entity

    datahub delete --urn "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)"

    Hard delete a single entity

    datahub delete --urn "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)" --hard

    Delete everything from the Snowflake DEV environment

    datahub delete --platform snowflake --env DEV

    Delete all BigQuery datasets in the PROD environment

    # Note: this will leave BigQuery containers intact.
    datahub delete --env PROD --entity-type dataset --platform bigquery

    Delete all pipelines and tasks from Airflow

    datahub delete --platform "airflow"

    Delete all containers for a particular platform

    datahub delete --entity-type container --platform s3

    Delete everything in the DEV environment

    # This is a pretty broad filter, so make sure you know what you're doing!
    datahub delete --env DEV

    Delete all Looker dashboards and charts

    datahub delete --platform looker

    Delete all Looker charts (but not dashboards)

    datahub delete --platform looker --entity-type chart

    Clean up old datasetProfiles

    datahub delete --entity-type dataset --aspect datasetProfile --start-time 'min' --end-time '-60 days'

    Delete a tag

    # Soft delete.
    datahub delete --urn 'urn:li:tag:Legacy' --soft

    # Or, using a hard delete. This will automatically clean up all tag associations.
    datahub delete --urn 'urn:li:tag:Legacy' --hard

    Delete all datasets that match a query

    # Note: the query is an advanced feature, but can sometimes select extra entities - use it with caution!
    datahub delete --entity-type dataset --query "_tmp"

    Hard delete everything in Snowflake that was previously soft deleted

    datahub delete --platform snowflake --only-soft-deleted --hard

    Deletes using the SDK and APIs

    The Python SDK's DataHubGraph client supports deletes via the following methods:

    • soft_delete_entity
    • hard_delete_entity
    • hard_delete_timeseries_aspect

    Deletes via the REST API are also possible, although we recommend using the SDK instead.

    # hard delete an entity by urn
    curl "http://localhost:8080/entities?action=delete" -X POST --data '{"urn": "urn:li:dataset:(urn:li:dataPlatform:hive,fct_users_deleted,PROD)"}'

    Rollback Ingestion Run

    The second way to delete metadata is to identify entities (and the aspects affected) by using an ingestion run-id. Whenever you run datahub ingest -c ..., all the metadata ingested with that run will have the same run id.

    To view the ids of the most recent set of ingestion batches, execute

    datahub ingest list-runs

    That will print out a table of all the runs. Once you have an idea of which run you want to roll back, run

    datahub ingest show --run-id <run-id>

    to see more info of the run.

    Alternately, you can execute a dry-run rollback to achieve the same outcome.

    datahub ingest rollback --dry-run --run-id <run-id>

    Finally, once you are sure you want to delete this data forever, run

    datahub ingest rollback --run-id <run-id>

    to rollback all aspects added with this run and all entities created by this run. This deletes both the versioned and the timeseries aspects associated with these entities.

    Unsafe Entities and Rollback

    In some cases, entities that were initially ingested by a run might have had further modifications to their metadata (e.g. adding terms, tags, or documentation) through the UI or other means. During a rollback of the ingestion that initially created these entities (technically, if the key aspect for these entities is being rolled back), the ingestion process will analyse the metadata graph for aspects that will be left "dangling" and will:

    1. Leave these aspects untouched in the database, and soft delete the entity. A re-ingestion of these entities will result in this additional metadata becoming visible again in the UI, so you don't lose any of your work.
    2. The datahub cli will save information about these unsafe entities as a CSV for operators to later review and decide on next steps (keep or remove).

    The rollback command will report how many entities have such aspects and save as a CSV the urns of these entities under a rollback reports directory, which defaults to rollback_reports under the current directory where the cli is run, and can be configured further using the --reports-dir command line arg.

    The operator can use datahub get --urn <> to inspect the aspects that were left behind and either keep them (do nothing) or delete the entity (and its aspects) completely using datahub delete --urn <urn> --hard. If the operator wishes to remove all the metadata associated with these unsafe entities, they can re-issue the rollback command with the --nuke flag.

    - + \ No newline at end of file diff --git a/docs/how/extract-container-logs/index.html b/docs/how/extract-container-logs/index.html index 606bf3d718ca3..ec3fe558bc3f4 100644 --- a/docs/how/extract-container-logs/index.html +++ b/docs/how/extract-container-logs/index.html @@ -8,13 +8,13 @@ - +

    How to Extract Logs from DataHub Containers

    DataHub containers, datahub GMS (backend server) and datahub frontend (UI server), write log files to the local container filesystem. To extract these logs, you'll need to get them from inside the container where the services are running.

    You can do so easily using the Docker CLI if you're deploying with vanilla docker or compose, and kubectl if you're on K8s.

    Step 1: Find the id of the container you're interested in

    You'll first need to get the id of the container that you'd like to extract logs for. For example, datahub-gms.

    Docker & Docker Compose

    To do so, you can view all containers that Docker knows about by running the following command:

    johnjoyce@Johns-MBP datahub-fork % docker container ls
    CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
    6c4a280bc457 linkedin/datahub-frontend-react "datahub-frontend/bi…" 5 days ago Up 46 hours (healthy) 0.0.0.0:9002->9002/tcp datahub-frontend-react
    122a2488ab63 linkedin/datahub-gms "/bin/sh -c /datahub…" 5 days ago Up 5 days (healthy) 0.0.0.0:8080->8080/tcp datahub-gms
    7682dcc64afa confluentinc/cp-schema-registry:5.4.0 "/etc/confluent/dock…" 5 days ago Up 5 days 0.0.0.0:8081->8081/tcp schema-registry
    3680fcaef3ed confluentinc/cp-kafka:5.4.0 "/etc/confluent/dock…" 5 days ago Up 5 days 0.0.0.0:9092->9092/tcp, 0.0.0.0:29092->29092/tcp broker
    9d6730ddd4c4 neo4j:4.0.6 "/sbin/tini -g -- /d…" 5 days ago Up 5 days 0.0.0.0:7474->7474/tcp, 7473/tcp, 0.0.0.0:7687->7687/tcp neo4j
    c97edec663af confluentinc/cp-zookeeper:5.4.0 "/etc/confluent/dock…" 5 days ago Up 5 days 2888/tcp, 0.0.0.0:2181->2181/tcp, 3888/tcp zookeeper
    150ba161cf26 mysql:5.7 "docker-entrypoint.s…" 5 days ago Up 5 days 0.0.0.0:3306->3306/tcp, 33060/tcp mysql
    4b72a3eab73f elasticsearch:7.9.3 "/tini -- /usr/local…" 5 days ago Up 5 days (healthy) 0.0.0.0:9200->9200/tcp, 9300/tcp elasticsearch

    In this case, the container id we'd like to note is 122a2488ab63, which corresponds to the datahub-gms service.

    Kubernetes & Helm

    Find the name of the pod you're interested in using the following command:

    kubectl get pods

    ...
    default datahub-frontend-1231ead-6767 1/1 Running 0 42h
    default datahub-gms-c578b47cd-7676 1/1 Running 0 13d
    ...

    In this case the pod name we'd like to note is datahub-gms-c578b47cd-7676 , which contains the GMS backend service.

    Step 2: Find the log files

    The second step is to view all log files. Log files will live inside the container under the following directories for each service:

    • datahub-gms: /tmp/datahub/logs/gms
    • datahub-frontend: /tmp/datahub/logs/datahub-frontend

    There are 2 types of logs that are collected:

    1. Info Logs: These include info, warn, error log lines. They are what print to stdout when the container runs.
    2. Debug Logs: These files have shorter retention (past 1 day) but include more granular debug information from the DataHub code specifically. We ignore debug logs from external libraries that DataHub depends on.

    Docker & Docker Compose

    Since log files are named based on the current date, you'll need to use "ls" to see which files currently exist. To do so, you can use the docker exec command, using the container id recorded in step one:

    docker exec --privileged <container-id> <shell-command> 

    For example:

    johnjoyce@Johns-MBP datahub-fork % docker exec --privileged 122a2488ab63 ls -la /tmp/datahub/logs/gms 
    total 4664
    drwxr-xr-x 2 datahub datahub 4096 Jul 28 05:14 .
    drwxr-xr-x 3 datahub datahub 4096 Jul 23 08:37 ..
    -rw-r--r-- 1 datahub datahub 2001112 Jul 23 23:33 gms.2021-23-07-0.log
    -rw-r--r-- 1 datahub datahub 74343 Jul 24 20:29 gms.2021-24-07-0.log
    -rw-r--r-- 1 datahub datahub 70252 Jul 25 17:56 gms.2021-25-07-0.log
    -rw-r--r-- 1 datahub datahub 626985 Jul 26 23:36 gms.2021-26-07-0.log
    -rw-r--r-- 1 datahub datahub 712270 Jul 27 23:59 gms.2021-27-07-0.log
    -rw-r--r-- 1 datahub datahub 867707 Jul 27 23:59 gms.debug.2021-27-07-0.log
    -rw-r--r-- 1 datahub datahub 3563 Jul 28 05:26 gms.debug.log
    -rw-r--r-- 1 datahub datahub 382443 Jul 28 16:16 gms.log

    Depending on your issue, you may be interested to view both debug and normal info logs.

    Kubernetes & Helm

    Since log files are named based on the current date, you'll need to use "ls" to see which files currently exist. To do so, you can use the kubectl exec command, using the pod name recorded in step one:

    kubectl exec datahub-frontend-1231ead-6767 -n default -- ls -la /tmp/datahub/logs/gms

    total 36388
    drwxr-xr-x 2 datahub datahub 4096 Jul 29 07:45 .
    drwxr-xr-x 3 datahub datahub 17 Jul 15 08:47 ..
    -rw-r--r-- 1 datahub datahub 104548 Jul 15 22:24 gms.2021-15-07-0.log
    -rw-r--r-- 1 datahub datahub 12684 Jul 16 14:55 gms.2021-16-07-0.log
    -rw-r--r-- 1 datahub datahub 2482571 Jul 17 14:40 gms.2021-17-07-0.log
    -rw-r--r-- 1 datahub datahub 49120 Jul 18 14:31 gms.2021-18-07-0.log
    -rw-r--r-- 1 datahub datahub 14167 Jul 19 23:47 gms.2021-19-07-0.log
    -rw-r--r-- 1 datahub datahub 13255 Jul 20 22:22 gms.2021-20-07-0.log
    -rw-r--r-- 1 datahub datahub 668485 Jul 21 19:52 gms.2021-21-07-0.log
    -rw-r--r-- 1 datahub datahub 1448589 Jul 22 20:18 gms.2021-22-07-0.log
    -rw-r--r-- 1 datahub datahub 44187 Jul 23 13:51 gms.2021-23-07-0.log
    -rw-r--r-- 1 datahub datahub 14173 Jul 24 22:59 gms.2021-24-07-0.log
    -rw-r--r-- 1 datahub datahub 13263 Jul 25 21:11 gms.2021-25-07-0.log
    -rw-r--r-- 1 datahub datahub 13261 Jul 26 19:02 gms.2021-26-07-0.log
    -rw-r--r-- 1 datahub datahub 1118105 Jul 27 21:10 gms.2021-27-07-0.log
    -rw-r--r-- 1 datahub datahub 678423 Jul 28 23:57 gms.2021-28-07-0.log
    -rw-r--r-- 1 datahub datahub 1776274 Jul 28 07:19 gms.debug.2021-28-07-0.log
    -rw-r--r-- 1 datahub datahub 27576533 Jul 29 09:55 gms.debug.log
    -rw-r--r-- 1 datahub datahub 1195940 Jul 29 14:54 gms.log

    In the next step, we'll save specific log files to our local filesystem.

    Step 3: Save Container Log File to Local

    This step involves saving a copy of the container log files to your local filesystem for further investigation.

    Docker & Docker Compose

    Simply use the docker exec command to "cat" the log file(s) of interest and route them to a new file.

    docker exec --privileged 122a2488ab63 cat /tmp/datahub/logs/gms/gms.debug.log > my-local-log-file.log

    Now you should be able to view the logs locally.

    Kubernetes & Helm

    There are a few ways to get files out of the pod and into a local file. You can either use kubectl cp or simply cat and pipe the file of interest. We'll show an example using the latter approach:

    kubectl exec datahub-frontend-1231ead-6767 -n default -- cat /tmp/datahub/logs/gms/gms.log > my-local-gms.log
    - + \ No newline at end of file diff --git a/docs/how/jattach-guide/index.html b/docs/how/jattach-guide/index.html index c72533857a8ff..99c8eb681a702 100644 --- a/docs/how/jattach-guide/index.html +++ b/docs/how/jattach-guide/index.html @@ -8,7 +8,7 @@ - + @@ -34,7 +34,7 @@ behavior and performance.

    It's important to note that generating a thread dump file may impact the performance of the application. Therefore, caution should be exercised when using it in a production environment, ensuring sufficient resources and permissions are available for the operation.

    The command is as follows:

    jattach <pid> threaddump -l > /tmp/jstack.out
    - + \ No newline at end of file diff --git a/docs/how/kafka-config/index.html b/docs/how/kafka-config/index.html index ab2c10e0d33d3..8fb9ef286768a 100644 --- a/docs/how/kafka-config/index.html +++ b/docs/how/kafka-config/index.html @@ -8,7 +8,7 @@ - + @@ -30,7 +30,7 @@ not a configuration required for the producer. These WARN message can be safely ignored. Each of Datahub services are passed a full set of configuration but may not require all the configurations that are passed to them. These warn messages indicate that the service was passed a configuration that is not relevant to it and can be safely ignored.

    Other errors: Failed to start bean 'org.springframework.kafka.config.internalKafkaListenerEndpointRegistry'; nested exception is org.apache.kafka.common.errors.TopicAuthorizationException: Not authorized to access topics: [DataHubUsageEvent_v1]. Please check ranger permissions or kafka broker logs.

    - + \ No newline at end of file diff --git a/docs/how/migrating-graph-service-implementation/index.html b/docs/how/migrating-graph-service-implementation/index.html index 2e4e3833a8d95..1b15e70022b73 100644 --- a/docs/how/migrating-graph-service-implementation/index.html +++ b/docs/how/migrating-graph-service-implementation/index.html @@ -8,7 +8,7 @@ - + @@ -23,7 +23,7 @@ values.yaml of datahub.

    See the deployment helm guide for more details on how to set up your helm deployment.

    Finally, follow the restore-indices helm guide to re-build your graph index.

    Once the job completes, your data will be migrated.

    - + \ No newline at end of file diff --git a/docs/how/restore-indices/index.html b/docs/how/restore-indices/index.html index 57092f7062b03..6416e0c87f057 100644 --- a/docs/how/restore-indices/index.html +++ b/docs/how/restore-indices/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ MAE events corresponding to the aspects to restore the search and graph indices.

    Quickstart

    If you're using the quickstart images, you can use the datahub cli to restore indices.

    datahub docker quickstart --restore-indices

    See this section for more information.

    Docker-compose

    If you are on a custom docker-compose deployment, run the following command (you need to checkout the source repository) from the root of the repo to send MAE for each aspect in the Local DB.

    ./docker/datahub-upgrade/datahub-upgrade.sh -u RestoreIndices

    If you need to clear the search and graph indices before restoring, add -a clean to the end of the command.

    Refer to this doc on how to set environment variables for your environment.

    Kubernetes

    Run kubectl get cronjobs to see if the restoration job template has been deployed. If you see results like below, you are good to go.

    NAME                                          SCHEDULE    SUSPEND   ACTIVE   LAST SCHEDULE   AGE
    datahub-datahub-cleanup-job-template * * * * * True 0 <none> 2d3h
    datahub-datahub-restore-indices-job-template * * * * * True 0 <none> 2d3h

    If not, deploy latest helm charts to use this functionality.

    Once restore indices job template has been deployed, run the following command to start a job that restores indices.

    kubectl create job --from=cronjob/datahub-datahub-restore-indices-job-template datahub-restore-indices-adhoc

    Once the job completes, your indices will have been restored.

    Through API

    See Restore Indices API.

    - + \ No newline at end of file diff --git a/docs/how/search/index.html b/docs/how/search/index.html index 3bc3cc1758d46..0e3ea76dfda64 100644 --- a/docs/how/search/index.html +++ b/docs/how/search/index.html @@ -8,7 +8,7 @@ - + @@ -42,7 +42,7 @@ However, it does not tell you the specific attribute name to use for specialized searches. One way to do so is to inspect the ElasticSearch indices, for example:
    curl http://localhost:9200/_cat/indices returns all the ES indices in the ElasticSearch container.

    yellow open chartindex_v2_1643510690325                           bQO_RSiCSUiKJYsmJClsew 1 1   2 0   8.5kb   8.5kb
    yellow open mlmodelgroupindex_v2_1643510678529 OjIy0wb7RyKqLz3uTENRHQ 1 1 0 0 208b 208b
    yellow open dataprocessindex_v2_1643510676831 2w-IHpuiTUCs6e6gumpYHA 1 1 0 0 208b 208b
    yellow open corpgroupindex_v2_1643510673894 O7myCFlqQWKNtgsldzBS6g 1 1 3 0 16.8kb 16.8kb
    yellow open corpuserindex_v2_1643510672335 0rIe_uIQTjme5Wy61MFbaw 1 1 6 2 32.4kb 32.4kb
    yellow open datasetindex_v2_1643510688970 bjBfUEswSoSqPi3BP4iqjw 1 1 15 0 29.2kb 29.2kb
    yellow open dataflowindex_v2_1643510681607 N8CMlRFvQ42rnYMVDaQJ2g 1 1 1 0 10.2kb 10.2kb
    yellow open dataset_datasetusagestatisticsaspect_v1_1643510694706 kdqvqMYLRWq1oZt1pcAsXQ 1 1 4 0 8.9kb 8.9kb
    yellow open .ds-datahub_usage_event-000003 YMVcU8sHTFilUwyI4CWJJg 1 1 186 0 203.9kb 203.9kb
    yellow open datajob_datahubingestioncheckpointaspect_v1 nTXJf7C1Q3GoaIJ71gONxw 1 1 0 0 208b 208b
    yellow open .ds-datahub_usage_event-000004 XRFwisRPSJuSr6UVmmsCsg 1 1 196 0 165.5kb 165.5kb
    yellow open .ds-datahub_usage_event-000005 d0O6l5wIRLOyG6iIfAISGw 1 1 77 0 108.1kb 108.1kb
    yellow open dataplatformindex_v2_1643510671426 _4SIIhfAT8yq_WROufunXA 1 1 0 0 208b 208b
    yellow open mlmodeldeploymentindex_v2_1643510670629 n81eJIypSp2Qx-fpjZHgRw 1 1 0 0 208b 208b
    yellow open .ds-datahub_usage_event-000006 oyrWKndjQ-a8Rt1IMD9aSA 1 1 143 0 127.1kb 127.1kb
    yellow open mlfeaturetableindex_v2_1643510677164 iEXPt637S1OcilXpxPNYHw 1 1 5 0 8.9kb 8.9kb
    yellow open .ds-datahub_usage_event-000001 S9EnGj64TEW8O3sLUb9I2Q 1 1 257 0 163.9kb 163.9kb
    yellow open .ds-datahub_usage_event-000002 2xJyvKG_RYGwJOG9yq8pJw 1 1 44 0 155.4kb 155.4kb
    yellow open dataset_datasetprofileaspect_v1_1643510693373 uahwTHGRRAC7w1c2VqVy8g 1 1 31 0 18.9kb 18.9kb
    yellow open mlprimarykeyindex_v2_1643510687579 MUcmT8ASSASzEpLL98vrWg 1 1 7 0 9.5kb 9.5kb
    yellow open glossarytermindex_v2_1643510686127 cQL8Pg6uQeKfMly9GPhgFQ 1 1 3 0 10kb 10kb
    yellow open datajob_datahubingestionrunsummaryaspect_v1 rk22mIsDQ02-52MpWLm1DA 1 1 0 0 208b 208b
    yellow open mlmodelindex_v2_1643510675399 gk-WSTVjRZmkDU5ggeFSqg 1 1 1 0 10.3kb 10.3kb
    yellow open dashboardindex_v2_1643510691686 PQjSaGhTRqWW6zYjcqXo6Q 1 1 1 0 8.7kb 8.7kb
    yellow open datahubpolicyindex_v2_1643510671774 ZyTrYx3-Q1e-7dYq1kn5Gg 1 1 0 0 208b 208b
    yellow open datajobindex_v2_1643510682977 K-rbEyjBS6ew5uOQQS4sPw 1 1 2 0 11.3kb 11.3kb
    yellow open datahubretentionindex_v2 8XrQTPwRTX278mx1SrNwZA 1 1 0 0 208b 208b
    yellow open glossarynodeindex_v2_1643510678826 Y3_bCz0YR2KPwCrrVngDdA 1 1 1 0 7.4kb 7.4kb
    yellow open system_metadata_service_v1 36spEDbDTdKgVlSjE8t-Jw 1 1 387 8 63.2kb 63.2kb
    yellow open schemafieldindex_v2_1643510684410 tZ1gC3haTReRLmpCxirVxQ 1 1 0 0 208b 208b
    yellow open mlfeatureindex_v2_1643510680246 aQO5HF0mT62Znn-oIWBC8A 1 1 20 0 17.4kb 17.4kb
    yellow open tagindex_v2_1643510684785 PfnUdCUORY2fnF3I3W7HwA 1 1 3 1 18.6kb 18.6kb

    The index name will vary from instance to instance. Indexed information about Datasets can be found in:
    curl http://localhost:9200/datasetindex_v2_1643510688970/_search?pretty

    example information of a dataset:

    {
    "_index" : "datasetindex_v2_1643510688970",
    "_type" : "_doc",
    "_id" : "urn%3Ali%3Adataset%3A%28urn%3Ali%3AdataPlatform%3Akafka%2CSampleKafkaDataset%2CPROD%29",
    "_score" : 1.0,
    "_source" : {
    "urn" : "urn:li:dataset:(urn:li:dataPlatform:kafka,SampleKafkaDataset,PROD)",
    "name" : "SampleKafkaDataset",
    "browsePaths" : [
    "/prod/kafka/SampleKafkaDataset"
    ],
    "origin" : "PROD",
    "customProperties" : [
    "prop2=pikachu",
    "prop1=fakeprop"
    ],
    "hasDescription" : false,
    "hasOwners" : true,
    "owners" : [
    "urn:li:corpuser:jdoe",
    "urn:li:corpuser:datahub"
    ],
    "fieldPaths" : [
    "[version=2.0].[type=boolean].field_foo_2",
    "[version=2.0].[type=boolean].field_bar",
    "[version=2.0].[key=True].[type=int].id"
    ],
    "fieldGlossaryTerms" : [ ],
    "fieldDescriptions" : [
    "Foo field description",
    "Bar field description",
    "Id specifying which partition the message should go to"
    ],
    "fieldTags" : [
    "urn:li:tag:NeedsDocumentation"
    ],
    "platform" : "urn:li:dataPlatform:kafka"
    }
    },

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/how/ui-tabs-guide/index.html b/docs/how/ui-tabs-guide/index.html index c42f12fef0922..5af28dc719f0b 100644 --- a/docs/how/ui-tabs-guide/index.html +++ b/docs/how/ui-tabs-guide/index.html @@ -8,13 +8,13 @@ - +

    UI Tabs Guide

    Some of the tabs in the UI might not be enabled by default. This guide is supposed to tell Admins of DataHub how to enable those UI tabs.

    Datasets

    Stats and Queries Tab

    To enable these tabs you need to use one of the usage sources which gets the relevant metadata from your sources and ingests them into DataHub. These usage sources are listed under other sources which support them e.g. Snowflake source, BigQuery source

    Validation Tab

    This tab is enabled if you use Data Quality Integration with Great Expectations.

    Common to multiple entities

    Properties Tab

    Properties are a catch-all bag for metadata not captured in other aspects stored for a Dataset. These are populated via the various source connectors when metadata is ingested.

    - + \ No newline at end of file diff --git a/docs/how/updating-datahub/index.html b/docs/how/updating-datahub/index.html index 65f7f8f57d502..c48a41fce2aff 100644 --- a/docs/how/updating-datahub/index.html +++ b/docs/how/updating-datahub/index.html @@ -8,7 +8,7 @@ - + @@ -36,7 +36,7 @@ This version contains changes to support running ingestion in debug mode. Previous versions are not compatible with this release. Upgrading to helm chart version 0.2.103 will ensure that you have the compatible versions by default.

    Deprecations

    Other notable Changes

    v0.8.42

    Breaking Changes

    • Python 3.6 is no longer supported for metadata ingestion
    • #5451 GMS_HOST and GMS_PORT environment variables deprecated in v0.8.39 have been removed. Use DATAHUB_GMS_HOST and DATAHUB_GMS_PORT instead.
    • #5478 DataHub CLI delete command when used with --hard option will delete soft-deleted entities which match the other filters given.
    • #5471 Looker now populates userEmail in dashboard user usage stats. This version of the Looker connector will not work with older versions of datahub-gms if you have the extract_usage_history Looker config enabled.
    • #5529 - ANALYTICS_ENABLED environment variable in datahub-gms is now deprecated. Use DATAHUB_ANALYTICS_ENABLED instead.
    • #5485 --include-removed option was removed from delete CLI

    Potential Downtime

    Deprecations

    Other notable Changes

    v0.8.41

    Breaking Changes

    • The should_overwrite flag in csv-enricher has been replaced with write_semantics to match the format used for other sources. See the documentation for more details

    • Closing an authorization hole in creating tags adding a Platform Privilege called Create Tags for creating tags. This is assigned to datahub root user, along with default All Users policy. Notice: You may need to add this privilege (or Manage Tags) to existing users that need the ability to create tags on the platform.

    • #5329 Below profiling config parameters are now supported in BigQuery:

      • profiling.profile_if_updated_since_days (default=1)
      • profiling.profile_table_size_limit (default=1GB)
      • profiling.profile_table_row_limit (default=50000)

      Set above parameters to null if you want older behaviour.

    Potential Downtime

    Deprecations

    Other notable Changes

    v0.8.40

    Breaking Changes

    • #5240 lineage_client_project_id in bigquery source is removed. Use storage_project_id instead.

    Potential Downtime

    Deprecations

    Other notable Changes

    v0.8.39

    Breaking Changes

    • Refactored the health field of the Dataset GraphQL Type to be of type list of HealthStatus (was type HealthStatus). See this PR for more details.

    Potential Downtime

    Deprecations

    • #4875 Lookml view file contents will no longer be populated in custom_properties, instead view definitions will be always available in the View Definitions tab.
    • #5208 GMS_HOST and GMS_PORT environment variables being set in various containers are deprecated in favour of DATAHUB_GMS_HOST and DATAHUB_GMS_PORT.
    • KAFKA_TOPIC_NAME environment variable in datahub-mae-consumer and datahub-gms is now deprecated. Use METADATA_AUDIT_EVENT_NAME instead.
    • KAFKA_MCE_TOPIC_NAME environment variable in datahub-mce-consumer and datahub-gms is now deprecated. Use METADATA_CHANGE_EVENT_NAME instead.
    • KAFKA_FMCE_TOPIC_NAME environment variable in datahub-mce-consumer and datahub-gms is now deprecated. Use FAILED_METADATA_CHANGE_EVENT_NAME instead.

    Other notable Changes

    • #5132 Profile tables in snowflake source only if they have been updated since configured (default: 1) number of day(s). Update the config profiling.profile_if_updated_since_days as per your profiling schedule or set it to None if you want older behaviour.

    v0.8.38

    Breaking Changes

    Potential Downtime

    Deprecations

    Other notable Changes

    • Create & Revoke Access Tokens via the UI
    • Create and Manage new users via the UI
    • Improvements to Business Glossary UI
    • FIX - Do not require reindexing to migrate to using the UI business glossary

    v0.8.36

    Breaking Changes

    • In this release we introduce a brand new Business Glossary experience. With this new experience comes some new ways of indexing data in order to make viewing and traversing the different levels of your Glossary possible. Therefore, you will have to restore your indices in order for the new Glossary experience to work for users that already have existing Glossaries. If this is your first time using DataHub Glossaries, you're all set!

    Potential Downtime

    Deprecations

    Other notable Changes

    • #4961 Dropped profiling is not reported by default as that caused a lot of spurious logging in some cases. Set profiling.report_dropped_profiles to True if you want older behaviour.

    v0.8.35

    Breaking Changes

    Potential Downtime

    Deprecations

    • #4875 Lookml view file contents will no longer be populated in custom_properties, instead view definitions will be always available in the View Definitions tab.

    Other notable Changes

    v0.8.34

    Breaking Changes

    • #4644 Remove database option from snowflake source which was deprecated since v0.8.5
    • #4595 Rename confusing config report_upstream_lineage to upstream_lineage_in_report in snowflake connector which was added in 0.8.32

    Potential Downtime

    Deprecations

    • #4644 host_port option of snowflake and snowflake-usage sources deprecated as the name was confusing. Use account_id option instead.

    Other notable Changes

    • #4760 check_role_grants option was added in snowflake to disable checking roles in snowflake as some people were reporting long run times when checking roles.
    - + \ No newline at end of file diff --git a/docs/index.html b/docs/index.html index 5fd93bb46a830..7e38b0bccfc63 100644 --- a/docs/index.html +++ b/docs/index.html @@ -8,13 +8,13 @@ - + - + \ No newline at end of file diff --git a/docs/introduction/index.html b/docs/introduction/index.html index e023282f60fa6..6b9b41a25024d 100644 --- a/docs/introduction/index.html +++ b/docs/introduction/index.html @@ -8,7 +8,7 @@ - + @@ -30,7 +30,7 @@ Demo | Town Hall


    📣 DataHub Town Hall is the 4th Thursday at 9am US PT of every month - add it to your calendar!

    ✨ DataHub Community Highlights:

    Introduction

    DataHub is an open-source metadata platform for the modern data stack. Read about the architectures of different metadata systems and why DataHub excels here. Also read our LinkedIn Engineering blog post, check out our Strata presentation and watch our Crunch Conference Talk. You should also visit DataHub Architecture to get a better understanding of how DataHub is implemented.

    Features & Roadmap

    Check out DataHub's Features & Roadmap.

    Demo and Screenshots

    There's a hosted demo environment courtesy of Acryl Data where you can explore DataHub without installing it locally.

    Quickstart

    Please follow the DataHub Quickstart Guide to get a copy of DataHub up & running locally using Docker. As the guide assumes some basic knowledge of Docker, we'd recommend you to go through the "Hello World" example of A Docker Tutorial for Beginners if Docker is completely foreign to you.

    Development

    If you're looking to build & modify datahub please take a look at our Development Guide.

    DataHub Demo GIF

    Source Code and Repositories

    • datahub-project/datahub: This repository contains the complete source code for DataHub's metadata model, metadata services, integration connectors and the web application.
    • acryldata/datahub-actions: DataHub Actions is a framework for responding to changes to your DataHub Metadata Graph in real time.
    • acryldata/datahub-helm: Repository of helm charts for deploying DataHub on a Kubernetes cluster
    • acryldata/meta-world: A repository to store recipes, custom sources, transformations and other things to make your DataHub experience magical
    • dbt-impact-action : This repository contains a github action for commenting on your PRs with a summary of the impact of changes within a dbt project
    • datahub-tools : Additional python tools to interact with the DataHub GraphQL endpoints, built by Notion
    • business-glossary-sync-action : This repository contains a github action that opens PRs to update your business glossary yaml file.

    Releases

    See Releases page for more details. We follow the SemVer Specification when versioning the releases and adopt the Keep a Changelog convention for the changelog format.

    Contributing

    We welcome contributions from the community. Please refer to our Contributing Guidelines for more details. We also have a contrib directory for incubating experimental features.

    Community

    Join our Slack workspace for discussions and important announcements. You can also find out more about our upcoming town hall meetings and view past recordings.

    Adoption

    Here are the companies that have officially adopted DataHub. Please feel free to add yours to the list if we missed it.

    Select Articles & Talks

    See the full list here.

    License

    Apache License 2.0.

    - + \ No newline at end of file diff --git a/docs/lineage/airflow/index.html b/docs/lineage/airflow/index.html index 5264b55c19f96..3756b18351326 100644 --- a/docs/lineage/airflow/index.html +++ b/docs/lineage/airflow/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ On MWAA you should add this config to your Apache Airflow configuration options.
    airflow.cfg
    [core]
    lazy_load_plugins = False
    1. You must configure an Airflow hook for Datahub. We support both a Datahub REST hook and a Kafka-based hook, but you only need one.

      # For REST-based:
      airflow connections add --conn-type 'datahub_rest' 'datahub_rest_default' --conn-host 'http://datahub-gms:8080' --conn-password '<optional datahub auth token>'
      # For Kafka-based (standard Kafka sink config can be passed via extras):
      airflow connections add --conn-type 'datahub_kafka' 'datahub_kafka_default' --conn-host 'broker:9092' --conn-extra '{}'
    2. Add your datahub_conn_id and/or cluster to your airflow.cfg file if they do not align with the default values. See the configuration parameters below

      Configuration options:

      NameDefault valueDescription
      datahub.enabledtrueIf the plugin should be enabled.
      datahub.conn_iddatahub_rest_defaultThe name of the datahub connection you set in step 1.
      datahub.clusterprodname of the airflow cluster
      datahub.capture_ownership_infotrueIf true, the owners field of the DAG will be captured as a DataHub corpuser.
      datahub.capture_tags_infotrueIf true, the tags field of the DAG will be captured as DataHub tags.
      datahub.capture_executionstrueIf true, we'll capture task runs in DataHub in addition to DAG definitions.
      datahub.graceful_exceptionstrueIf set to true, most runtime errors in the lineage backend will be suppressed and will not cause the overall task to fail. Note that configuration issues will still throw exceptions.
    3. Configure inlets and outlets for your Airflow operators. For reference, look at the sample DAG in lineage_backend_demo.py, or reference lineage_backend_taskflow_demo.py if you're using the TaskFlow API.

    4. [optional] Learn more about Airflow lineage, including shorthand notation and some automation.

    How to validate installation

    1. Go and check in Airflow at Admin -> Plugins menu if you can see the DataHub plugin
    2. Run an Airflow DAG. In the task logs, you should see Datahub related log messages like:
    Emitting DataHub ...

    Emitting lineage via a custom operator to the Airflow Plugin

    If you have created a custom Airflow operator (see docs) that inherits from the BaseOperator class, when overriding the execute function, set inlets and outlets via context['ti'].task.inlets and context['ti'].task.outlets. The DataHub Airflow plugin will then pick up those inlets and outlets after the task runs.

    class DbtOperator(BaseOperator):
    ...

    def execute(self, context):
    # do something
    inlets, outlets = self._get_lineage()
    # inlets/outlets are lists of either datahub_provider.entities.Dataset or datahub_provider.entities.Urn
    context['ti'].task.inlets = self.inlets
    context['ti'].task.outlets = self.outlets

    def _get_lineage(self):
    # Do some processing to get inlets/outlets

    return inlets, outlets

    If you override the pre_execute and post_execute function, ensure they include the @prepare_lineage and @apply_lineage decorators respectively. source

    Using DataHub's Airflow lineage backend (deprecated)

    caution

    The DataHub Airflow plugin (above) is the recommended way to integrate Airflow with DataHub. For managed services like MWAA, the lineage backend is not supported and so you must use the Airflow plugin.

    If you're using Airflow 1.x, we recommend using the Airflow lineage backend with acryl-datahub <= 0.9.1.0.

    note

    If you are looking to run Airflow and DataHub using docker locally, follow the guide here. Otherwise proceed to follow the instructions below.

    Setting up Airflow to use DataHub as Lineage Backend

    1. You need to install the required dependency in your airflow. See https://registry.astronomer.io/providers/datahub/modules/datahublineagebackend
    pip install acryl-datahub[airflow]
    # If you need the Kafka-based emitter/hook:
    pip install acryl-datahub[airflow,datahub-kafka]
    1. You must configure an Airflow hook for Datahub. We support both a Datahub REST hook and a Kafka-based hook, but you only need one.

      # For REST-based:
      airflow connections add --conn-type 'datahub_rest' 'datahub_rest_default' --conn-host 'http://datahub-gms:8080' --conn-password '<optional datahub auth token>'
      # For Kafka-based (standard Kafka sink config can be passed via extras):
      airflow connections add --conn-type 'datahub_kafka' 'datahub_kafka_default' --conn-host 'broker:9092' --conn-extra '{}'
    2. Add the following lines to your airflow.cfg file.

      airflow.cfg
      [lineage]
      backend = datahub_provider.lineage.datahub.DatahubLineageBackend
      datahub_kwargs = {
      "enabled": true,
      "datahub_conn_id": "datahub_rest_default",
      "cluster": "prod",
      "capture_ownership_info": true,
      "capture_tags_info": true,
      "graceful_exceptions": true }
      # The above indentation is important!

      Configuration options:

      • datahub_conn_id (required): Usually datahub_rest_default or datahub_kafka_default, depending on what you named the connection in step 1.
      • cluster (defaults to "prod"): The "cluster" to associate Airflow DAGs and tasks with.
    • capture_ownership_info (defaults to true): If true, the owners field of the DAG will be captured as a DataHub corpuser.
      • capture_tags_info (defaults to true): If true, the tags field of the DAG will be captured as DataHub tags.
      • capture_executions (defaults to false): If true, it captures task runs as DataHub DataProcessInstances.
      • graceful_exceptions (defaults to true): If set to true, most runtime errors in the lineage backend will be suppressed and will not cause the overall task to fail. Note that configuration issues will still throw exceptions.
    3. Configure inlets and outlets for your Airflow operators. For reference, look at the sample DAG in lineage_backend_demo.py, or reference lineage_backend_taskflow_demo.py if you're using the TaskFlow API.

    4. [optional] Learn more about Airflow lineage, including shorthand notation and some automation.

    Emitting lineage via a separate operator

    Take a look at this sample DAG:

    In order to use this example, you must first configure the Datahub hook. Like in ingestion, we support a Datahub REST hook and a Kafka-based hook. See step 1 above for details.

    Debugging

    Incorrect URLs

    If your URLs aren't being generated correctly (usually they'll start with http://localhost:8080 instead of the correct hostname), you may need to set the webserver base_url config.

    airflow.cfg
    [webserver]
    base_url = http://airflow.example.com

    Additional references

    Related Datahub videos:

    - + \ No newline at end of file diff --git a/docs/lineage/lineage-feature-guide/index.html b/docs/lineage/lineage-feature-guide/index.html index 516644138f2f5..7028b7bf6a788 100644 --- a/docs/lineage/lineage-feature-guide/index.html +++ b/docs/lineage/lineage-feature-guide/index.html @@ -8,14 +8,14 @@ - +

    About DataHub Lineage

    Feature Availability
    Self-Hosted DataHub
    Managed DataHub

    Lineage is used to capture data dependencies within an organization. It allows you to track the inputs from which a data asset is derived, along with the data assets that depend on it downstream.

    If you're using an ingestion source that supports extraction of Lineage (e.g. the "Table Lineage Capability"), then lineage information can be extracted automatically. For detailed instructions, refer to the source documentation for the source you are using. If you are not using a Lineage-supporting ingestion source, you can programmatically emit lineage edges between entities via API.

    Alternatively, as of v0.9.5, DataHub supports the manual editing of lineage between entities. Data experts are free to add or remove upstream and downstream lineage edges in both the Lineage Visualization screen as well as the Lineage tab on entity pages. Use this feature to supplement automatic lineage extraction or establish important entity relationships in sources that do not support automatic extraction. Editing lineage by hand is supported for Datasets, Charts, Dashboards, and Data Jobs.

    note

    Lineage added by hand and programmatically may conflict with one another to cause unwanted overwrites. It is strongly recommended that lineage is edited manually only in cases where lineage information is not also extracted in automated fashion, e.g. by running an ingestion source.

    Types of lineage connections supported in DataHub are:

    • Dataset-to-dataset
    • Pipeline lineage (dataset-to-job-to-dataset)
    • Dashboard-to-chart lineage
    • Chart-to-dataset lineage
    • Job-to-dataflow (dbt lineage)

    Lineage Setup, Prerequisites, and Permissions

    To edit lineage for an entity, you'll need the following Metadata Privilege:

    • Edit Lineage metadata privilege to edit lineage at the entity level

    It is important to know that the Edit Lineage privilege is required for all entities whose lineage is affected by the changes. For example, in order to add "Dataset B" as an upstream dependency of "Dataset A", you'll need the Edit Lineage privilege for both Dataset A and Dataset B.

    Managing Lineage via the DataHub UI

    Viewing lineage on the Datahub UI

    The UI shows the latest version of the lineage. The time picker can be used to filter out edges within the latest version to exclude those that were last updated outside of the time window. Selecting time windows in the picker will not show you historical lineages. It will only filter the view of the latest version of the lineage.

    Editing from Lineage Graph View

    The first place that you can edit lineage for entities is from the Lineage Visualization screen. Click on the "Lineage" button on the top right of an entity's profile to get to this view.

    Once you find the entity that you want to edit the lineage of, click on the three-dot menu dropdown to select whether you want to edit lineage in the upstream direction or the downstream direction.

    If you want to edit upstream lineage for entities downstream of the center node or downstream lineage for entities upstream of the center node, you can simply re-center to focus on the node you want to edit. Once focused on the desired node, you can edit lineage in either direction.

    Adding Lineage Edges

    Once you click "Edit Upstream" or "Edit Downstream," a modal will open that allows you to manage lineage for the selected entity in the chosen direction. In order to add a lineage edge to a new entity, search for it by name in the provided search bar and select it. Once you're satisfied with everything you've added, click "Save Changes." If you change your mind, you can always cancel or exit without saving the changes you've made.

    Removing Lineage Edges

    You can remove lineage edges from the same modal used to add lineage edges. Find the edge(s) that you want to remove, and click the "X" on the right side of it. And just like adding, you need to click "Save Changes" to save and if you exit without saving, your changes won't be applied.

    Reviewing Changes

    Any time lineage is edited manually, we keep track of who made the change and when they made it. You can see this information in the modal where you add and remove edges. If an edge was added manually, a user avatar will be in line with the edge that was added. You can hover over this avatar in order to see who added it and when.

    Editing from Lineage Tab

    The other place that you can edit lineage for entities is from the Lineage Tab on an entity's profile. Click on the "Lineage" tab in an entity's profile and then find the "Edit" dropdown that allows you to edit upstream or downstream lineage for the given entity.

    Using the modal from this view will work the same as described above for editing from the Lineage Visualization screen.

    Managing Lineage via API

    note

    When you emit any lineage aspect, the existing aspect gets completely overwritten, unless specifically using patch semantics. This means that the latest version visible in the UI will be your version.

    Using Dataset-to-Dataset Lineage

    This relationship model uses dataset -> dataset connection through the UpstreamLineage aspect in the Dataset entity.

    Here are a few samples for the usage of this type of lineage:

    Using dbt Lineage

    This model captures dbt specific nodes (tables, views, etc.) and

    • uses datasets as the base entity type and
    • extends subclass datasets for each dbt-specific concept, and
    • links them together for dataset-to-dataset lineage

    Here is a sample usage of this lineage:

    Using Pipeline Lineage

    The relationship model for this is datajob-to-dataset through the dataJobInputOutput aspect in the DataJob entity.

    For Airflow, this lineage is supported using Airflow’s lineage backend which allows you to specify the inputs to and output from that task.

    If you annotate that on your task we can pick up that information and push that as lineage edges into datahub automatically. You can install this package from Airflow’s Astronomer marketplace here.

    Here are a few samples for the usage of this type of lineage:

    Using Dashboard-to-Chart Lineage

    This relationship model uses the dashboardInfo aspect of the Dashboard entity and models an explicit edge between a dashboard and a chart (such that charts can be attached to multiple dashboards).

    Here is a sample usage of this lineage:

    Using Chart-to-Dataset Lineage

    This relationship model uses the chartInfo aspect of the Chart entity.

    Here is a sample usage of this lineage:

    Additional Resources

    Videos

    DataHub Basics: Lineage 101

    DataHub November 2022 Town Hall - Including Manual Lineage Demo

    GraphQL

    Examples

    Updating Lineage

    mutation updateLineage {
    updateLineage(input: {
    edgesToAdd: [
    {
    downstreamUrn: "urn:li:dataset:(urn:li:dataPlatform:kafka,SampleKafkaDataset,PROD)",
    upstreamUrn: "urn:li:dataset:(urn:li:dataPlatform:datahub,Dataset,PROD)"
    }
    ],
    edgesToRemove: [
    {
    downstreamUrn: "urn:li:dataset:(urn:li:dataPlatform:hdfs,SampleHdfsDataset,PROD)",
    upstreamUrn: "urn:li:dataset:(urn:li:dataPlatform:kafka,SampleKafkaDataset,PROD)"
    }
    ]
    })
    }

    DataHub Blog

    FAQ and Troubleshooting

    The Lineage Tab is greyed out - why can’t I click on it?

    This means you have not yet ingested lineage metadata for that entity. Please ingest lineage to proceed.

    Are there any recommended practices for emitting lineage?

    We recommend emitting aspects as MetadataChangeProposalWrapper over emitting them via the MetadataChangeEvent.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/links/index.html b/docs/links/index.html index 023c46cb8aa9f..77517a9921e0f 100644 --- a/docs/links/index.html +++ b/docs/links/index.html @@ -8,13 +8,13 @@ - +

    Articles & Talks

    Overviews

    Best Practices

    Case Studies

    Talks & Presentations

    Non-English

    - + \ No newline at end of file diff --git a/docs/managed-datahub/approval-workflows/index.html b/docs/managed-datahub/approval-workflows/index.html index c0ed0f3b555a5..2ee74c34d79a9 100644 --- a/docs/managed-datahub/approval-workflows/index.html +++ b/docs/managed-datahub/approval-workflows/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ Today, this only applies for Dataset Metadata Entities, which have a "fields" sub-resource. In this case, the subResource value would be the field path for the schema field.

    Finding a Tag or Glossary Term Identifier

    Tags and Glossary Terms are also uniquely identified by an URN.

    Tag URNs have the following format: urn:li:tag:<id>

    Glossary Term URNs have the following format: urn:li:glossaryTerm:<id>

    These full identifiers can be copied from the entity pages of the Tag or Glossary Term.

    Issuing a GraphQL Query

    Once we've constructed an Entity URN, any relevant sub-resource identifiers, and a Tag or Term URN, we're ready to propose! To do so, we'll use the DataHub GraphQL API.

    In particular, we'll be using the proposeTag, proposeGlossaryTerm, and proposeUpdateDescription Mutations, which have the following interface:

    type Mutation {
    proposeTerm(input: TermAssociationInput!): String! # Returns Proposal URN.
    }

    input TermAssociationInput {
    resourceUrn: String! # Required. e.g. "urn:li:dataset:(...)"
    subResource: String # Optional. e.g. "fieldName"
    subResourceType: String # Optional. "DATASET_FIELD" for dataset fields
    term: String! # Required. e.g. "urn:li:tag:Marketing"
    }
    type Mutation {
    proposeTag(input: TagAssociationInput!): String! # Returns Proposal URN.
    }

    input TagAssociationInput {
    resourceUrn: String! # Required. e.g. "urn:li:dataset:(...)"
    subResource: String # Optional. e.g. "fieldName"
    subResourceType: String # Optional. "DATASET_FIELD" for dataset fields
    tagUrn: String! # Required. e.g. "urn:li:tag:Marketing"
    }
    mutation proposeUpdateDescription($input: DescriptionUpdateInput!) {
    proposeUpdateDescription(input: $input)
    }

    """
    Currently supports updates to Glossary Term descriptions only
    """
    input DescriptionUpdateInput {
    description: String! # the new description

    resourceUrn: String!

    subResourceType: SubResourceType

    subResource: String
    }

    Additional Resources

    Permissions

    To create & manage metadata proposals, certain access policies or roles are required.

    Privileges for Creating Proposals

    To create a new proposal, one of these Metadata privileges is required. All roles have these privileges by default.

    • Propose Tags - Allows to propose tags at the Entity level
    • Propose Dataset Column Tags - Allows to propose tags at the Dataset Field level
    • Propose Glossary Terms - Allows to propose terms at the Entity level
    • Propose Dataset Column Glossary Terms - Allows to propose terms at the Dataset Field level

    To be able to see the Proposals Tab you need the "View Metadata Proposals" PLATFORM privilege

    Privileges for Managing Proposals

    To be able to approve or deny proposals you need one of the following Metadata privileges. Admin and Editor roles already have these by default.

    • Manage Tag Proposals
    • Manage Glossary Term Proposals
    • Manage Dataset Column Tag Proposals
    • Manage Dataset Column Term Proposals

    These map directly to the 4 privileges for doing the proposals.

    To be able to approve or deny proposals to the glossary itself, you just need one permission:

    • Manage Glossaries

    Videos

    - + \ No newline at end of file diff --git a/docs/managed-datahub/chrome-extension/index.html b/docs/managed-datahub/chrome-extension/index.html index 1c5dfd701eb11..304dd27d32543 100644 --- a/docs/managed-datahub/chrome-extension/index.html +++ b/docs/managed-datahub/chrome-extension/index.html @@ -8,13 +8,13 @@ - +

    Acryl DataHub Chrome Extension

    Feature Availability
    Self-Hosted DataHub
    Managed DataHub

    Installing the Extension

    In order to use the Acryl DataHub Chrome extension, you need to download it onto your browser from the Chrome web store here.

    Simply click "Add to Chrome" then "Add extension" on the ensuing popup.

    Configuring the Extension

    Once you have your extension installed, you'll need to configure it to work with your Acryl DataHub deployment.

    1. Click the extension button on the right of your browser's address bar to view all of your installed extensions. Click on the newly installed DataHub extension.

    1. Fill in your DataHub domain and click "Continue" in the extension popup that appears.

    If your organization uses standard SaaS domains for Looker, you should be ready to go!

    Additional Configurations

    Some organizations have custom SaaS domains for Looker and some Acryl DataHub deployments utilize Platform Instances and set custom Environments when creating DataHub assets. If any of these situations applies to you, please follow the next few steps to finish configuring your extension.

    1. Click on the extension button and select your DataHub extension to open the popup again. Now click the settings icon in order to open the configurations page.

    1. Fill out and save any custom configurations you have in the TOOL CONFIGURATIONS section. Here you can configure a custom domain, a Platform Instance associated with that domain, and the Environment set on your DataHub assets. If you don't have a custom domain but do have a custom Platform Instance or Environment, feel free to leave the domain field empty.

    Using the Extension

    Once you have everything configured on your extension, it's time to use it!

    1. First ensure that you are logged in to your Acryl DataHub instance.

    2. Navigate to Looker or Tableau and log in to view your data assets.

    3. Navigate to a page where DataHub can provide insights on your data assets (Dashboards and Explores).

    4. Click the Acryl DataHub extension button on the bottom right of your page to open a drawer where you can now see additional information about this asset right from your DataHub instance.

    Advanced: Self-Hosted DataHub

    If you are using the Acryl DataHub Chrome extension for your self-hosted DataHub instance, everything above is applicable. However, there is one additional step you must take in order to set up your instance to be compatible with the extension.

    Configure Auth Cookies

    In order for the Chrome extension to work with your instance, it needs to be able to make authenticated requests. Therefore, authentication cookies need to be set up so that they can be shared with the extension on your browser. You must update the values of two environment variables in your datahub-frontend container:

    AUTH_COOKIE_SAME_SITE="NONE"
    AUTH_COOKIE_SECURE=true

    Once you re-deploy your datahub-frontend container with these values, you should be good to go!

    - + \ No newline at end of file diff --git a/docs/managed-datahub/datahub-api/entity-events-api/index.html b/docs/managed-datahub/datahub-api/entity-events-api/index.html index 33de383c690be..d2c649999a976 100644 --- a/docs/managed-datahub/datahub-api/entity-events-api/index.html +++ b/docs/managed-datahub/datahub-api/entity-events-api/index.html @@ -8,13 +8,13 @@ - +

    Entity Events API

    Feature Availability
    Self-Hosted DataHub
    Managed DataHub

    Introduction

    The Events API allows you to integrate changes happening on the DataHub Metadata Graph in real time into a broader event-based architecture.

    Supported Integrations

    Use Cases

    Real-time use cases broadly fall into the following categories:

    • Workflow Integration: Integrate DataHub flows into your organization's internal workflow management system. For example, create a Jira ticket when specific Tags or Terms are proposed on a Dataset.
    • Notifications: Generate organization-specific notifications when a change is made on DataHub. For example, send an email to the governance team when a "PII" tag is added to any data asset.
    • Metadata Enrichment: Trigger downstream metadata changes when an upstream change occurs. For example, propagating glossary terms or tags to downstream entities.
    • Synchronization: Syncing changes made in DataHub into a 3rd party system. For example, reflecting Tag additions in DataHub into Snowflake.
    • Auditing: Audit who is making what changes on DataHub through time.

    Event Structure

    Each entity event is serialized to JSON & follows a common base structure.

    Common Fields

    NameTypeDescriptionOptional
    entityUrnStringThe unique identifier for the Entity being changed. For example, a Dataset's urn.False
    entityTypeStringThe type of the entity being changed. Supported values include dataset, chart, dashboard, dataFlow (Pipeline), dataJob (Task), domain, tag, glossaryTerm, corpGroup, & corpUser.False
    categoryStringThe category of the change, related to the kind of operation that was performed. Examples include TAG, GLOSSARY_TERM, DOMAIN, LIFECYCLE, and more.False
    operationStringThe operation being performed on the entity given the category. For example, ADD ,REMOVE, MODIFY. For the set of valid operations, see the full catalog below.False
    modifierStringThe modifier that has been applied to the entity. The value depends on the category. An example includes the URN of a tag being applied to a Dataset or Schema Field.True
    parametersDictAdditional key-value parameters used to provide specific context. The precise contents depends on the category + operation of the event. See the catalog below for a full summary of the combinations.True
    auditStamp.actorStringThe urn of the actor who triggered the change.False
    auditStamp.timeNumberThe timestamp in milliseconds corresponding to the event.False

    For example, an event indicating that a Tag has been added to a particular Dataset would populate each of these fields:

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "TAG",
    "operation": "ADD",
    "modifier": "urn:li:tag:PII",
    "parameters": {
    "tagUrn": "urn:li:tag:PII"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    In the following sections, we'll take a closer look at the purpose and structure of each supported event type.

    Event Types

    Below, we will review the catalog of events available for consumption.

    Add Tag Event

    This event is emitted when a Tag has been added to an entity on DataHub.

    CategoryOperationEntity Types
    TAGADDdataset, dashboard, chart, dataJob, container, dataFlow , schemaField

    Parameters

    NameTypeDescriptionOptional
    tagUrnStringThe urn of the tag that has been added.False
    fieldPathStringThe path of the schema field which the tag is being added to. This field is only present if the entity type is schemaField.True
    parentUrnStringThe urn of a parent entity. This field is only present if the entity type is schemaField, and will contain the parent Dataset to which the field belongs.True

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "TAG",
    "operation": "ADD",
    "modifier": "urn:li:tag:PII"
    "parameters": {
    "tagUrn": "urn:li:tag:PII"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Remove Tag Event

    This event is emitted when a Tag has been removed from an entity on DataHub.

    Header

    CategoryOperationEntity Types
    TAGREMOVEdataset, dashboard, chart, dataJob, container, dataFlow, schemaField

    Parameters

    NameTypeDescriptionOptional
    tagUrnStringThe urn of the tag that has been removed.False
    fieldPathStringThe path of the schema field which the tag is being removed from. This field is only present if the entity type is schemaField.True
    parentUrnStringThe urn of a parent entity. This field is only present if the entity type is schemaField, and will contain the parent Dataset to which the field belongs.True

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "TAG",
    "operation": "REMOVE",
    "modifier": "urn:li:tag:PII",
    "parameters": {
    "tagUrn": "urn:li:tag:PII"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Add Glossary Term Event

    This event is emitted when a Glossary Term has been added to an entity on DataHub.

    Header

    CategoryOperationEntity Types
    GLOSSARY_TERMADDdataset, dashboard, chart, dataJob, container, dataFlow , schemaField

    Parameters

    NameTypeDescriptionOptional
    termUrnStringThe urn of the glossary term that has been added.False
    fieldPathStringThe path of the schema field to which the term is being added. This field is only present if the entity type is schemaField.True
    parentUrnStringThe urn of a parent entity. This field is only present if the entity type is schemaField, and will contain the parent Dataset to which the field belongs.True

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "GLOSSARY_TERM",
    "operation": "ADD",
    "modifier": "urn:li:glossaryTerm:ExampleNode.ExampleTerm",
    "parameters": {
    "termUrn": "urn:li:glossaryTerm:ExampleNode.ExampleTerm"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Remove Glossary Term Event

    This event is emitted when a Glossary Term has been removed from an entity on DataHub.

    Header

    CategoryOperationEntity Types
    GLOSSARY_TERMREMOVEdataset, dashboard, chart, dataJob, container, dataFlow , schemaField

    Parameters

    NameTypeDescriptionOptional
    termUrnStringThe urn of the glossary term that has been removed.False
    fieldPathStringThe path of the schema field from which the term is being removed. This field is only present if the entity type is schemaField.True
    parentUrnStringThe urn of a parent entity. This field is only present if the entity type is schemaField, and will contain the parent Dataset to which the field belongs.True

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "GLOSSARY_TERM",
    "operation": "REMOVE",
    "modifier": "urn:li:glossaryTerm:ExampleNode.ExampleTerm",
    "parameters": {
    "termUrn": "urn:li:glossaryTerm:ExampleNode.ExampleTerm"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Add Domain Event

    This event is emitted when a Domain has been added to an entity on DataHub.

    Header

    CategoryOperationEntity Types
    DOMAINADDdataset, dashboard, chart, dataJob, container, dataFlow

    Parameters

    NameTypeDescriptionOptional
    domainUrnStringThe urn of the domain that has been added.False

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "DOMAIN",
    "operation": "ADD",
    "modifier": "urn:li:domain:ExampleDomain",
    "parameters": {
    "domainUrn": "urn:li:domain:ExampleDomain"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Remove Domain Event

    This event is emitted when a Domain has been removed from an entity on DataHub.

    Header

    CategoryOperationEntity Types
    DOMAINREMOVEdataset, dashboard, chart, dataJob, container ,dataFlow

    Parameters

    NameTypeDescriptionOptional
    domainUrnStringThe urn of the domain that has been removed.False

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "DOMAIN",
    "operation": "REMOVE",
    "modifier": "urn:li:domain:ExampleDomain",
    "parameters": {
    "domainUrn": "urn:li:domain:ExampleDomain"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Add Owner Event

    This event is emitted when a new owner has been assigned to an entity on DataHub.

    Header

    CategoryOperationEntity Types
    OWNERADDdataset, dashboard, chart, dataJob, dataFlow , container, glossaryTerm, domain, tag

    Parameters

    NameTypeDescriptionOptional
    ownerUrnStringThe urn of the owner that has been added.False
    ownerTypeStringThe type of the owner that has been added. TECHNICAL_OWNER, BUSINESS_OWNER, DATA_STEWARD, NONE, etc.False

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "OWNER",
    "operation": "ADD",
    "modifier": "urn:li:corpuser:jdoe",
    "parameters": {
    "ownerUrn": "urn:li:corpuser:jdoe",
    "ownerType": "BUSINESS_OWNER"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Remove Owner Event

    This event is emitted when an existing owner has been removed from an entity on DataHub.

    Header

    CategoryOperationEntity Types
    OWNERREMOVEdataset, dashboard, chart, dataJob, container ,dataFlow , glossaryTerm, domain, tag

    Parameters

    NameTypeDescriptionOptional
    ownerUrnStringThe urn of the owner that has been removed.False
    ownerTypeStringThe type of the owner that has been removed. TECHNICAL_OWNER, BUSINESS_OWNER, DATA_STEWARD, NONE, etc.False

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "OWNER",
    "operation": "REMOVE",
    "modifier": "urn:li:corpuser:jdoe",
    "parameters": {
    "ownerUrn": "urn:li:corpuser:jdoe",
    "ownerType": "BUSINESS_OWNER"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Modify Deprecation Event

    This event is emitted when the deprecation status of an entity has been modified on DataHub.

    Header

    CategoryOperationEntity Types
    DEPRECATIONMODIFYdataset, dashboard, chart, dataJob, dataFlow , container

    Parameters

    NameTypeDescriptionOptional
    statusStringThe new deprecation status of the entity, either DEPRECATED or ACTIVE.False

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "DEPRECATION",
    "operation": "MODIFY",
    "modifier": "DEPRECATED",
    "parameters": {
    "status": "DEPRECATED"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Add Dataset Schema Field Event

    This event is emitted when a new field has been added to a Dataset Schema.

    Header

    CategoryOperationEntity Types
    TECHNICAL_SCHEMAADDdataset

    Parameters

    NameTypeDescriptionOptional
    fieldUrnStringThe urn of the new schema field.False
    fieldPathStringThe path of the new field. For more information about field paths, check out Dataset Field Paths ExplainedFalse
    nullableBooleanWhether the new field is nullable.False

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "TECHNICAL_SCHEMA",
    "operation": "ADD",
    "modifier": "urn:li:schemaField:(urn:li:dataset:abc,newFieldName)",
    "parameters": {
    "fieldUrn": "urn:li:schemaField:(urn:li:dataset:abc,newFieldName)",
    "fieldPath": "newFieldName",
    "nullable": false
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Remove Dataset Schema Field Event

    This event is emitted when an existing field has been removed from a Dataset Schema.

    Header

    CategoryOperationEntity Types
    TECHNICAL_SCHEMAREMOVEdataset

    Parameters

    NameTypeDescriptionOptional
    fieldUrnStringThe urn of the removed schema field.False
    fieldPathStringThe path of the removed field. For more information about field paths, check out Dataset Field Paths ExplainedFalse
    nullableBooleanWhether the removed field is nullable.False

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "TECHNICAL_SCHEMA",
    "operation": "REMOVE",
    "modifier": "urn:li:schemaField:(urn:li:dataset:abc,newFieldName)",
    "parameters": {
    "fieldUrn": "urn:li:schemaField:(urn:li:dataset:abc,newFieldName)",
    "fieldPath": "newFieldName",
    "nullable": false
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Entity Create Event

    This event is emitted when a new entity has been created on DataHub.

    Header

    CategoryOperationEntity Types
    LIFECYCLECREATEdataset, dashboard, chart, dataJob, dataFlow , glossaryTerm, domain, tag, container

    Parameters

    None

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "LIFECYCLE",
    "operation": "CREATE",
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Entity Soft-Delete Event

    This event is emitted when an entity has been soft-deleted on DataHub.

    Header

    CategoryOperationEntity Types
    LIFECYCLESOFT_DELETEdataset, dashboard, chart, dataJob, dataFlow , glossaryTerm, domain, tag, container

    Parameters

    None

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "LIFECYCLE",
    "operation": "SOFT_DELETE",
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Entity Hard-Delete Event

    This event is emitted when an entity has been hard-deleted on DataHub.

    Header

    CategoryOperationEntity Types
    LIFECYCLEHARD_DELETEdataset, dashboard, chart, dataJob, dataFlow , glossaryTerm, domain, tag, container

    Parameters

    None

    Sample Event

    {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "LIFECYCLE",
    "operation": "HARD_DELETE",
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Completed Assertion Run Event

    This event is emitted when an Assertion run has completed on DataHub, whether it succeeded or failed.

    Header

    CategoryOperationEntity Types
    RUNCOMPLETEDassertion

    Parameters

    NameTypeDescriptionOptional
    runResultStringThe result of the run, either SUCCESS or FAILURE.False
    runIdStringNative (platform-specific) identifier for this run.False
    aserteeUrnStringUrn of entity on which the assertion is applicable.False

    Sample Event

    {
    "entityUrn": "urn:li:assertion:abc",
    "entityType": "assertion",
    "category": "RUN",
    "operation": "COMPLETED",
    "parameters": {
    "runResult": "SUCCESS",
    "runId": "123",
    "aserteeUrn": "urn:li:dataset:def"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Started Data Process Instance Run Event

    This event is emitted when a Data Process Instance Run has STARTED on DataHub.

    Header

    CategoryOperationEntity Types
    RUNSTARTEDdataProcessInstance

    Parameters

    NameTypeDescriptionOptional
    attemptIntegerThe number of attempts that have been made.True
    dataFlowUrnStringThe urn of the associated Data Flow. Only filled in if this run is associated with a Data Flow.True
    dataJobUrnStringThe urn of the associated Data Job. Only filled in if this run is associated with a Data Job.True
    parentInstanceUrnStringUrn of the parent DataProcessInstance (if there is one).True

    Sample Event

    {
    "entityUrn": "urn:li:dataProcessInstance:abc",
    "entityType": "dataProcessInstance",
    "category": "RUN",
    "operation": "STARTED",
    "parameters": {
    "dataFlowUrn": "urn:li:dataFlow:def",
    "attempt": "1",
    "parentInstanceUrn": ""urn:li:dataProcessInstance:ghi"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Completed Data Process Instance Run Event

    This event is emitted when a Data Process Instance Run has been COMPLETED on DataHub.

    Header

    CategoryOperationEntity Types
    RUNCOMPLETEDdataProcessInstance

    Parameters

    NameTypeDescriptionOptional
    runResultStringThe result of the run, one of SUCCESS , FAILURE, SKIPPED, or UP_FOR_RETRY .False
    attemptIntegerThe number of attempts that have been made.True
    dataFlowUrnStringThe urn of the associated Data Flow. Only filled in if this run is associated with a Data Flow.True
    dataJobUrnStringThe urn of the associated Data Job. Only filled in if this run is associated with a Data Job.True
    parentInstanceUrnStringUrn of the parent DataProcessInstance.True

    Sample Event

    {
    "entityUrn": "urn:li:dataProcessInstance:abc",
    "entityType": "dataProcessInstance",
    "category": "RUN",
    "operation": "COMPLETED",
    "parameters": {
    "runResult": "SUCCESS"
    "attempt": "2",
    "dataFlowUrn": "urn:li:dataFlow:def",
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Action Request Created Event

    This event is emitted when a new Action Request (Metadata Proposal) has been created.

    Header

    CategoryOperationEntity Types
    LIFECYCLECREATEDactionRequest

    Parameters

    These are the common parameters for all Action Request create events.

    NameTypeDescriptionOptional
    actionRequestTypeStringThe type of Action Request. One of TAG_ASSOCIATION, TERM_ASSOCIATION, CREATE_GLOSSARY_NODE, CREATE_GLOSSARY_TERM, or UPDATE_DESCRIPTION.False
    resourceTypeStringThe type of entity this Action Request is applied on, such as dataset.True
    resourceUrnStringThe entity this Action Request is applied on.True
    subResourceTypeStringFilled if this Action Request is applied on a sub-resource, such as a schemaField.True
    subResourceStringIdentifier of the sub-resource if this proposal is applied on one.True

    Parameters specific to different proposal types are listed below.

    Tag Association Proposal Specific Parameters and Sample Event

    NameTypeDescriptionOptional
    tagUrnStringThe urn of the Tag that would be applied.False
    {
    "entityUrn": "urn:li:actionRequest:abc",
    "entityType": "actionRequest",
    "category": "LIFECYCLE",
    "operation": "CREATED",
    "parameters": {
    "actionRequestType": "TAG_ASSOCIATION",
    "resourceType": "dataset",
    "resourceUrn": "urn:li:dataset:snowflakeDataset,
    "tagUrn": "urn:li:tag:Classification"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Term Association Proposal Specific Parameters and Sample Event

    NameTypeDescriptionOptional
    termUrnStringThe urn of the Glossary Term that would be applied.False
    {
    "entityUrn": "urn:li:actionRequest:abc",
    "entityType": "actionRequest",
    "category": "LIFECYCLE",
    "operation": "CREATED",
    "parameters": {
    "actionRequestType": "TERM_ASSOCIATION",
    "resourceType": "dataset",
    "resourceUrn": "urn:li:dataset:snowflakeDataset,
    "termUrn": "urn:li:glossaryTerm:Classification"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Create Glossary Node/Term Proposal Specific Parameters and Sample Event

    NameTypeDescriptionOptional
    glossaryEntityNameStringThe name of the Glossary Entity that would be created.False
    parentNodeUrnStringThe urn of the Parent Node that would be associated with the new Glossary Entity.True
    descriptionStringThe description of the new Glossary Entity.True
    {
    "entityUrn": "urn:li:actionRequest:abc",
    "entityType": "actionRequest",
    "category": "LIFECYCLE",
    "operation": "CREATED",
    "parameters": {
    "actionRequestType": "CREATE_GLOSSARY_TERM",
    "resourceType": "glossaryNode",
    "glossaryEntityName": "PII",
    "parentNodeUrn": "urn:li:glossaryNode:Classification",
    "description": "Personally Identifiable Information"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Update Description Proposal Specific Parameters

    NameTypeDescriptionOptional
    descriptionStringThe proposed updated description.False
    {
    "entityUrn": "urn:li:actionRequest:abc",
    "entityType": "actionRequest",
    "category": "LIFECYCLE",
    "operation": "CREATED",
    "parameters": {
    "actionRequestType": "UPDATE_DESCRIPTION",
    "resourceType": "glossaryNode",
    "description": "Personally Identifiable Information"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }

    Action Request Status Change Event

    This event is emitted when an existing Action Request (proposal) changes status. For example, this event will be emitted when an Action Request transitions from pending to completed.

    Header

    CategoryOperationEntity Types
    LIFECYCLEPENDING, COMPLETEDactionRequest

    Parameters

    These are the common parameters for all Action Request status change events.

    NameTypeDescriptionOptional
    actionRequestStatusStringThe status of the Action Request.False
    actionRequestResultStringOnly filled if the actionRequestStatus is COMPLETED. Either ACCEPTED or REJECTED.True

    Sample Event

    {
    "entityUrn": "urn:li:actionRequest:abc",
    "entityType": "actionRequest",
    "category": "LIFECYCLE",
    "operation": "COMPLETED",
    "parameters": {
    "actionRequestStatus": "COMPLETED",
    "actionRequestResult": "ACCEPTED"
    },
    "auditStamp": {
    "actor": "urn:li:corpuser:jdoe",
    "time": 1649953100653
    }
    }
    - + \ No newline at end of file diff --git a/docs/managed-datahub/datahub-api/graphql-api/getting-started/index.html b/docs/managed-datahub/datahub-api/graphql-api/getting-started/index.html index 80cca81dc24f2..b6c96b84bca3c 100644 --- a/docs/managed-datahub/datahub-api/graphql-api/getting-started/index.html +++ b/docs/managed-datahub/datahub-api/graphql-api/getting-started/index.html @@ -8,13 +8,13 @@ - +

    Getting Started

    The Acryl DataHub GraphQL API is an extension of the open source DataHub GraphQL API.

    For a full reference to the Queries & Mutations available for consumption, check out Queries & Mutations.

    Connecting to the API

    When you generate the token you will see an example curl command which you can use to connect to the GraphQL API.

    Note that there is a single URL mentioned there but it can be any of these

    • https://your-account.acryl.io/api/graphql
    • https://your-account.acryl.io/api/gms/graphql

    If there is any example that requires you to connect to GMS then you can use the second URL and change the endpoints.

    e.g. to get configuration of your GMS server you can use

    curl -X GET 'https://your-account.acryl.io/api/gms/config' --header 'Authorization: Bearer <YOUR_TOKEN>'

    e.g. to connect to ingestion endpoint for doing ingestion programmatically you can use the below URL

    Exploring the API

    The entire GraphQL API can be explored & introspected using GraphiQL, an interactive query tool which allows you to navigate the entire Acryl GraphQL schema as well as craft & issue queries using an intuitive UI.

    GraphiQL is available for each Acryl DataHub deployment, located at https://your-account.acryl.io/api/graphiql.

    Querying the API

    Currently, we do not offer language-specific SDKs for accessing the DataHub GraphQL API. For querying the API, you can make use of a variety of per-language client libraries. For a full list, see GraphQL Code Libraries, Tools, & Services.

    - + \ No newline at end of file diff --git a/docs/managed-datahub/datahub-api/graphql-api/incidents-api-beta/index.html b/docs/managed-datahub/datahub-api/graphql-api/incidents-api-beta/index.html index 4164a88c4bb3c..654af772b1e9d 100644 --- a/docs/managed-datahub/datahub-api/graphql-api/incidents-api-beta/index.html +++ b/docs/managed-datahub/datahub-api/graphql-api/incidents-api-beta/index.html @@ -8,13 +8,13 @@ - +

    Incidents API (Beta)

    Feature Availability
    Self-Hosted DataHub
    Managed DataHub

    Introduction

    Incidents are a concept used to flag particular Data Assets as being in an unhealthy state. Each incident has an independent lifecycle and details including a state (active, resolved), a title, a description, & more.

    A couple scenarios in which incidents can be useful are

    1. Pipeline Circuit Breaking: You can use Incidents as the basis for intelligent data pipelines that verify upstream inputs (e.g. datasets) are free of any active incidents before executing.
    2. [Coming Soon] Announcing Known-Bad Assets: You can mark a known-bad data asset as under an ongoing incident so consumers and stakeholders can be informed about the health status of a data asset via the DataHub UI. Moreover, they can follow the incident as it progresses toward resolution.

    In the next section, we'll show you how to

    1. Create a new incident
    2. Fetch all incidents for a data asset
    3. Resolve an incident

    for Datasets using the Acryl GraphQL API.

    Let's get started!

    Creating an Incident

    info

    Creating incidents is currently only supported against Dataset assets.

    To create (i.e. raise) a new incident for a data asset, simply create a GraphQL request using the raiseIncident mutation.

    type Mutation {
    """
    Raise a new incident for a data asset
    """
    raiseIncident(input: RaiseIncidentInput!): String! # Returns new Incident URN.
    }

    input RaiseIncidentInput {
    """
    The type of incident, e.g. OPERATIONAL
    """
    type: IncidentType!

    """
    A custom type of incident. Present only if type is 'CUSTOM'
    """
    customType: String

    """
    An optional title associated with the incident
    """
    title: String

    """
    An optional description associated with the incident
    """
    description: String

    """
    The resource (dataset, dashboard, chart, dataFlow, etc) that the incident is associated with.
    """
    resourceUrn: String!

    """
    The source of the incident, i.e. how it was generated
    """
    source: IncidentSourceInput
    }

    Examples

    First, we'll create a demo GraphQL query, then show how to represent it via CURL & Python.

    Imagine we want to raise a new incident on a Dataset with URN urn:li:dataset:(abc) because it's failed automated quality checks. To do so, we could make the following GraphQL query:

    Request

    mutation raiseIncident {
    raiseIncident(input: {
    type: OPERATIONAL
    title: "Dataset Failed Quality Checks"
    description: "Dataset failed 2/6 Quality Checks for suite run id xy123mksj812pk23."
    resourceUrn: "urn:li:dataset:(urn:li:dataPlatform:kafka,SampleKafkaDataset,PROD)"
    })
    }

    After we make this query, we will get back a unique URN for the incident.

    Response

    {
    "data": {
    "raiseIncident": "urn:li:incident:bfecab62-dc10-49a6-a305-78ce0cc6e5b1"
    }
    }

    Now we'll see how to issue this query using a CURL or Python.

    CURL

    To issue the above GraphQL as a CURL:

    curl --location --request POST 'https://your-account.acryl.io/api/graphql' \
    --header 'Authorization: Bearer your-access-token' \
    --header 'Content-Type: application/json' \
    --data-raw '{"query":"mutation raiseIncident {\n raiseIncident(input: {\n type: OPERATIONAL\n title: \"Dataset Failed Quality Checks\"\n description: \"Dataset failed 2/6 Quality Checks for suite run id xy123mksj812pk23.\"\n resourceUrn: \"urn:li:dataset:(urn:li:dataPlatform:kafka,SampleKafkaDataset,PROD)\"\n })\n}","variables":{}}'

    Python

    To issue the above GraphQL query in Python (requests):

    import requests

    datahub_session = requests.Session()

    headers = {
    "Content-Type": "application/json",
    "Authorization": "Bearer your-personal-access-token",
    }

    json = {
    "query": """mutation raiseIncident {\n
    raiseIncident(input: {\n
    type: OPERATIONAL\n
    resourceUrn: \"urn:li:dataset:(urn:li:dataPlatform:kafka,SampleKafkaDataset,PROD)\"\n
    })}""",
    "variables": {},
    }

    response = datahub_session.post(f"https://your-account.acryl.io/api/graphql", headers=headers, json=json)
    response.raise_for_status()
    res_data = response.json() # Get result as JSON

    Retrieving Active Incidents

    To fetch the ongoing incidents for a data asset, we can use the incidents GraphQL field on the entity of interest.

    Datasets

    To retrieve all incidents for a Dataset with a particular URN, you can reference the 'incidents' field of the Dataset type:

    type Dataset {
    ....
    """
    Incidents associated with the Dataset
    """
    incidents(
    """
    Optional incident state to filter by, defaults to any state.
    """
    state: IncidentState,
    """
    Optional start offset, defaults to 0.
    """
    start: Int,
    """
    Optional number of results to return, defaults to 20.
    """
    count: Int): EntityIncidentsResult # Returns a list of incidents.
    }

    Examples

    Now that we've raised an incident on it, imagine we want to fetch the first 10 "active" incidents for the Dataset with URN urn:li:dataset:(abc). To do so, we could issue the following request:

    Request

    query dataset {
    dataset(urn: "urn:li:dataset:(abc)") {
    incidents(state: ACTIVE, start: 0, count: 10) {
    total
    incidents {
    urn
    title
    description
    status {
    state
    }
    }
    }
    }
    }

    After we make this query, we will get back the total count and the list of matching active incidents for the Dataset.

    Response

    {
    "data": {
    "dataset": {
    "incidents": {
    "total": 1,
    "incidents": [
    {
    "urn": "urn:li:incident:bfecab62-dc10-49a6-a305-78ce0cc6e5b1",
    "title": "Dataset Failed Quality Check",
    "description": "Dataset failed 2/6 Quality Checks for suite run id xy123mksj812pk23.",
    "status": {
    "state": "ACTIVE"
    }
    }
    ]
    }
    }
    }
    }

    Now we'll see how to issue this query using a CURL or Python.

    CURL

    To issue the above GraphQL as a CURL:

    curl --location --request POST 'https://your-account.acryl.io/api/graphql' \
    --header 'Authorization: Bearer your-access-token' \
    --header 'Content-Type: application/json' \
    --data-raw '{"query":"query dataset {\n dataset(urn: \"urn:li:dataset:(abc)\") {\n incidents(state: ACTIVE, start: 0, count: 10) {\n total\n incidents {\n urn\n title\n description\n status {\n state\n }\n }\n }\n }\n}","variables":{}}'

    Python

    To issue the above GraphQL query in Python (requests):

    import requests

    datahub_session = requests.Session()

    headers = {
    "Content-Type": "application/json",
    "Authorization": "Bearer your-personal-access-token",
    }

    json = {
    "query": """query dataset {\n
    dataset(urn: "urn:li:dataset:(abc)") {\n
    incidents(state: ACTIVE, start: 0, count: 10) {\n
    total\n
    incidents {\n
    urn\n
    title\n
    description\n
    status {\n
    state\n
    }\n
    }\n
    }\n
    }\n
    }""",
    "variables": {},
    }

    response = datahub_session.post(f"https://your-account.acryl.io/api/graphql", headers=headers, json=json)
    response.raise_for_status()
    res_data = response.json() # Get result as JSON

    Resolving an Incident

    To resolve an incident for a data asset, simply create a GraphQL request using the updateIncidentStatus mutation. To mark an incident as resolved, simply update its state to RESOLVED.

    type Mutation {
    """
    Update an existing incident for a resource (asset)
    """
    updateIncidentStatus(
    """
    The urn for an existing incident
    """
    urn: String!

    """
    Input required to update the state of an existing incident
    """
    input: UpdateIncidentStatusInput!): String
    }

    """
    Input required to update status of an existing incident
    """
    input UpdateIncidentStatusInput {
    """
    The new state of the incident
    """
    state: IncidentState!

    """
    An optional message associated with the new state
    """
    message: String
    }

    Examples

    Imagine that we've fixed our Dataset with urn urn:li:dataset:(abc) so that it's passing validation. Now we want to mark the Dataset as healthy, so stakeholders and downstream consumers know it's ready to use.

    To do so, we need the URN of the Incident that we raised previously.

    Request

    mutation updateIncidentStatus {
    updateIncidentStatus(urn: "urn:li:incident:bfecab62-dc10-49a6-a305-78ce0cc6e5b1",
    input: {
    state: RESOLVED
    message: "Dataset is now passing validations. Verified by John Joyce on Data Platform eng."
    })
    }

    Response

    {
    "data": {
    "updateIncidentStatus": "true"
    }
    }

    True is returned if the incident was successfully marked as resolved.

    CURL

    To issue the above GraphQL as a CURL:

    curl --location --request POST 'https://your-account.acryl.io/api/graphql' \
    --header 'Authorization: Bearer your-access-token' \
    --header 'Content-Type: application/json' \
    --data-raw '{"query":"mutation updateIncidentStatus {\n updateIncidentStatus(urn: \"urn:li:incident:bfecab62-dc10-49a6-a305-78ce0cc6e5b1\", \n input: {\n state: RESOLVED\n message: \"Dataset is now passing validations. Verified by John Joyce on Data Platform eng.\"\n })\n}","variables":{}}'

    Python

    To issue the above GraphQL query in Python (requests):

    import requests

    datahub_session = requests.Session()

    headers = {
    "Content-Type": "application/json",
    "Authorization": "Bearer your-personal-access-token",
    }

    json = {
    "query": """mutation updateIncidentStatus {\n
    updateIncidentStatus(urn: \"urn:li:incident:bfecab62-dc10-49a6-a305-78ce0cc6e5b1\",\n
    input: {\n
    state: RESOLVED\n
    message: \"Dataset is now passing validations. Verified by John Joyce on Data Platform eng.\"\n
    })\n
    }""",
    "variables": {},
    }

    response = datahub_session.post(f"https://your-account.acryl.io/api/graphql", headers=headers, json=json)
    response.raise_for_status()
    res_data = response.json() # Get result as JSON

    Tips

    info

    Authorization

    Remember to always provide a DataHub Personal Access Token when calling the GraphQL API. To do so, just add the 'Authorization' header as follows:

    Authorization: Bearer <personal-access-token>

    Exploring GraphQL API

    Also, remember that you can play with an interactive version of the Acryl GraphQL API at https://your-account-id.acryl.io/api/graphiql

    Enabling Slack Notifications

    You can configure Acryl to send Slack notifications to a specific channel when incidents are raised or their status is changed.

    These notifications are also able to tag the immediate asset's owners, along with the owners of downstream assets consuming it.

    To do so, simply follow the Slack Integration Guide and contact your Acryl customer success team to enable the feature!

    - + \ No newline at end of file diff --git a/docs/managed-datahub/integrations/aws-privatelink/index.html b/docs/managed-datahub/integrations/aws-privatelink/index.html index ae6f9f8908f31..a8213a269aed4 100644 --- a/docs/managed-datahub/integrations/aws-privatelink/index.html +++ b/docs/managed-datahub/integrations/aws-privatelink/index.html @@ -8,13 +8,13 @@ - +

    AWS PrivateLink

    Feature Availability
    Self-Hosted DataHub
    Managed DataHub

    If you require a private connection between the provisioned DataHub instance and your own existing AWS account, Acryl supports using AWS PrivateLink in order to complete this private connection.

    In order to complete this connection, the Acryl integrations team will require the AWS ARN for a user or role that can accept and complete the connection to your AWS account.

    Once that team reports the PrivateLink has been created, the team will give you a VPC Endpoint Service Name to use.

    In order to complete the connection, you will have to create a VPC Endpoint in your AWS account. To do so, please follow these instructions:

    info

    Before following the instructions below, please create a VPC security group with ports 80, and 443 (Both TCP) and any required CIDR blocks or other sources as an inbound rule

    1. Open the AWS console to the region that the VPC Endpoint Service is created (Generally this will be in us-west-2 (Oregon) but will be seen in the service name itself)
    2. Browse to the VPC Service and click on Endpoints
    3. Click on Create Endpoint in the top right corner
    4. Give the endpoint a name tag (such as datahub-pl)
    5. Click on the Other endpoint services radio button
    6. In the Service setting, copy the service name that was given to you by the integrations team into the Service name field and click Verify Service
    7. Now select the VPC from the dropdown menu where the endpoint will be created.
    8. A list of availability zones will now be shown in the Subnets section. Please select at least 1 availability zone and then a corresponding subnet ID from the drop down menu to the right of that AZ.
    9. Choose IPv4 for the IP address type
    10. Choose an existing security group (or multiple) to use on this endpoint
    11. (Optional) For Policy, you can keep it on Full access or custom if you have specific access requirements
    12. (Optional) Create any tags you wish to add to this endpoint
    13. Click Create endpoint
    14. Once it has been created, Acryl will need to accept the incoming connection from your AWS account; the integrations team will advise you when this has been completed.
    - + \ No newline at end of file diff --git a/docs/managed-datahub/integrations/oidc-sso-integration/index.html b/docs/managed-datahub/integrations/oidc-sso-integration/index.html index 58a8fcc0b807d..ba42f65ec73f0 100644 --- a/docs/managed-datahub/integrations/oidc-sso-integration/index.html +++ b/docs/managed-datahub/integrations/oidc-sso-integration/index.html @@ -8,13 +8,13 @@ - +

    OIDC SSO Integration

    Feature Availability
    Self-Hosted DataHub
    Managed DataHub

    Note that we do not yet support LDAP or SAML authentication. Please let us know if either of these integrations would be useful for your organization.

    If you'd like to do a deeper dive into OIDC configuration outside of the UI, please see our docs here

    Getting Details From Your Identity Provider

    To set up the OIDC integration, you will need the following pieces of information.

    1. Client ID - A unique identifier for your application with the identity provider
    2. Client Secret - A shared secret to use for exchange between you and your identity provider.
    3. Discovery URL - A URL where the OIDC API of your identity provider can be discovered. This should be suffixed by .well-known/openid-configuration. Sometimes, identity providers will not explicitly include this URL in their setup guides, though this endpoint will exist as per the OIDC specification. For more info see here.

    The callback URL to register in your Identity Provider will be

    https://<your-acryl-domain>.acryl.io/callback/oidc 

    Configuring OIDC SSO

    In order to set up the OIDC SSO integration, the user must have the Manage Platform Settings privilege.

    Enabling the OIDC Integration

    To enable the OIDC integration, start by navigating to Settings > Platform > SSO.

    1. Click OIDC
    2. Enable the Integration
    3. Enter the Client ID, Client Secret, and Discovery URI obtained in the previous steps
    4. If there are any advanced settings you would like to configure, click on the Advanced button. These come with defaults, so only input settings here if there is something you need changed from the default configuration.
    5. Click Update to save your settings.

    - + \ No newline at end of file diff --git a/docs/managed-datahub/managed-datahub-overview/index.html b/docs/managed-datahub/managed-datahub-overview/index.html index e2e89b65f4b96..008d3f75d7a13 100644 --- a/docs/managed-datahub/managed-datahub-overview/index.html +++ b/docs/managed-datahub/managed-datahub-overview/index.html @@ -8,13 +8,13 @@ - + - + \ No newline at end of file diff --git a/docs/managed-datahub/metadata-ingestion-with-acryl/ingestion/index.html b/docs/managed-datahub/metadata-ingestion-with-acryl/ingestion/index.html index 4be933cabb840..4bf7354177c11 100644 --- a/docs/managed-datahub/metadata-ingestion-with-acryl/ingestion/index.html +++ b/docs/managed-datahub/metadata-ingestion-with-acryl/ingestion/index.html @@ -8,7 +8,7 @@ - + @@ -27,7 +27,7 @@ To do so, simply run datahub ingest with a pointer to your YAML recipe file:

    datahub ingest -c ./example-recipe.yml

    Step 5: Scheduling Ingestion

    Ingestion can either be run in an ad-hoc manner by a system administrator or scheduled for repeated executions. Most commonly, ingestion will be run on a daily cadence. To schedule your ingestion job, we recommend using a job schedule like Apache Airflow. In cases of simpler deployments, a CRON job scheduled on an always-up machine can also work. Note that each source system will require a separate recipe file. This allows you to schedule ingestion from different sources independently or together.

    Looking for information on real-time ingestion? Click here.

    Note: Real-time ingestion setup is not recommended for an initial POC as it generally takes longer to configure and is prone to inevitable system errors.

    - + \ No newline at end of file diff --git a/docs/managed-datahub/observe/freshness-assertions/index.html b/docs/managed-datahub/observe/freshness-assertions/index.html index 81fc658ae12cc..072ebdced2571 100644 --- a/docs/managed-datahub/observe/freshness-assertions/index.html +++ b/docs/managed-datahub/observe/freshness-assertions/index.html @@ -8,7 +8,7 @@ - + @@ -81,7 +81,7 @@ Edit Assertions and Edit Monitors privileges for it.

    GraphQL

    In order to create a Freshness Assertion that is being monitored on a specific Evaluation Schedule, you'll need to use 2 GraphQL mutation queries to create a Freshness Assertion entity and create an Assertion Monitor entity responsible for evaluating it.

    Start by creating the Freshness Assertion entity using the createFreshnessAssertion query and hang on to the 'urn' field of the Assertion entity you get back. Then continue by creating a Monitor entity using the createAssertionMonitor.

    Examples

    To create a Freshness Assertion Entity that checks whether a table has been updated in the past 8 hours:

    mutation createFreshnessAssertion {
    createFreshnessAssertion(
    input: {
    entityUrn: "<urn of the table to be monitored>"
    type: DATASET_CHANGE
    schedule: {
    type: FIXED_INTERVAL
    fixedInterval: { unit: HOUR, multiple: 8 }
    }
    }
    ) {
    urn
    }
    }

    This defines the user's expectation: that the table should have changed in the past 8 hours whenever the assertion is evaluated.

    To create an Assertion Monitor Entity that evaluates the assertion every 8 hours using the Audit Log:

    mutation createAssertionMonitor {
    createAssertionMonitor(
    input: {
    entityUrn: "<urn of entity being monitored>",
    assertionUrn: "<urn of assertion created in first query>",
    schedule: {
    cron: "0 */8 * * *",
    timezone: "America/Los_Angeles"
    },
    parameters: {
    type: DATASET_FRESHNESS,
    datasetFreshnessParameters: {
    sourceType: AUDIT_LOG,
    }
    }
    }
    ) {
    urn
    }
    }

    This entity defines when to run the check (Using CRON format - every 8th hour) and how to run the check (using the Audit Log).

    After creating the monitor, the new assertion will start to be evaluated every 8 hours in your selected timezone.

    You can delete assertions along with their monitors using GraphQL mutations: deleteAssertion and deleteMonitor.

    Tips

    info

    Authorization

    Remember to always provide a DataHub Personal Access Token when calling the GraphQL API. To do so, just add the 'Authorization' header as follows:

    Authorization: Bearer <personal-access-token>

    Exploring GraphQL API

    Also, remember that you can play with an interactive version of the Acryl GraphQL API at https://your-account-id.acryl.io/api/graphiql

    - + \ No newline at end of file diff --git a/docs/managed-datahub/operator-guide/setting-up-events-api-on-aws-eventbridge/index.html b/docs/managed-datahub/operator-guide/setting-up-events-api-on-aws-eventbridge/index.html index b8f92531b0497..290147eace06e 100644 --- a/docs/managed-datahub/operator-guide/setting-up-events-api-on-aws-eventbridge/index.html +++ b/docs/managed-datahub/operator-guide/setting-up-events-api-on-aws-eventbridge/index.html @@ -8,13 +8,13 @@ - +

    Setting up Events API on AWS EventBridge

    Feature Availability
    Self-Hosted DataHub
    Managed DataHub

    Entity Events API

    • See the Entity Events API Docs here

    Event Structure

    As with all AWS EventBridge events, the payload itself will be wrapped by a set of standard fields, outlined here. The most notable include

    • source: A unique identifier for the source of the event. We tend to use `acryl.events` by default.
    • account: The account in which the event originated. This will be the Acryl AWS Account ID provided by your Acryl customer success rep.
    • detail: The place where the Entity Event payload will appear.

    Sample Event

    {
    "version": "0",
    "id": "6a7e8feb-b491-4cf7-a9f1-bf3703467718",
    "detail-type": "entityChangeEvent",
    "source": "acryl.events",
    "account": "111122223333",
    "time": "2017-12-22T18:43:48Z",
    "region": "us-west-1",
    "detail": {
    "entityUrn": "urn:li:dataset:abc",
    "entityType": "dataset",
    "category": "TAG",
    "operation": "ADD",
    "modifier": "urn:li:tag:pii",
    "parameters": {
    "tagUrn": "urn:li:tag:pii"
    }
    }
    }

    Sample Pattern

    { 
    "source": ["acryl.events"],
    "detail": {
    "category": ["TAG"],
    "parameters": {
    "tagUrn": ["urn:li:tag:pii"]
    }
    }
    }

    Sample Event Pattern Filtering any Add Tag Events on a PII Tag

    Step 1: Create an Event Bus

    We recommend creating a dedicated event bus for Acryl. To do so, follow the steps below:

    1. Navigate to the AWS console inside the account where you will deploy Event Bridge.

    2. Search and navigate to the EventBridge page.

    3. Navigate to the Event Buses tab.

    4. Click Create Event Bus.

    5. Give the new bus a name, e.g. acryl-events.

    6. Define a Resource Policy

    When creating your new event bus, you need to create a Policy that allows the Acryl AWS account to publish messages to the bus. This involves granting the PutEvents privilege to the Acryl account via an account id.

    Sample Policy

    {
    "Version": "2012-10-17",
    "Statement": [{
    "Sid": "allow_account_to_put_events",
    "Effect": "Allow",
    "Principal": {
    "AWS": "arn:aws:iam::795586375822:root"
    },
    "Action": "events:PutEvents",
    "Resource": "<event-bus-arn>"
    }]
    }

    Notice that you'll need to populate the following fields on your own

    • event-bus-arn: This is the AWS ARN of your new event bus.

    Step 2: Create a Routing Rule

    Once you've defined an event bus, you need to create a rule for routing incoming events to your destinations, for example an SQS queue, a Lambda function, a Log Group, etc.

    To do so, follow the below steps

    1. Navigate to the Rules tab.

    2. Click Create Rule.

    3. Give the rule a name. This will usually depend on the target where you intend to route requests matching the rule.

    4. In the Event Bus field, select the event bus created in Step 1.

    5. Select the 'Rule with Event Pattern' option

    6. Click Next.

    7. For Event Source, choose Other

    8. **Optional**: Define a Sample Event. You can use the Sample Event defined in the **Event Structure** section above.

    9. Define a matching Rule. This determines which Acryl events will be routed based on the current rule. You can use the Sample Rule defined in the Event Structure section above as a reference.

    10. Define a Target: This defines where the events that match the rule should be routed.

    Step 3: Configure Acryl to Send Events

    Once you've completed these steps, communicate the following information to your Acryl Customer Success rep:

    • The ARN of the new Event Bus.
    • The AWS region in which the Event Bus is located.

    This will enable Acryl to begin sending events to your EventBridge bus.

    __

    - + \ No newline at end of file diff --git a/docs/managed-datahub/operator-guide/setting-up-remote-ingestion-executor-on-aws/index.html b/docs/managed-datahub/operator-guide/setting-up-remote-ingestion-executor-on-aws/index.html index fffb9e94a5a1b..26def720f93f3 100644 --- a/docs/managed-datahub/operator-guide/setting-up-remote-ingestion-executor-on-aws/index.html +++ b/docs/managed-datahub/operator-guide/setting-up-remote-ingestion-executor-on-aws/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ Providing secrets enables you to manage ingestion sources from the DataHub UI without storing credentials inside DataHub. Once defined, secrets can be referenced by name inside of your DataHub Ingestion Source configurations using the usual convention: ${SECRET_NAME}.

    Note that the only external secret provider that is currently supported is AWS Secrets Manager.

    1. Test the Executor: To test your remote executor:

      1. Create a new Ingestion Source by clicking 'Create new Source' the 'Ingestion' tab of the DataHub console. Configure your Ingestion Recipe as though you were running it from inside of your environment.

      2. When working with "secret" fields (passwords, keys, etc), you can refer to any "self-managed" secrets by name: ${SECRET_NAME}:

        Using a secret called BQ_DEPLOY_KEY which is managed in AWS secrets manager

      3. In the 'Finish Up' step, click 'Advanced'.

      4. Update the 'Executor Id' form field to be 'remote'. This indicates that you'd like to use the remote executor.

      5. Click 'Done'.

      Now, simply click 'Execute' to test out the remote executor. If your remote executor is configured properly, you should promptly see the ingestion task state change to 'Running'.

    Updating a Remote Ingestion Executor

    In order to update the executor, i.e. to deploy a new container version, you'll need to update the CloudFormation Stack to re-deploy the CloudFormation template with a new set of parameters.

    Steps - AWS Console

    1. Navigate to CloudFormation in AWS Console
    2. Select the stack dedicated to the remote executor
    3. Click Update
    4. Select Replace Current Template
    5. Select Upload a template file
    6. Upload a copy of the Acryl Remote Executor CloudFormation Template
    7. Click Next
    8. Change parameters based on your modifications (e.g. ImageTag, etc)
    9. Click Next
    10. Confirm your parameter changes, and update. This should perform the necessary upgrades.

    FAQ

    If I need to change (or add) a secret that is stored in AWS Secrets Manager, e.g. for rotation, will the new secret automatically get picked up by Acryl's executor?

    Unfortunately, no. Secrets are wired into the executor container at deployment time, via environment variables. Therefore, the ECS Task will need to be restarted (either manually or via a stack parameter update) whenever your secrets change.

    I want to deploy multiple Acryl Executors. Is this currently possible?

    This is possible, but requires that a new SQS queue be maintained (one per executor). Please contact your Acryl representative for more information.

    I've run the CloudFormation Template, how can I tell that the container was successfully deployed?

    We recommend verifying in AWS Console by navigating to ECS > Cluster > Stack Name > Services > Logs. When you first deploy the executor, you should see a single log line to indicate success:

    Starting AWS executor consumer..

    This indicates that the remote executor has established a successful connection to your DataHub instance and is ready to execute ingestion runs. If you DO NOT see this log line, but instead see something else, please contact your Acryl representative for support.

    Release Notes

    This is where release notes for the Acryl Remote Executor Container will live.

    v0.0.3.9

    Bumping to the latest version of acryl-executor, which includes smarter messaging around OOM errors.

    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_1_69/index.html b/docs/managed-datahub/release-notes/v_0_1_69/index.html index ca39092a8a101..ef7f3da57bef6 100644 --- a/docs/managed-datahub/release-notes/v_0_1_69/index.html +++ b/docs/managed-datahub/release-notes/v_0_1_69/index.html @@ -8,13 +8,13 @@ - +

    v0.1.69


    This is a scheduled release which contains all changes from OSS DataHub up to commit 10a31b1aa08138c616c0e44035f8f843bef13085. In addition to all the features added in OSS DataHub below are Managed DataHub specific release notes.

    Release Availability Date

    06 Dec 2022

    Release Changelog


    • We now support >10k results in Metadata Test results
    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_1_70/index.html b/docs/managed-datahub/release-notes/v_0_1_70/index.html index a18351d740252..940792ab23008 100644 --- a/docs/managed-datahub/release-notes/v_0_1_70/index.html +++ b/docs/managed-datahub/release-notes/v_0_1_70/index.html @@ -8,13 +8,13 @@ - +

    v0.1.70


    This is a scheduled release which contains all changes from OSS DataHub up to commit 70659711a841bcce4bb1e0350027704b3783f6a5. In addition to all the features added in OSS DataHub below are Managed DataHub specific release notes.

    Release Availability Date

    30 Dec 2022

    Release Changelog


    • Improvements in Caching implementation to fix search consistency problems
    • We have heard many organisations ask for metrics for the SaaS product. We have made good progress towards this goal which allows us to share Grafana dashboards. We will be testing it selectively. Expect more updates in the coming months on this.
    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_1_72/index.html b/docs/managed-datahub/release-notes/v_0_1_72/index.html index a3c3cc787bba6..eab6ef233e1fb 100644 --- a/docs/managed-datahub/release-notes/v_0_1_72/index.html +++ b/docs/managed-datahub/release-notes/v_0_1_72/index.html @@ -8,13 +8,13 @@ - +

    v0.1.72


    Release Availability Date

    18 Jan 2023

    Release Changelog


    Special Notes


    • If anyone faces issues with login please clear your cookies. Some security updates are part of this release. That may cause login issues until cookies are cleared.
    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_1_73/index.html b/docs/managed-datahub/release-notes/v_0_1_73/index.html index df3d829a91989..7d9de2e27ef95 100644 --- a/docs/managed-datahub/release-notes/v_0_1_73/index.html +++ b/docs/managed-datahub/release-notes/v_0_1_73/index.html @@ -8,13 +8,13 @@ - + - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_2_0/index.html b/docs/managed-datahub/release-notes/v_0_2_0/index.html index 865f4d800a972..43a8c8bf20c83 100644 --- a/docs/managed-datahub/release-notes/v_0_2_0/index.html +++ b/docs/managed-datahub/release-notes/v_0_2_0/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    v0.2.0


    Release Availability Date

    09 Feb 2023

    Update Downtime

    During release installation the Elasticsearch indices will be reindexed to improve search capabilities. While the upgrade is in progress DataHub will be set to a read-only mode. Once this operation is completed, the upgrade will proceed normally. Depending on index sizes and infrastructure this process can take from 5 minutes to several hours; as a rough estimate, expect 1 hour for every 2.3 million entities.

    Release Changelog


    • Since v0.1.73 these changes from OSS DataHub https://github.com/datahub-project/datahub/compare/36afdec3946df2fb4166ac27a89b933ced87d00e...v0.10.0 have been pulled in
      • Improved documentation editor
      • Filter lineage graphs based on time windows
      • Improvements in Search
      • Metadata Ingestion
        • Redshift: You can now extract lineage information from unload queries
        • PowerBI: Ingestion now maps Workspaces to DataHub Containers
        • BigQuery: You can now extract lineage metadata from the Catalog
        • Glue: Ingestion now uses table name as the human-readable name
    • SSO Preferred Algorithm Setting
    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_2_1/index.html b/docs/managed-datahub/release-notes/v_0_2_1/index.html index 685418caad62f..b061ac70d3a7c 100644 --- a/docs/managed-datahub/release-notes/v_0_2_1/index.html +++ b/docs/managed-datahub/release-notes/v_0_2_1/index.html @@ -8,13 +8,13 @@ - +

    v0.2.1


    Release Availability Date

    23-Feb-2023

    Release Changelog


    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_2_10/index.html b/docs/managed-datahub/release-notes/v_0_2_10/index.html index a11d09d5297f7..f47924a3d3a1a 100644 --- a/docs/managed-datahub/release-notes/v_0_2_10/index.html +++ b/docs/managed-datahub/release-notes/v_0_2_10/index.html @@ -8,13 +8,13 @@ - +

    v0.2.10


    Release Availability Date

    09-Aug-2023

    If you are using an older CLI/SDK version then please upgrade it. This applies for all CLI/SDK usages, if you are using it through your terminal, GitHub Actions, Airflow, in the Python SDK somewhere, Java SDK etc. This is a strong recommendation to upgrade as we keep on pushing fixes in the CLI and it helps us support you better.

    Special Notes

    • We have a new search and browse experience. We cannot enable it unless all of your CLI/SDK usages are upgraded. If you are using a custom source then you need to upgrade your source to produce browsePathv2 aspects. Details are in this doc.
    • [Breaking change] For all sql-based sources that support profiling, you can no longer specify profile_table_level_only together with include_field_xyz config options to ingest certain column-level metrics. Instead, set profile_table_level_only to false and individually enable / disable desired field metrics.
    • [Breaking change] The bigquery-beta and snowflake-beta source aliases have been dropped. Use bigquery and snowflake as the source type instead.
    • [Behaviour change] Ingestion runs created with Pipeline.create will show up in the DataHub ingestion tab as CLI-based runs. To revert to the previous behavior of not showing these runs in DataHub, pass no_default_report=True.
    • [Behaviour change] snowflake connector will use user's email attribute as is, as the urn. To revert to previous behavior disable email_as_user_identifier in recipe.

    Release Changelog


    Some notable features in this SaaS release

    • New search and Browse v2 experience. This can only be enabled if you upgrade all your CLI/SDK usage as per our recommendation provided above.
    • We will be enabling these features selectively. If you are interested in trying it and providing feedback, please reach out to your Acryl Customer Success representative.
      • Acryl Observe Freshness Assertions available in private beta as shared here.
      • New notifications and Subscriptions feature available.
    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_2_2/index.html b/docs/managed-datahub/release-notes/v_0_2_2/index.html index 81cde8aef9dd9..d54b1b90a98d1 100644 --- a/docs/managed-datahub/release-notes/v_0_2_2/index.html +++ b/docs/managed-datahub/release-notes/v_0_2_2/index.html @@ -8,13 +8,13 @@ - +

    v0.2.2


    Release Availability Date

    01-Mar-2023

    Release Changelog


    • Since v0.2.1 no changes from OSS DataHub have been pulled in.
    • fix(lineage): fix filtering for Timeline Lineage, regression for Search Ingestion Summaries
    • fix(recommendations): recommendations now display on the homepage for recently viewed, searched, and most popular.
    • fix(analytics): chart smoothing and date range fixes
    • fix(search): case-sensitive exact match
    • fix(search): fix handling of 2 character search terms when not a prefix or exact match
    • fix(ingestion): fix ingestion run summary showing no results
    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_2_3/index.html b/docs/managed-datahub/release-notes/v_0_2_3/index.html index 695044f0b4a31..8d41ebff304ca 100644 --- a/docs/managed-datahub/release-notes/v_0_2_3/index.html +++ b/docs/managed-datahub/release-notes/v_0_2_3/index.html @@ -8,13 +8,13 @@ - +

    v0.2.3


    Release Availability Date

    14-Mar-2023

    Release Changelog


    • Since v0.2.2 no changes from OSS DataHub have been pulled in.
    • fix(mcl): only restate Lineage MCL's - This should help with some lag issues being seen
    • feat(proposals): Add ability to propose descriptions on datasets
    • Hotfix 2023 03 06 - Some Miscellaneous search improvements
    • fix(bootstrap): only ingest default metadata tests once - This should help with some deleted metadata tests re-appearing.
    • refactor(lineage): Fix & optimize getAndUpdatePaths - The impact should be a reduced page load time for the lineage-intensive entities
    • refactor(ui): Loading schema dynamically for datasets
    • fix(lineage): nullpointer exceptions - should fix some errors related to lineage search
    • chore(ci): add daylight savings timezone for tests, fix daylight saving bug in analytics charts - Should fix gaps in Monthly charts for people with daylight savings
    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_2_4/index.html b/docs/managed-datahub/release-notes/v_0_2_4/index.html index a4cc09d734cfb..f8db0eed738de 100644 --- a/docs/managed-datahub/release-notes/v_0_2_4/index.html +++ b/docs/managed-datahub/release-notes/v_0_2_4/index.html @@ -8,13 +8,13 @@ - +

    v0.2.4


    Release Availability Date

    24-Mar-2023

    Release Changelog


    • Since v0.2.3 no changes from OSS DataHub have been pulled in.
    • fix(ui) Safeguard ingestion execution request check - Fixes an error on frontend managed ingestion page
    • fix(impactAnalysis): fix filtering for lightning mode search
    • fix(search): fix tags with colons
    • refactor(incidents): Remove dataset health caching to make incident health instantly update
    • fix(ui): Address regression in column usage stats + add unit test
    • fix(timeBasedLineage): fix ingestProposal flow for no ops
    • feat(assertions + incidents): Support Querying Entities by Assertion / Incident Status + Chrome Embed Optimizations
    • fix(lineage): change default lineage time window to All Time
    • Truncate cache key for search lineage
    • feat(config): Add endpoint to exact search query information
    • fix(default policies): Add Manage Proposals Default Policies for Root User
    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_2_5/index.html b/docs/managed-datahub/release-notes/v_0_2_5/index.html index d8ca8eeebfe2b..4868be2258c6d 100644 --- a/docs/managed-datahub/release-notes/v_0_2_5/index.html +++ b/docs/managed-datahub/release-notes/v_0_2_5/index.html @@ -8,13 +8,13 @@ - +

    v0.2.5


    Release Availability Date

    11-Apr-2023

    Release Changelog


    • Since v0.2.4 these changes from OSS DataHub https://github.com/datahub-project/datahub/compare/2764c44977583d8a34a3425454e81a730b120829...294c5ff50789564dc836ca0cbcd8f7020756eb0a have been pulled in.
    • feat(graphql): Adding new offline features to dataset stats summary
    • feat(metadata tests): Further Metadata Tests Improvements (Prep for Uplift)
    • refactor(tests): Supporting soft-deleted Metadata Tests
    • feat(tests): Adding a high-quality set of Default Metadata Tests
    • refactor(tests): Uplift Metadata Tests UX
    • refactor(Tests): Metadata Tests Uplift: Adding Empty Tests state
    • refactor(Tests): Adding Test Results Modal
    • refactor(tests): Adding more default tests and tags
    • fix(graphQL): Add protection agaisnt optional null OwnershipTypes
    • fix(ui): Fix tags display name + color in UI for autocomplete, search preview, entity profile
    • fix(ui) Fix tags and terms columns on nested schema fields
    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_2_6/index.html b/docs/managed-datahub/release-notes/v_0_2_6/index.html index a95d19b82cb6d..cc260ca2e5ee9 100644 --- a/docs/managed-datahub/release-notes/v_0_2_6/index.html +++ b/docs/managed-datahub/release-notes/v_0_2_6/index.html @@ -8,13 +8,13 @@ - +

    v0.2.6


    Release Availability Date

    28-Apr-2023

    Release Changelog


    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_2_7/index.html b/docs/managed-datahub/release-notes/v_0_2_7/index.html index 62d36f2f307a7..34338c451441b 100644 --- a/docs/managed-datahub/release-notes/v_0_2_7/index.html +++ b/docs/managed-datahub/release-notes/v_0_2_7/index.html @@ -8,13 +8,13 @@ - +

    v0.2.7


    Release Availability Date

    19-May-2023

    Release Changelog


    • Since v0.2.6 these changes from OSS DataHub https://github.com/datahub-project/datahub/compare/2bc0a781a63fd4aed50080ab453bcbd3ec0570bd...44406f7adf09674727e433c2136654cc21e79dd2 have been pulled in.
    • feat(observability): Extending Incidents Models for Observability
    • models(integrations + obs): Adding a Connection entity
    • feat(observability): Extending Assertions Models for Observability
    • feat(observability): Introducing Anomaly Models
    • feat(fastpath): pre-process updateIndicesHook for UI sourced updates
    • fix(metadataTests): change scroll to searchAfter based API
    • feat(observability): Assertions-Based Incidents Generator Hook
    • fix(notifications): fix double notifications issue
    • fix(tag): render tag name via properties
    • fix(jackson): add stream reader constraint with 16 MB limit
    • fix(metadataTests): gold tier metadata tests condition
    • fix(ingest/dbt): fix siblings resolution for sources
    • Some search fixes
    • fix(graphql) Fix autocomplete for views with un-searchable types
    • fix(ui) Allow users to be able to propose new terms/term groups from UI
    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_2_8/index.html b/docs/managed-datahub/release-notes/v_0_2_8/index.html index 49755acd0f0ab..36a9cb95fab42 100644 --- a/docs/managed-datahub/release-notes/v_0_2_8/index.html +++ b/docs/managed-datahub/release-notes/v_0_2_8/index.html @@ -8,13 +8,13 @@ - +

    v0.2.8


    Release Availability Date

    07-June-2023

    Release Changelog


    • Since v0.2.7 these changes from OSS DataHub https://github.com/datahub-project/datahub/compare/a68833769e1fe1b27c22269971a51c63cc285c18...e7d1b900ec09cefca4e6ca979f391d3a17b473c9 have been pulled in.
    • feat(assertions): Extending Assertions READ GraphQL APIs for Observability
    • fix(embed): styling updates for chrome extension
    • feat: slack integrations service
    • feat(assertions): Extending Assertions WRITE GraphQL APIs for Observability
    • feat(contracts): Adding models for Data Contracts
    • feat(tests): prevent reprocessing of test sourced events
    • feat(tests): add parallelization for workloads on metadata tests
    • feat(observability): Monitor Models for Observability
    • fix(datahub-upgrade) fix while loop predicates for scrolling
    • fix(usage): Add resource spec for authenticated access where possible
    • feat(observability): Assertions-Based Anomalies Generator Hook
    • feat(observability): Adding the GraphQL Implementation for Monitor Entity
    • fix(restli): update base client retry logic

    Some notable features' documentation in this SaaS release

    - + \ No newline at end of file diff --git a/docs/managed-datahub/release-notes/v_0_2_9/index.html b/docs/managed-datahub/release-notes/v_0_2_9/index.html index feb8543128788..5efc4b5eff4fd 100644 --- a/docs/managed-datahub/release-notes/v_0_2_9/index.html +++ b/docs/managed-datahub/release-notes/v_0_2_9/index.html @@ -8,13 +8,13 @@ - +

    v0.2.9


    Release Availability Date

    28-June-2023

    If you are using an older CLI/SDK version then please upgrade it. This applies for all CLI/SDK usages, if you are using it through your terminal, GitHub Actions, Airflow, in the Python SDK somewhere, Java SDK etc. This is a strong recommendation to upgrade as we keep on pushing fixes in the CLI and it helps us support you better.

    Special Notes

    • We have a new search and browse experience. We cannot enable it unless all of your CLI/SDK usages are upgraded. If you are using a custom source then you need to upgrade your source to produce browsePathv2 aspects.
    • [BREAKING CHANGE] If you are using our okta source to do ingestion then you MUST read this. Okta source config option okta_profile_to_username_attr default changed from login to email. This determines which Okta profile attribute is used for the corresponding DataHub user and thus may change what DataHub users are generated by the Okta source. And in a follow up okta_profile_to_username_regex has been set to .* which taken together with previous change brings the defaults in line with OIDC which is used for login via Okta.
    • [DEPRECATION] In the DataFlow class, the cluster argument is deprecated in favor of env.

    Release Changelog


    • Since v0.2.8 these changes from OSS DataHub https://github.com/datahub-project/datahub/compare/e7d1b900ec09cefca4e6ca979f391d3a17b473c9...1f0723fad109658a69bb1d4279100de8514f35d7 have been pulled in.
    • fix(tests): Fixing pagination on Metadata Test results
    • fix(assertions): fix assertion actions hook npe
    • fix(notifications): Fixing duplicate ingestion started notifications
    • feat(slack integrations): Existing component changes required for revised Slack integration
    • fix(proposals): fixing propose glossary term description and adding tests
    • fix(search): populate scroll ID properly for other scroll usages
    • fix(metadata test icon): hide metadata test pass/fail icon on entity header

    These changes are for an upcoming feature which we have not enabled yet. We are putting it here for transparency purposes. Acryl team will reach out once we start the rollout of our observability features.

    • feat(incidents): Extending Incidents GraphQL APIs for Observability
    • feat(anomalies): Adding Anomalies READ GraphQL APIs for Observability
    • feat(observability): Minor models and graphql improvements
    • feat(observability): UI for creating Dataset SLA Assertions
    • feat(observability): Adding support for patching monitor info aspect
    • feat(observability): Adding GraphQL APIs for enabling / disabling System Monitors
    • feat(observability): DataHub Monitors Service
    • feat(observability): display assertion externalUrl if available

    Some notable features in this SaaS release

    • New search and Browse v2 experience. This can only be enabled if you upgrade all your CLI/SDK usage as per our recommendation provided above.
    • Patch support for dataJobInputOutput as described here
    - + \ No newline at end of file diff --git a/docs/managed-datahub/saas-slack-setup/index.html b/docs/managed-datahub/saas-slack-setup/index.html index 7203afafcb220..b651f4654e799 100644 --- a/docs/managed-datahub/saas-slack-setup/index.html +++ b/docs/managed-datahub/saas-slack-setup/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ To enable or disable a specific notification type in Slack, simply click the check mark. By default, all notification types are enabled. To customize the channel where notifications are send, click the button to the right of the check box.

    If provided, a custom channel will be used to route notifications of the given type. If not provided, the default channel will be used. That's it! You should begin to receive notifications on Slack. Note that it may take up to 1 minute for notification settings to take effect after saving.

    Sending Notifications

    For now we support sending notifications to

    • Slack Channel ID (e.g. C029A3M079U)
    • Slack Channel Name (e.g. #troubleshoot)
    • Specific Users (aka Direct Messages or DMs) via user ID

    How to find Team ID and Channel ID in Slack

    • Go to the Slack channel for which you want to get channel ID
    • Check the URL e.g. for the troubleshoot channel in OSS DataHub slack

    • Notice TUMKD5EGJ/C029A3M079U in the URL
      • Team ID = TUMKD5EGJ from above
      • Channel ID = C029A3M079U from above

    How to find User ID in Slack

    • Go to user DM
    • Click on their profile picture
    • Click on View Full Profile
    • Click on “More”
    • Click on “Copy member ID”

    - + \ No newline at end of file diff --git a/docs/managed-datahub/welcome-acryl/index.html b/docs/managed-datahub/welcome-acryl/index.html index 6d444f5b20811..3b5546ffec8b5 100644 --- a/docs/managed-datahub/welcome-acryl/index.html +++ b/docs/managed-datahub/welcome-acryl/index.html @@ -8,13 +8,13 @@ - +

    Getting Started with Acryl DataHub

    Welcome to the Acryl DataHub! We at Acryl are on a mission to make data reliable by bringing clarity to the who, what, when, & how of your data ecosystem. We're thrilled to be on this journey with you; and cannot wait to see what we build together!

    Close communication is not only welcomed, but highly encouraged. For all questions, concerns, & feedback, please reach out to us directly at support@acryl.io.

    Prerequisites

    Before you go further, you'll need to have a DataHub instance provisioned. The Acryl integrations team will provide you the following once it has been deployed:

    1. The URL for your Acryl instance (https://your-domain-name.acryl.io)
    2. Admin account credentials for logging into the DataHub UI

    Once you have these, you're ready to go.

    info

    If you wish to have a private connection to your DataHub instance, Acryl supports AWS PrivateLink to complete this connection to your existing AWS account. Please see more details here.

    Logging In

    Acryl DataHub currently supports the following means to log into a DataHub instance:

    1. Admin account: With each deployment of DataHub comes a master admin account. It has a randomly generated password that can be accessed by reaching out to Acryl Integrations team (support@acryl.io). To log in with an admin account, navigate to https://your-domain.acryl.io/login
    2. OIDC: Acryl DataHub also supports OIDC integration with the Identity Provider of your choice (Okta, Google, etc). To set this up, Acryl integrations team will require the following:
    3. Client ID - A unique identifier for your application with the identity provider
    4. Client Secret - A shared secret to use for exchange between you and your identity provider. To send this over securely, we recommend using onetimesecret.com to create a link.
    5. Discovery URL - A URL where the OIDC API of your identity provider can be discovered. This should be suffixed by .well-known/openid-configuration. Sometimes, identity providers will not explicitly include this URL in their setup guides, though this endpoint will exist as per the OIDC specification. For more info see here.

    The callback URL to register in your Identity Provider will be

    https://your-acryl-domain.acryl.io/callback/oidc 

    Note that we do not yet support LDAP or SAML authentication. Please let us know if either of these integrations would be useful for your organization.

    Getting Started

    Acryl DataHub is first and foremost a metadata Search & Discovery product. As such, the two most important parts of the experience are

    1. Ingesting metadata
    2. Discovering metadata

    Ingesting Metadata

    Acryl DataHub employs a push-based metadata ingestion model. In practice, this means running an Acryl-provided agent inside your organization's infrastructure, and pushing that data out to your DataHub instance in the cloud. One benefit of this approach is that metadata can be aggregated across any number of distributed sources, regardless of form or location.

    This approach comes with another benefit: security. By managing your own instance of the agent, you can keep the secrets and credentials within your walled garden. Skip uploading secrets & keys into a third-party cloud tool.

    To push metadata into DataHub, Acryl provides an ingestion framework written in Python. Typically, push jobs are run on a schedule at an interval of your choosing. For our step-by-step guide on ingestion, click here.

    Discovering Metadata

    There are 2 primary ways to find metadata: search and browse. Both can be accessed via the DataHub home page.

    By default, we provide rich search capabilities across your ingested metadata. This includes the ability to search by tags, descriptions, column names, column descriptions, and more using the global search bar found on the home page.

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion-modules/airflow-plugin/index.html b/docs/metadata-ingestion-modules/airflow-plugin/index.html index b466fbb0b57b5..c9fafd5854346 100644 --- a/docs/metadata-ingestion-modules/airflow-plugin/index.html +++ b/docs/metadata-ingestion-modules/airflow-plugin/index.html @@ -8,13 +8,13 @@ - +
    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/adding-source/index.html b/docs/metadata-ingestion/adding-source/index.html index 72c09d148c6d9..496bd29c05364 100644 --- a/docs/metadata-ingestion/adding-source/index.html +++ b/docs/metadata-ingestion/adding-source/index.html @@ -8,7 +8,7 @@ - + @@ -27,7 +27,7 @@ listed when running datahub check plugins, and sets up the source's shortened alias for use in recipes.

    6. Write tests

    Tests go in the tests directory. We use the pytest framework.

    7. Write docs

    7.1 Set up the source class for automatic documentation

    • Indicate the platform name that this source class produces metadata for using the @platform_name decorator. We prefer using the human-readable platform name, so e.g. BigQuery (not bigquery).
    • Indicate the config class being used by the source by using the @config_class decorator.
    • Indicate the support status of the connector by using the @support_status decorator.
    • Indicate what capabilities the connector supports (and what important capabilities it does NOT support) by using the @capability decorator.
    • Add rich documentation for the connector by utilizing docstrings on your Python class. Markdown is supported.

    See below a simple example of how to do this for any source.


    from datahub.ingestion.api.decorators import (
    SourceCapability,
    SupportStatus,
    capability,
    config_class,
    platform_name,
    support_status,
    )

    @platform_name("File")
    @support_status(SupportStatus.CERTIFIED)
    @config_class(FileSourceConfig)
    @capability(
    SourceCapability.PLATFORM_INSTANCE,
    "File based ingestion does not support platform instances",
    supported=False,
    )
    @capability(SourceCapability.DOMAINS, "Enabled by default")
    @capability(SourceCapability.DATA_PROFILING, "Optionally enabled via configuration")
    @capability(SourceCapability.DESCRIPTIONS, "Enabled by default")
    @capability(SourceCapability.LINEAGE_COARSE, "Enabled by default")
    class FileSource(Source):
    """

    The File Source can be used to produce all kinds of metadata from a generic metadata events file.
    :::note
    Events in this file can be in MCE form or MCP form.
    :::

    """

    ... source code goes here

    7.2 Write custom documentation

    • Create a copy of source-docs-template.md and edit all relevant components.
    • Name the document as <plugin.md> and move it to metadata-ingestion/docs/sources/<platform>/<plugin>.md. For example for the Kafka platform, under the kafka plugin, move the document to metadata-ingestion/docs/sources/kafka/kafka.md.
    • Add a quickstart recipe corresponding to the plugin under metadata-ingestion/docs/sources/<platform>/<plugin>_recipe.yml. For example, for the Kafka platform, under the kafka plugin, there is a quickstart recipe located at metadata-ingestion/docs/sources/kafka/kafka_recipe.yml.
    • To write platform-specific documentation (that is cross-plugin), write the documentation under metadata-ingestion/docs/sources/<platform>/README.md. For example, cross-plugin documentation for the BigQuery platform is located under metadata-ingestion/docs/sources/bigquery/README.md.

    7.3 Viewing the Documentation

    Documentation for the source can be viewed by running the documentation generator from the docs-website module.

    Step 1: Build the Ingestion docs
    # From the root of DataHub repo
    ./gradlew :metadata-ingestion:docGen

    If this finishes successfully, you will see output messages like:

    Ingestion Documentation Generation Complete
    ############################################
    {
    "source_platforms": {
    "discovered": 40,
    "generated": 40
    },
    "plugins": {
    "discovered": 47,
    "generated": 47,
    "failed": 0
    }
    }
    ############################################

    You can also find documentation files generated at ./docs/generated/ingestion/sources relative to the root of the DataHub repo. You should be able to locate your specific source's markdown file here and investigate it to make sure things look as expected.

    Step 2: Build the Entire Documentation

    To view how this documentation looks in the browser, there is one more step. Just build the entire docusaurus page from the docs-website module.

    # From the root of DataHub repo
    ./gradlew :docs-website:build

    This will generate messages like:

    ...
    > Task :docs-website:yarnGenerate
    yarn run v1.22.0
    $ rm -rf genDocs/* && ts-node -O '{ "lib": ["es2020"], "target": "es6" }' generateDocsDir.ts && mv -v docs/* genDocs/
    Including untracked files in docs list:
    docs/graphql -> genDocs/graphql
    Done in 2.47s.

    > Task :docs-website:yarnBuild
    yarn run v1.22.0
    $ docusaurus build

    ╭──────────────────────────────────────────────────────────────────────────────╮│ ││ Update available 2.0.0-beta.8 → 2.0.0-beta.18 ││ ││ To upgrade Docusaurus packages with the latest version, run the ││ following command: ││ yarn upgrade @docusaurus/core@latest ││ @docusaurus/plugin-ideal-image@latest @docusaurus/preset-classic@latest ││ │╰──────────────────────────────────────────────────────────────────────────────╯


    [en] Creating an optimized production build...
    Invalid docusaurus-plugin-ideal-image version 2.0.0-beta.7.
    All official @docusaurus/* packages should have the exact same version as @docusaurus/core (2.0.0-beta.8).
    Maybe you want to check, or regenerate your yarn.lock or package-lock.json file?
    Browserslist: caniuse-lite is outdated. Please run:
    npx browserslist@latest --update-db
    Why you should do it regularly: https://github.com/browserslist/browserslist#browsers-data-updating
    ℹ Compiling Client
    ℹ Compiling Server
    ✔ Client: Compiled successfully in 1.95s
    ✔ Server: Compiled successfully in 7.52s
    Success! Generated static files in "build".

    Use `npm run serve` command to test your build locally.

    Done in 11.59s.

    Deprecated Gradle features were used in this build, making it incompatible with Gradle 7.0.
    Use '--warning-mode all' to show the individual deprecation warnings.
    See https://docs.gradle.org/6.9.2/userguide/command_line_interface.html#sec:command_line_warnings

    BUILD SUCCESSFUL in 35s
    36 actionable tasks: 16 executed, 20 up-to-date

    After this you need to run the following script from the docs-website module.

    cd docs-website
    npm run serve

    Now, browse to http://localhost:3000 or whichever port npm is running on, to browse the docs. Your source should show up on the left sidebar under Metadata Ingestion / Sources.

    8. Add SQL Alchemy mapping (if applicable)

    Add the source to the get_platform_from_sqlalchemy_uri function in sql_common.py if the source is based on SQLAlchemy.

    9. Add logo for the platform

    Add the logo image in images folder and add it to be ingested at startup

    10. Update Frontend for UI-based ingestion

    We are currently transitioning to a more dynamic approach to display available sources for UI-based Managed Ingestion. For the time being, adhere to these next steps to get your source to display in the UI Ingestion tab.

    10.1 Add to sources.json

    Add new source to the list in sources.json including a default quickstart recipe. This will render your source in the list of options when creating a new recipe in the UI.

    10.2 Add logo to the React app

    Add your source logo to the React images folder so your image is available in memory.

    10.3 Update constants.ts

    Create new constants in constants.ts for the source urn and source name. Update PLATFORM_URN_TO_LOGO to map your source urn to the newly added logo in the images folder.

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/as-a-library/index.html b/docs/metadata-ingestion/as-a-library/index.html index 4090cfe0affd3..13b02f251d63a 100644 --- a/docs/metadata-ingestion/as-a-library/index.html +++ b/docs/metadata-ingestion/as-a-library/index.html @@ -8,14 +8,14 @@ - +

    Python Emitter

    In some cases, you might want to construct Metadata events directly and use programmatic ways to emit that metadata to DataHub. Use-cases are typically push-based and include emitting metadata events from CI/CD pipelines, custom orchestrators etc.

    The acryl-datahub Python package offers REST and Kafka emitter APIs, which can easily be imported and called from your own code.

    Pro Tip! Throughout our API guides, we have examples of using the Python API SDK. Look out for the | Python | tab within our tutorials.

    Installation

    Follow the installation guide for the main acryl-datahub package here. Read on for emitter specific installation instructions.

    REST Emitter

    The REST emitter is a thin wrapper on top of the requests module and offers a blocking interface for sending metadata events over HTTP. Use this when simplicity and acknowledgement of metadata being persisted to DataHub's metadata store is more important than throughput of metadata emission. Also use this when read-after-write scenarios exist, e.g. writing metadata and then immediately reading it back.

    Installation

    pip install -U `acryl-datahub[datahub-rest]`

    Example Usage

    import datahub.emitter.mce_builder as builder
    from datahub.emitter.mcp import MetadataChangeProposalWrapper
    from datahub.metadata.schema_classes import DatasetPropertiesClass

    from datahub.emitter.rest_emitter import DatahubRestEmitter

    # Create an emitter to DataHub over REST
    emitter = DatahubRestEmitter(gms_server="http://localhost:8080", extra_headers={})

    # Test the connection
    emitter.test_connection()

    # Construct a dataset properties object
    dataset_properties = DatasetPropertiesClass(description="This table stored the canonical User profile",
    customProperties={
    "governance": "ENABLED"
    })

    # Construct a MetadataChangeProposalWrapper object.
    metadata_event = MetadataChangeProposalWrapper(
    entityUrn=builder.make_dataset_urn("bigquery", "my-project.my-dataset.user-table"),
    aspect=dataset_properties,
    )

    # Emit metadata! This is a blocking call
    emitter.emit(metadata_event)

    Other examples:

    Emitter Code

    If you're interested in looking at the REST emitter code, it is available here

    Kafka Emitter

    The Kafka emitter is a thin wrapper on top of the SerializingProducer class from confluent-kafka and offers a non-blocking interface for sending metadata events to DataHub. Use this when you want to decouple your metadata producer from the uptime of your datahub metadata server by utilizing Kafka as a highly available message bus. For example, if your DataHub metadata service is down due to planned or unplanned outages, you can still continue to collect metadata from your mission critical systems by sending it to Kafka. Also use this emitter when throughput of metadata emission is more important than acknowledgement of metadata being persisted to DataHub's backend store.

    Note: The Kafka emitter uses Avro to serialize the Metadata events to Kafka. Changing the serializer will result in unprocessable events as DataHub currently expects the metadata events over Kafka to be serialized in Avro.

    Installation

    # For emission over Kafka
    pip install -U `acryl-datahub[datahub-kafka]`

    Example Usage

    import datahub.emitter.mce_builder as builder
    from datahub.emitter.mcp import MetadataChangeProposalWrapper
    from datahub.metadata.schema_classes import DatasetPropertiesClass

    from datahub.emitter.kafka_emitter import DatahubKafkaEmitter, KafkaEmitterConfig
    # Create an emitter to Kafka
    kafka_config = {
    "connection": {
    "bootstrap": "localhost:9092",
    "schema_registry_url": "http://localhost:8081",
    "schema_registry_config": {}, # schema_registry configs passed to underlying schema registry client
    "producer_config": {}, # extra producer configs passed to underlying kafka producer
    }
    }

    emitter = DatahubKafkaEmitter(
    KafkaEmitterConfig.parse_obj(kafka_config)
    )

    # Construct a dataset properties object
    dataset_properties = DatasetPropertiesClass(description="This table stored the canonical User profile",
    customProperties={
    "governance": "ENABLED"
    })

    # Construct a MetadataChangeProposalWrapper object.
    metadata_event = MetadataChangeProposalWrapper(
    entityUrn=builder.make_dataset_urn("bigquery", "my-project.my-dataset.user-table"),
    aspect=dataset_properties,
    )


    # Emit metadata! This is a non-blocking call
    emitter.emit(
    metadata_event,
    callback=lambda exc, message: print(f"Message sent to topic:{message.topic()}, partition:{message.partition()}, offset:{message.offset()}") if message else print(f"Failed to send with: {exc}")
    )

    #Send all pending events
    emitter.flush()

    Emitter Code

    If you're interested in looking at the Kafka emitter code, it is available here

    Other Languages

    Emitter APIs are also supported for:

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/developing/index.html b/docs/metadata-ingestion/developing/index.html index 8d7bc710b2155..be9cbcaf59d07 100644 --- a/docs/metadata-ingestion/developing/index.html +++ b/docs/metadata-ingestion/developing/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    Developing on Metadata Ingestion

    If you just want to use metadata ingestion, check the user-centric guide. This document is for developers who want to develop and possibly contribute to the metadata ingestion framework.

    Also take a look at the guide to adding a source.

    Getting Started

    Requirements

    1. Python 3.7+ must be installed in your host environment.
    2. Java8 (gradle won't work with newer versions)
    3. On MacOS: brew install librdkafka
    4. On Debian/Ubuntu: sudo apt install librdkafka-dev python3-dev python3-venv
    5. On Fedora (if using LDAP source integration): sudo yum install openldap-devel

    Set up your Python environment

    From the repository root:

    cd metadata-ingestion
    ../gradlew :metadata-ingestion:installDev
    source venv/bin/activate
    datahub version # should print "DataHub CLI version: unavailable (installed in develop mode)"

    Common setup issues

    Common issues (click to expand):

    datahub command not found with PyPI install

    If you've already run the pip install, but running datahub in your command line doesn't work, then there is likely an issue with your PATH setup and Python.

    The easiest way to circumvent this is to install and run via Python, and use python3 -m datahub in place of datahub.

    python3 -m pip install --upgrade acryl-datahub
    python3 -m datahub --help
    Wheel issues e.g. "Failed building wheel for avro-python3" or "error: invalid command 'bdist_wheel'"

    This means Python's wheel is not installed. Try running the following commands and then retry.

    pip install --upgrade pip wheel setuptools
    pip cache purge
    Failure to install confluent_kafka: "error: command 'x86_64-linux-gnu-gcc' failed with exit status 1"

    This sometimes happens if there's a version mismatch between the Kafka's C library and the Python wrapper library. Try running pip install confluent_kafka==1.5.0 and then retrying.

    Using Plugins in Development

    The syntax for installing plugins is slightly different in development. For example:

    - pip install 'acryl-datahub[bigquery,datahub-rest]'
    + pip install -e '.[bigquery,datahub-rest]'

    Architecture

    metadata ingestion framework layout

    The architecture of this metadata ingestion framework is heavily inspired by Apache Gobblin (also originally a LinkedIn project!). We have a standardized format - the MetadataChangeEvent - and sources and sinks which respectively produce and consume these objects. The sources pull metadata from a variety of data systems, while the sinks are primarily for moving this metadata into DataHub.

    Code layout

    • The CLI interface is defined in entrypoints.py and in the cli directory.
    • The high level interfaces are defined in the API directory.
    • The actual sources and sinks have their own directories. The registry files in those directories import the implementations.
    • The metadata models are created using code generation, and eventually live in the ./src/datahub/metadata directory. However, these files are not checked in and instead are generated at build time. See the codegen script for details.
    • Tests live in the tests directory. They're split between smaller unit tests and larger integration tests.

    Code style

    We use black, isort, flake8, and mypy to ensure consistent code style and quality.

    # Assumes: pip install -e '.[dev]' and venv is activated
    black src/ tests/
    isort src/ tests/
    flake8 src/ tests/
    mypy src/ tests/

    or you can run from root of the repository

    ./gradlew :metadata-ingestion:lintFix

    Some other notes:

    • Prefer mixin classes over tall inheritance hierarchies.
    • Write type annotations wherever possible.
    • Use typing.Protocol to make implicit interfaces explicit.
    • If you ever find yourself copying and pasting large chunks of code, there's probably a better way to do it.
    • Prefer a standalone helper method over a @staticmethod.
    • You probably should not be defining a __hash__ method yourself. Using @dataclass(frozen=True) is a good way to get a hashable class.
    • Avoid global state. In sources, this includes instance variables that effectively function as "global" state for the source.
    • Avoid defining functions within other functions. This makes it harder to read and test the code.
    • When interacting with external APIs, parse the responses into a dataclass rather than operating directly on the response object.

    Dependency Management

    The vast majority of our dependencies are not required by the "core" package but instead can be optionally installed using Python "extras". This allows us to keep the core package lightweight. We should be deliberate about adding new dependencies to the core framework.

    Where possible, we should avoid pinning version dependencies. The acryl-datahub package is frequently used as a library and hence installed alongside other tools. If you need to restrict the version of a dependency, use a range like >=1.2.3,<2.0.0 or a negative constraint like >=1.2.3, !=1.2.7 instead. Every upper bound and negative constraint should be accompanied by a comment explaining why it's necessary.

    Caveat: Some packages like Great Expectations and Airflow frequently make breaking changes. For such packages, it's ok to add a "defensive" upper bound with the current latest version, accompanied by a comment. It's critical that we revisit these upper bounds at least once a month and broaden them if possible.

    Guidelines for Ingestion Configs

    We use pydantic to define the ingestion configs. In order to ensure that the configs are consistent and easy to use, we have a few guidelines:

    Naming

    • Most important point: we should match the terminology of the source system. For example, snowflake shouldn’t have a host_port, it should have an account_id.
    • We should prefer slightly more verbose names when the alternative isn’t descriptive enough. For example client_id or tenant_id over a bare id and access_secret over a bare secret.
    • AllowDenyPatterns should be used whenever we need to filter a list. The pattern should always apply to the fully qualified name of the entity. These configs should be named *_pattern, for example table_pattern.
    • Avoid *_only configs like profile_table_level_only in favor of profile_table_level and profile_column_level. include_tables and include_views are a good example.

    Content

    • All configs should have a description.
    • When using inheritance or mixin classes, make sure that the fields and documentation is applicable in the base class. The bigquery_temp_table_schema field definitely shouldn’t be showing up in every single source’s profiling config!
    • Set reasonable defaults!
      • The configs should not contain a default that you’d reasonably expect to be built in. As a bad example, the Postgres source’s schema_pattern has a default deny pattern containing information_schema. This means that if the user overrides the schema_pattern, they’ll need to manually add the information_schema to their deny patterns. This is bad, and the filtering should’ve been handled automatically by the source’s implementation, not added at runtime by its config.

    Coding

    • Use a single pydantic validator per thing to validate - we shouldn’t have validation methods that are 50 lines long.
    • Use SecretStr for passwords, auth tokens, etc.
    • When doing simple field renames, use the pydantic_renamed_field helper.
    • When doing field deprecations, use the pydantic_removed_field helper.
    • Validator methods must only throw ValueError, TypeError, or AssertionError. Do not throw ConfigurationError from validators.
    • Set hidden_from_docs for internal-only config flags. However, needing this often indicates a larger problem with the code structure. The hidden field should probably be a class attribute or an instance variable on the corresponding source.

    Testing

    # Follow standard install from source procedure - see above.

    # Install, including all dev requirements.
    pip install -e '.[dev]'

    # For running integration tests, you can use
    pip install -e '.[integration-tests]'

    # Run the full testing suite
    pytest -vv

    # Run unit tests.
    pytest -m 'not integration and not slow_integration'

    # Run Docker-based integration tests.
    pytest -m 'integration'

    # Run Docker-based slow integration tests.
    pytest -m 'slow_integration'

    # You can also run these steps via the gradle build:
    ../gradlew :metadata-ingestion:lint
    ../gradlew :metadata-ingestion:lintFix
    ../gradlew :metadata-ingestion:testQuick
    ../gradlew :metadata-ingestion:testFull
    ../gradlew :metadata-ingestion:check
    # Run all tests in a single file
    ../gradlew :metadata-ingestion:testSingle -PtestFile=tests/unit/test_airflow.py
    # Run all tests under tests/unit
    ../gradlew :metadata-ingestion:testSingle -PtestFile=tests/unit

    Updating golden test files

    If you made some changes that require generating new "golden" data files for use in testing a specific ingestion source, you can run the following to re-generate them:

    pytest tests/integration/<source>/<source>.py --update-golden-files

    For example,

    pytest tests/integration/dbt/test_dbt.py --update-golden-files
    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/docs/dev_guides/add_stateful_ingestion_to_source/index.html b/docs/metadata-ingestion/docs/dev_guides/add_stateful_ingestion_to_source/index.html index 7148e69828729..96ef93883c1f7 100644 --- a/docs/metadata-ingestion/docs/dev_guides/add_stateful_ingestion_to_source/index.html +++ b/docs/metadata-ingestion/docs/dev_guides/add_stateful_ingestion_to_source/index.html @@ -8,7 +8,7 @@ - + @@ -30,7 +30,7 @@ Examples:

    1. Snowflake Usage
    @dataclass
    class SnowflakeUsageReport(BaseSnowflakeReport, StatefulIngestionReport):
    # <members specific to snowflake usage report>

    3. Modifying the Source

    The source must inherit from StatefulIngestionSourceBase.

    3.1 Instantiate RedundantRunSkipHandler in the __init__ method of the source.

    The source should instantiate an instance of the RedundantRunSkipHandler in its __init__ method. Examples: Snowflake Usage

    from datahub.ingestion.source.state.redundant_run_skip_handler import (
    RedundantRunSkipHandler,
    )
    class SnowflakeUsageSource(StatefulIngestionSourceBase):

    def __init__(self, config: SnowflakeUsageConfig, ctx: PipelineContext):
    super(SnowflakeUsageSource, self).__init__(config, ctx)
    self.config: SnowflakeUsageConfig = config
    self.report: SnowflakeUsageReport = SnowflakeUsageReport()
    # Create and register the stateful ingestion use-case handlers.
    self.redundant_run_skip_handler = RedundantRunSkipHandler(
    source=self,
    config=self.config,
    pipeline_name=self.ctx.pipeline_name,
    run_id=self.ctx.run_id,
    )

    3.2 Checking if the current run should be skipped.

    The sources can query whether the current run should be skipped using the should_skip_this_run method of RedundantRunSkipHandler. This should be done from the get_workunits method, before doing any other work.

    Example code:

    def get_workunits(self) -> Iterable[MetadataWorkUnit]:
    # Skip a redundant run
    if self.redundant_run_skip_handler.should_skip_this_run(
    cur_start_time_millis=datetime_to_ts_millis(self.config.start_time)
    ):
    return
    # Generate the workunits.

    3.3 Updating the state for the current run.

    The source should use the update_state method of RedundantRunSkipHandler to update the current run's state. This step can be performed in get_workunits, but only if the run has not been skipped.

    Example code:

        def get_workunits(self) -> Iterable[MetadataWorkUnit]:
    # Skip a redundant run
    if self.redundant_run_skip_handler.should_skip_this_run(
    cur_start_time_millis=datetime_to_ts_millis(self.config.start_time)
    ):
    return

    # Generate the workunits.
    # <code for generating the workunits>
    # Update checkpoint state for this run.
    self.redundant_run_skip_handler.update_state(
    start_time_millis=datetime_to_ts_millis(self.config.start_time),
    end_time_millis=datetime_to_ts_millis(self.config.end_time),
    )
    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/docs/dev_guides/classification/index.html b/docs/metadata-ingestion/docs/dev_guides/classification/index.html index 18478aad8c1b2..b319fc103d9a0 100644 --- a/docs/metadata-ingestion/docs/dev_guides/classification/index.html +++ b/docs/metadata-ingestion/docs/dev_guides/classification/index.html @@ -8,13 +8,13 @@ - +

    Classification

    The classification feature enables sources to be configured to automatically predict info types for columns and use them as glossary terms. This is an explicit opt-in feature and is not enabled by default.

    Config details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldRequiredTypeDescriptionDefault
    enabledbooleanWhether classification should be used to auto-detect glossary termsFalse
    sample_sizeintNumber of sample values used for classification.100
    max_workersintNumber of worker threads to use for classification. Set to 1 to disable.Number of cpu cores or 4
    info_type_to_termDict[str,string]Optional mapping to provide glossary term identifier for info type.By default, info type is used as glossary term identifier.
    classifiersArray of objectClassifiers to use to auto-detect glossary terms. If more than one classifier, infotype predictions from the classifier defined later in sequence take precedence.[{'type': 'datahub', 'config': None}]
    table_patternAllowDenyPattern (see below for fields)Regex patterns to filter tables for classification. This is used in combination with other patterns in parent config. Specify regex to match the entire table name in database.schema.table format. e.g. to match all tables starting with customer in Customer database and public schema, use the regex 'Customer.public.customer.*'{'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    table_pattern.allowArray of stringList of regex patterns to include in ingestion['.*']
    table_pattern.denyArray of stringList of regex patterns to exclude from ingestion.[]
    table_pattern.ignoreCasebooleanWhether to ignore case sensitivity during pattern matching.True
    column_patternAllowDenyPattern (see below for fields)Regex patterns to filter columns for classification. This is used in combination with other patterns in parent config. Specify regex to match the column name in database.schema.table.column format.{'allow': ['.*'], 'deny': [], 'ignoreCase': True}
    column_pattern.allowArray of stringList of regex patterns to include in ingestion['.*']
    column_pattern.denyArray of stringList of regex patterns to exclude from ingestion.[]
    column_pattern.ignoreCasebooleanWhether to ignore case sensitivity during pattern matching.True

    DataHub Classifier

    DataHub Classifier is the default classifier implementation, which uses acryl-datahub-classify library to predict info types.

    Config Details

    FieldRequiredTypeDescriptionDefault
    confidence_level_thresholdnumber0.68
    info_typeslist[string]List of infotypes to be predicted. By default, all supported infotypes are considered, along with any custom infotypes configured in info_types_config.None
    info_types_configConfiguration details for infotypesDict[str, InfoTypeConfig]See reference_input.py for default configuration.
    info_types_config.key.prediction_factors_and_weights❓ (required if info_types_config.key is set)Dict[str,number]Factors and their weights to consider when predicting info types
    info_types_config.key.nameNameFactorConfig (see below for fields)
    info_types_config.key.name.regexArray of stringList of regex patterns the column name follows for the info type['.*']
    info_types_config.key.descriptionDescriptionFactorConfig (see below for fields)
    info_types_config.key.description.regexArray of stringList of regex patterns the column description follows for the info type['.*']
    info_types_config.key.datatypeDataTypeFactorConfig (see below for fields)
    info_types_config.key.datatype.typeArray of stringList of data types for the info type['.*']
    info_types_config.key.valuesValuesFactorConfig (see below for fields)
    info_types_config.key.values.prediction_type❓ (required if info_types_config.key.values is set)stringNone
    info_types_config.key.values.regexArray of stringList of regex patterns the column value follows for the info typeNone
    info_types_config.key.values.libraryArray of stringLibrary used for predictionNone
    minimum_values_thresholdnumberMinimum number of non-null column values required to process values prediction factor.50

    Supported infotypes

    • Email_Address
    • Gender
    • Credit_Debit_Card_Number
    • Phone_Number
    • Street_Address
    • Full_Name
    • Age
    • IBAN
    • US_Social_Security_Number
    • Vehicle_Identification_Number
    • IP_Address_v4
    • IP_Address_v6
    • US_Driving_License_Number
    • Swift_Code

    Supported sources

    • snowflake

    Example

    source:
    type: snowflake
    config:
    env: PROD
    # Coordinates
    account_id: account_name
    warehouse: "COMPUTE_WH"

    # Credentials
    username: user
    password: pass
    role: "sysadmin"

    # Options
    top_n_queries: 10
    email_domain: mycompany.com

    classification:
    enabled: True
    classifiers:
    - type: datahub

    Example with Advanced Configuration: Customizing configuration for supported info types

    source:
    type: snowflake
    config:
    env: PROD
    # Coordinates
    account_id: account_name
    warehouse: "COMPUTE_WH"

    # Credentials
    username: user
    password: pass
    role: "sysadmin"

    # Options
    top_n_queries: 10
    email_domain: mycompany.com

    classification:
    enabled: True
    info_type_to_term:
    Email_Address: "Email"
    classifiers:
    - type: datahub
    config:
    confidence_level_threshold: 0.7
    info_types_config:
    Email_Address:
    prediction_factors_and_weights:
    name: 0.4
    description: 0
    datatype: 0
    values: 0.6
    name:
    regex:
    - "^.*mail.*id.*$"
    - "^.*id.*mail.*$"
    - "^.*mail.*add.*$"
    - "^.*add.*mail.*$"
    - email
    - mail
    description:
    regex:
    - "^.*mail.*id.*$"
    - "^.*mail.*add.*$"
    - email
    - mail
    datatype:
    type:
    - str
    values:
    prediction_type: regex
    regex:
    - "[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\\.[A-Z|a-z]{2,}"
    library: []
    Gender:
    prediction_factors_and_weights:
    name: 0.4
    description: 0
    datatype: 0
    values: 0.6
    name:
    regex:
    - "^.*gender.*$"
    - "^.*sex.*$"
    - gender
    - sex
    description:
    regex:
    - "^.*gender.*$"
    - "^.*sex.*$"
    - gender
    - sex
    datatype:
    type:
    - int
    - str
    values:
    prediction_type: regex
    regex:
    - male
    - female
    - man
    - woman
    - m
    - f
    - w
    - men
    - women
    library: []
    Credit_Debit_Card_Number:
    prediction_factors_and_weights:
    name: 0.4
    description: 0
    datatype: 0
    values: 0.6
    name:
    regex:
    - "^.*card.*number.*$"
    - "^.*number.*card.*$"
    - "^.*credit.*card.*$"
    - "^.*debit.*card.*$"
    description:
    regex:
    - "^.*card.*number.*$"
    - "^.*number.*card.*$"
    - "^.*credit.*card.*$"
    - "^.*debit.*card.*$"
    datatype:
    type:
    - str
    - int
    values:
    prediction_type: regex
    regex:
    - "^4[0-9]{12}(?:[0-9]{3})?$"
    - "^(?:5[1-5][0-9]{2}|222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}$"
    - "^3[47][0-9]{13}$"
    - "^3(?:0[0-5]|[68][0-9])[0-9]{11}$"
    - "^6(?:011|5[0-9]{2})[0-9]{12}$"
    - "^(?:2131|1800|35\\d{3})\\d{11}$"
    - "^(6541|6556)[0-9]{12}$"
    - "^389[0-9]{11}$"
    - "^63[7-9][0-9]{13}$"
    - "^9[0-9]{15}$"
    - "^(6304|6706|6709|6771)[0-9]{12,15}$"
    - "^(5018|5020|5038|6304|6759|6761|6763)[0-9]{8,15}$"
    - "^(62[0-9]{14,17})$"
    - "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14})$"
    - "^(4903|4905|4911|4936|6333|6759)[0-9]{12}|(4903|4905|4911|4936|6333|6759)[0-9]{14}|(4903|4905|4911|4936|6333|6759)[0-9]{15}|564182[0-9]{10}|564182[0-9]{12}|564182[0-9]{13}|633110[0-9]{10}|633110[0-9]{12}|633110[0-9]{13}$"
    - "^(6334|6767)[0-9]{12}|(6334|6767)[0-9]{14}|(6334|6767)[0-9]{15}$"
    library: []
    Phone_Number:
    prediction_factors_and_weights:
    name: 0.4
    description: 0
    datatype: 0
    values: 0.6
    name:
    regex:
    - ".*phone.*(num|no).*"
    - ".*(num|no).*phone.*"
    - ".*[^a-z]+ph[^a-z]+.*(num|no).*"
    - ".*(num|no).*[^a-z]+ph[^a-z]+.*"
    - ".*mobile.*(num|no).*"
    - ".*(num|no).*mobile.*"
    - ".*telephone.*(num|no).*"
    - ".*(num|no).*telephone.*"
    - ".*cell.*(num|no).*"
    - ".*(num|no).*cell.*"
    - ".*contact.*(num|no).*"
    - ".*(num|no).*contact.*"
    - ".*landline.*(num|no).*"
    - ".*(num|no).*landline.*"
    - ".*fax.*(num|no).*"
    - ".*(num|no).*fax.*"
    - phone
    - telephone
    - landline
    - mobile
    - tel
    - fax
    - cell
    - contact
    description:
    regex:
    - ".*phone.*(num|no).*"
    - ".*(num|no).*phone.*"
    - ".*[^a-z]+ph[^a-z]+.*(num|no).*"
    - ".*(num|no).*[^a-z]+ph[^a-z]+.*"
    - ".*mobile.*(num|no).*"
    - ".*(num|no).*mobile.*"
    - ".*telephone.*(num|no).*"
    - ".*(num|no).*telephone.*"
    - ".*cell.*(num|no).*"
    - ".*(num|no).*cell.*"
    - ".*contact.*(num|no).*"
    - ".*(num|no).*contact.*"
    - ".*landline.*(num|no).*"
    - ".*(num|no).*landline.*"
    - ".*fax.*(num|no).*"
    - ".*(num|no).*fax.*"
    - phone
    - telephone
    - landline
    - mobile
    - tel
    - fax
    - cell
    - contact
    datatype:
    type:
    - int
    - str
    values:
    prediction_type: library
    regex: []
    library:
    - phonenumbers
    Street_Address:
    prediction_factors_and_weights:
    name: 0.5
    description: 0
    datatype: 0
    values: 0.5
    name:
    regex:
    - ".*street.*add.*"
    - ".*add.*street.*"
    - ".*full.*add.*"
    - ".*add.*full.*"
    - ".*mail.*add.*"
    - ".*add.*mail.*"
    - add[^a-z]+
    - address
    - street
    description:
    regex:
    - ".*street.*add.*"
    - ".*add.*street.*"
    - ".*full.*add.*"
    - ".*add.*full.*"
    - ".*mail.*add.*"
    - ".*add.*mail.*"
    - add[^a-z]+
    - address
    - street
    datatype:
    type:
    - str
    values:
    prediction_type: library
    regex: []
    library:
    - spacy
    Full_Name:
    prediction_factors_and_weights:
    name: 0.3
    description: 0
    datatype: 0
    values: 0.7
    name:
    regex:
    - ".*person.*name.*"
    - ".*name.*person.*"
    - ".*user.*name.*"
    - ".*name.*user.*"
    - ".*full.*name.*"
    - ".*name.*full.*"
    - fullname
    - name
    - person
    - user
    description:
    regex:
    - ".*person.*name.*"
    - ".*name.*person.*"
    - ".*user.*name.*"
    - ".*name.*user.*"
    - ".*full.*name.*"
    - ".*name.*full.*"
    - fullname
    - name
    - person
    - user
    datatype:
    type:
    - str
    values:
    prediction_type: library
    regex: []
    library:
    - spacy
    Age:
    prediction_factors_and_weights:
    name: 0.65
    description: 0
    datatype: 0
    values: 0.35
    name:
    regex:
    - age[^a-z]+.*
    - ".*[^a-z]+age"
    - ".*[^a-z]+age[^a-z]+.*"
    - age
    description:
    regex:
    - age[^a-z]+.*
    - ".*[^a-z]+age"
    - ".*[^a-z]+age[^a-z]+.*"
    - age
    datatype:
    type:
    - int
    values:
    prediction_type: library
    regex: []
    library:
    - rule_based_logic

    Example with Advanced Configuration: Specifying custom info type

    source:
    type: snowflake
    config:
    env: PROD
    # Coordinates
    account_id: account_name
    warehouse: "COMPUTE_WH"

    # Credentials
    username: user
    password: pass
    role: "sysadmin"

    # Options
    top_n_queries: 10
    email_domain: mycompany.com

    classification:
    enabled: True
    classifiers:
    - type: datahub
    config:
    confidence_level_threshold: 0.7
    minimum_values_threshold: 10
    info_types_config:
    CloudRegion:
    prediction_factors_and_weights:
    name: 0
    description: 0
    datatype: 0
    values: 1
    values:
    prediction_type: regex
    regex:
    - "(af|ap|ca|eu|me|sa|us)-(central|north|(north(?:east|west))|south|south(?:east|west)|east|west)-\\d+"
    library: []
    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/docs/dev_guides/reporting_telemetry/index.html b/docs/metadata-ingestion/docs/dev_guides/reporting_telemetry/index.html index ff442365e669f..59331e49f4a2d 100644 --- a/docs/metadata-ingestion/docs/dev_guides/reporting_telemetry/index.html +++ b/docs/metadata-ingestion/docs/dev_guides/reporting_telemetry/index.html @@ -8,7 +8,7 @@ - + @@ -27,7 +27,7 @@ key of the entry_points section in setup.py with its type and implementation class as shown below.

    entry_points = {
    # <snip other keys>"
    "datahub.ingestion.reporting_provider.plugins": [
    "datahub = datahub.ingestion.reporting.datahub_ingestion_run_summary_provider:DatahubIngestionRunSummaryProvider",
    "file = datahub.ingestion.reporting.file_reporter:FileReporter",
    ],
    }

    Datahub Reporting Ingestion State Provider

    This is the reporting state provider implementation that is available out of the box in datahub. Its type is datahub and it is implemented on top of the datahub_api client and the timeseries aspect capabilities of the datahub-backend.

    Config details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldRequiredDefaultDescription
    typedatahubThe type of the ingestion reporting provider registered with datahub.
    configThe datahub_api config if set at pipeline level. Otherwise, the default DatahubClientConfig. See the defaults here.The configuration required for initializing the datahub reporting provider.
    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/docs/dev_guides/sql_profiles/index.html b/docs/metadata-ingestion/docs/dev_guides/sql_profiles/index.html index e608ea3c84cb1..41a4a7045d5e3 100644 --- a/docs/metadata-ingestion/docs/dev_guides/sql_profiles/index.html +++ b/docs/metadata-ingestion/docs/dev_guides/sql_profiles/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ While we've done our best to limit the expensiveness of the queries the profiler runs, you should be prudent about the set of tables profiling is enabled on or the frequency of the profiling runs.

    Capabilities

    Extracts:

    • Row and column counts for each table
    • For each column, if applicable:
      • null counts and proportions
      • distinct counts and proportions
      • minimum, maximum, mean, median, standard deviation, some quantile values
      • histograms or frequencies of unique values

    Supported Sources

    SQL profiling is supported for all SQL sources. Check the individual source page to verify if it supports profiling.

    Questions

    If you've got any questions on configuring profiling, feel free to ping us on our Slack!

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/docs/dev_guides/stateful/index.html b/docs/metadata-ingestion/docs/dev_guides/stateful/index.html index f13b689ece182..4bb297e57bd2c 100644 --- a/docs/metadata-ingestion/docs/dev_guides/stateful/index.html +++ b/docs/metadata-ingestion/docs/dev_guides/stateful/index.html @@ -8,7 +8,7 @@ - + @@ -26,7 +26,7 @@ IngestionCheckpointingProviderBase interface and register itself with datahub by adding an entry under datahub.ingestion.checkpointing_provider.plugins key of the entry_points section in setup.py with its type and implementation class as shown below.

    entry_points = {
    # <snip other keys>"
    "datahub.ingestion.checkpointing_provider.plugins": [
    "datahub = datahub.ingestion.source.state_provider.datahub_ingestion_checkpointing_provider:DatahubIngestionCheckpointingProvider",
    ],
    }

    Datahub Checkpointing Ingestion State Provider

    This is the state provider implementation that is available out of the box. Its type is datahub and it is implemented on top of the datahub_api client and the timeseries aspect capabilities of the datahub-backend.

    Config details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldRequiredDefaultDescription
    state_provider.typedatahubThe type of the ingestion state provider registered with datahub
    state_provider.configThe datahub_api config if set at pipeline level. Otherwise, the default DatahubClientConfig. See the defaults here.The configuration required for initializing the state provider.
    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/docs/transformer/dataset_transformer/index.html b/docs/metadata-ingestion/docs/transformer/dataset_transformer/index.html index 209a750124344..e9e826fd711c3 100644 --- a/docs/metadata-ingestion/docs/transformer/dataset_transformer/index.html +++ b/docs/metadata-ingestion/docs/transformer/dataset_transformer/index.html @@ -8,7 +8,7 @@ - + @@ -25,7 +25,7 @@ First, let's get all our imports in:

    # append these to the start of custom_transform_example.py
    import json
    from typing import List, Optional

    from datahub.configuration.common import ConfigModel
    from datahub.ingestion.api.common import PipelineContext
    from datahub.ingestion.transformer.add_dataset_ownership import Semantics
    from datahub.ingestion.transformer.base_transformer import (
    BaseTransformer,
    SingleAspectTransformer,
    )
    from datahub.metadata.schema_classes import (
    OwnerClass,
    OwnershipClass,
    OwnershipTypeClass,
    )

    Next, let's define the base scaffolding for the class:

    # append this to the end of custom_transform_example.py

    class AddCustomOwnership(BaseTransformer, SingleAspectTransformer):
    """Transformer that adds owners to datasets according to a callback function."""

    # context param to generate run metadata such as a run ID
    ctx: PipelineContext
    # as defined in the previous block
    config: AddCustomOwnershipConfig

    def __init__(self, config: AddCustomOwnershipConfig, ctx: PipelineContext):
    super().__init__()
    self.ctx = ctx
    self.config = config

    with open(self.config.owners_json, "r") as f:
    raw_owner_urns = json.load(f)

    self.owners = [
    OwnerClass(owner=owner, type=OwnershipTypeClass.DATAOWNER)
    for owner in raw_owner_urns
    ]

    A transformer must have two functions: a create() function for initialization and a transform() function for executing the transformation. Transformers that extend BaseTransformer and SingleAspectTransformer can avoid having to implement the more complex transform function and just implement the transform_aspect function.

    Let's begin by adding a create() method for parsing our configuration dictionary:

    # add this as a function of AddCustomOwnership

    @classmethod
    def create(cls, config_dict: dict, ctx: PipelineContext) -> "AddCustomOwnership":
    config = AddCustomOwnershipConfig.parse_obj(config_dict)
    return cls(config, ctx)

    Next we need to tell the helper classes which entity types and aspect we are interested in transforming. In this case, we want to only process dataset entities and transform the ownership aspect.

    def entity_types(self) -> List[str]:
    return ["dataset"]

    def aspect_name(self) -> str:
    return "ownership"

    Finally we need to implement the transform_aspect() method that does the work of adding our custom ownership classes. This method will be called by the framework with an optional aspect value filled out if the upstream source produced a value for this aspect. The framework takes care of pre-processing both MCE-s and MCP-s so that the transform_aspect() function is only called once per entity. Our job is merely to inspect the incoming aspect (or absence) and produce a transformed value for this aspect. Returning None from this method will effectively suppress this aspect from being emitted.

    # add this as a function of AddCustomOwnership

    def transform_aspect( # type: ignore
    self, entity_urn: str, aspect_name: str, aspect: Optional[OwnershipClass]
    ) -> Optional[OwnershipClass]:

    owners_to_add = self.owners
    assert aspect is None or isinstance(aspect, OwnershipClass)

    if owners_to_add:
    ownership = (
    aspect
    if aspect
    else OwnershipClass(
    owners=[],
    )
    )
    ownership.owners.extend(owners_to_add)

    return ownership

    More Sophistication: Making calls to DataHub during Transformation

    In some advanced cases, you might want to check with DataHub before performing a transformation. A good example for this might be retrieving the current set of owners of a dataset before providing the new set of owners during an ingestion process. To allow transformers to always be able to query the graph, the framework provides them access to the graph through the context object ctx. Connectivity to the graph is automatically instantiated anytime the pipeline uses a REST sink. In case you are using the Kafka sink, you can additionally provide access to the graph by configuring it in your pipeline.

    Here is an example of a recipe that uses Kafka as the sink, but provides access to the graph by explicitly configuring the datahub_api.

    source:
    type: mysql
    config:
    # ..source configs

    sink:
    type: datahub-kafka
    config:
    connection:
    bootstrap: localhost:9092
    schema_registry_url: "http://localhost:8081"

    datahub_api:
    server: http://localhost:8080
    # standard configs accepted by datahub rest client ...

    Advanced Use-Case: Patching Owners

    With the above capability, we can now build more powerful transformers that can check with the server-side state before issuing changes in metadata. e.g. Here is how the AddDatasetOwnership transformer can now support PATCH semantics by ensuring that it never deletes any owners that are stored on the server.

    def transform_one(self, mce: MetadataChangeEventClass) -> MetadataChangeEventClass:
    if not isinstance(mce.proposedSnapshot, DatasetSnapshotClass):
    return mce
    owners_to_add = self.config.get_owners_to_add(mce.proposedSnapshot)
    if owners_to_add:
    ownership = builder.get_or_add_aspect(
    mce,
    OwnershipClass(
    owners=[],
    ),
    )
    ownership.owners.extend(owners_to_add)

    if self.config.semantics == Semantics.PATCH:
    assert self.ctx.graph
    patch_ownership = AddDatasetOwnership.get_ownership_to_set(
    self.ctx.graph, mce.proposedSnapshot.urn, ownership
    )
    builder.set_aspect(
    mce, aspect=patch_ownership, aspect_type=OwnershipClass
    )
    return mce

    Installing the package

    Now that we've defined the transformer, we need to make it visible to DataHub. The easiest way to do this is to just place it in the same directory as your recipe, in which case the module name is the same as the file – in this case, custom_transform_example.

    Advanced: Installing as a package and enable discoverability
    Alternatively, create a `setup.py` in the same directory as our transform script to make it visible globally. After installing this package (e.g. with `python setup.py` or `pip install -e .`), our module will be installed and importable as `custom_transform_example`.
    from setuptools import find_packages, setup

    setup(
    name="custom_transform_example",
    version="1.0",
    packages=find_packages(),
    # if you don't already have DataHub installed, add it under install_requires
    # install_requires=["acryl-datahub"],
    entry_points={
    "datahub.ingestion.transformer.plugins": [
    "custom_transform_example_alias = custom_transform_example:AddCustomOwnership",
    ],
    },
    )

    Additionally, declare the transformer under the entry_points variable of the setup script. This enables the transformer to be listed when running datahub check plugins, and sets up the transformer's shortened alias for use in recipes.

    Running the transform

    transformers:
    - type: "custom_transform_example_alias"
    config:
    owners_json: "<path_to_owners_json>" # the JSON file mentioned at the start

    After running datahub ingest -c <path_to_recipe>, our MCEs will now have the following owners appended:

    "owners": [
    {
    "owner": "urn:li:corpuser:athos",
    "type": "DATAOWNER",
    "source": null
    },
    {
    "owner": "urn:li:corpuser:porthos",
    "type": "DATAOWNER",
    "source": null
    },
    {
    "owner": "urn:li:corpuser:aramis",
    "type": "DATAOWNER",
    "source": null
    },
    {
    "owner": "urn:li:corpGroup:the_three_musketeers",
    "type": "DATAOWNER",
    "source": null
    },
    // ...and any additional owners
    ],

    All the files for this tutorial may be found here.

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/docs/transformer/intro/index.html b/docs/metadata-ingestion/docs/transformer/intro/index.html index a5c95c4ce0563..84cc355c0b10b 100644 --- a/docs/metadata-ingestion/docs/transformer/intro/index.html +++ b/docs/metadata-ingestion/docs/transformer/intro/index.html @@ -8,13 +8,13 @@ - +

    Transformers

    What’s a transformer?

    Oftentimes we want to modify metadata before it reaches the ingestion sink – for instance, we might want to add custom tags, ownership, properties, or patch some fields. A transformer allows us to do exactly these things.

    Moreover, a transformer allows one to have fine-grained control over the metadata that’s ingested without having to modify the ingestion framework's code yourself. Instead, you can write your own module that can transform metadata events however you like. To include a transformer into a recipe, all that's needed is the name of the transformer as well as any configuration that the transformer needs.

    note

    Providing urns for metadata that does not already exist will result in unexpected behavior. Ensure any tags, terms, domains, etc. urns that you want to apply in your transformer already exist in your DataHub instance.

    For example, adding a domain urn in your transformer to apply to datasets will not create the domain entity if it doesn't exist. Therefore, you can't add documentation to it and it won't show up in Advanced Search. This goes for any metadata you are applying in transformers.

    Provided transformers

    Aside from the option of writing your own transformer (see below), we provide some simple transformers for the use cases of adding: tags, glossary terms, properties and ownership information.

    DataHub provided transformers for dataset are:

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/examples/transforms/index.html b/docs/metadata-ingestion/examples/transforms/index.html index d97d0688cf0f7..9e64506c749fc 100644 --- a/docs/metadata-ingestion/examples/transforms/index.html +++ b/docs/metadata-ingestion/examples/transforms/index.html @@ -8,13 +8,13 @@ - +
    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/index.html b/docs/metadata-ingestion/index.html index 4c18ca9974b8e..4768f91956ec0 100644 --- a/docs/metadata-ingestion/index.html +++ b/docs/metadata-ingestion/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ ingestion recipe is producing the desired metadata events before ingesting them into datahub.

    # Dry run
    datahub ingest -c ./examples/recipes/example_to_datahub_rest.dhub.yml --dry-run
    # Short-form
    datahub ingest -c ./examples/recipes/example_to_datahub_rest.dhub.yml -n

    The --preview option of the ingest command performs all of the ingestion steps, but limits the processing to only the first 10 workunits produced by the source. This option helps with quick end-to-end smoke testing of the ingestion recipe.

    # Preview
    datahub ingest -c ./examples/recipes/example_to_datahub_rest.dhub.yml --preview
    # Preview with dry-run
    datahub ingest -c ./examples/recipes/example_to_datahub_rest.dhub.yml -n --preview

    By default --preview creates 10 workunits. But if you wish to try producing more workunits you can use another option --preview-workunits

    # Preview 20 workunits without sending anything to sink
    datahub ingest -c ./examples/recipes/example_to_datahub_rest.dhub.yml -n --preview --preview-workunits=20

    Reporting

    By default, the cli sends an ingestion report to DataHub, which allows you to see the result of all cli-based ingestion in the UI. This can be turned off with the --no-default-report flag.

    # Running ingestion with reporting to DataHub turned off
    datahub ingest -c ./examples/recipes/example_to_datahub_rest.dhub.yaml --no-default-report

    The reports include the recipe that was used for ingestion. This can be turned off by adding an additional section to the ingestion recipe.

    source:
    # source configs

    sink:
    # sink configs

    # Add configuration for the datahub reporter
    reporting:
    - type: datahub
    config:
    report_recipe: false

    # Optional log to put failed JSONs into a file
    # Helpful in case you are trying to debug some issue with specific ingestion failing
    failure_log:
    enabled: false
    log_config:
    filename: ./path/to/failure.json

    Deploying and scheduling ingestion to the UI

    The deploy subcommand of the ingest command tree allows users to upload their recipes and schedule them in the server.

    datahub ingest deploy -n <user friendly name for ingestion> -c recipe.yaml

    By default, no schedule is done unless explicitly configured with the --schedule parameter. Timezones are inferred from the system time and can be overridden with the --time-zone flag.

    datahub ingest deploy -n test --schedule "0 * * * *" --time-zone "Europe/London" -c recipe.yaml

    Transformations

    If you'd like to modify data before it reaches the ingestion sinks – for instance, adding additional owners or tags – you can use a transformer to write your own module and integrate it with DataHub. Transformers require extending the recipe with a new section to describe the transformers that you want to run.

    For example, a pipeline that ingests metadata from MSSQL and applies a default "important" tag to all datasets is described below:

    # A recipe to ingest metadata from MSSQL and apply default tags to all tables
    source:
    type: mssql
    config:
    username: sa
    password: ${MSSQL_PASSWORD}
    database: DemoData

    transformers: # an array of transformers applied sequentially
    - type: simple_add_dataset_tags
    config:
    tag_urns:
    - "urn:li:tag:Important"
    # default sink, no config needed

    Check out the transformers guide to learn more about how you can create really flexible pipelines for processing metadata using Transformers!

    Using as a library (SDK)

    In some cases, you might want to construct Metadata events directly and use programmatic ways to emit that metadata to DataHub. In this case, take a look at the Python emitter and the Java emitter libraries which can be called from your own code.

    Programmatic Pipeline

    In some cases, you might want to configure and run a pipeline entirely from within your custom Python script. Here is an example of how to do it.

    Developing

    See the guides on developing, adding a source and using transformers.

    Compatibility

    DataHub server uses a 3 digit versioning scheme, while the CLI uses a 4 digit scheme. For example, if you're using DataHub server version 0.10.0, you should use CLI version 0.10.0.x, where x is a patch version. We do this because we do CLI releases at a much higher frequency than server releases, usually every few days vs twice a month.

    For ingestion sources, any breaking changes will be highlighted in the release notes. When fields are deprecated or otherwise changed, we will try to maintain backwards compatibility for two server releases, which is about 4-6 weeks. The CLI will also print warnings whenever deprecated options are used.

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/integration_docs/great-expectations/index.html b/docs/metadata-ingestion/integration_docs/great-expectations/index.html index eff7ed8c56e00..99210aa97050a 100644 --- a/docs/metadata-ingestion/integration_docs/great-expectations/index.html +++ b/docs/metadata-ingestion/integration_docs/great-expectations/index.html @@ -8,13 +8,13 @@ - +

    Great Expectations

    This guide helps to set up and configure DataHubValidationAction in Great Expectations to send assertions (expectations) and their results to DataHub using DataHub's Python Rest emitter.

    Capabilities

    DataHubValidationAction pushes assertions metadata to DataHub. This includes

    • Assertion Details: Details of assertions (i.e. expectation) set on a Dataset (Table).
    • Assertion Results: Evaluation results for an assertion tracked over time.

    This integration supports v3 api datasources using SqlAlchemyExecutionEngine.

    Limitations

    This integration does not support

    • v2 Datasources such as SqlAlchemyDataset
    • v3 Datasources using execution engine other than SqlAlchemyExecutionEngine (Spark, Pandas)
    • Cross-dataset expectations (those involving > 1 table)

    Setting up

    1. Install the required dependency in your Great Expectations environment.
      pip install 'acryl-datahub[great-expectations]'
    2. To add DataHubValidationAction in Great Expectations Checkpoint, add the following configuration in action_list for your Great Expectations Checkpoint. For more details on setting action_list, see Checkpoints and Actions
      action_list:
      - name: datahub_action
      action:
      module_name: datahub.integrations.great_expectations.action
      class_name: DataHubValidationAction
      server_url: http://localhost:8080 #datahub server url
      Configuration options:
      • server_url (required): URL of DataHub GMS endpoint
      • env (optional, defaults to "PROD"): Environment to use in namespace when constructing dataset URNs.
      • exclude_dbname (optional): Exclude dbname / catalog when constructing dataset URNs. (Highly applicable to Trino / Presto where we want to omit catalog e.g. hive)
      • platform_alias (optional): Platform alias when constructing dataset URNs. e.g. main data platform is presto-on-hive but using trino to run the test
      • platform_instance_map (optional): Platform instance mapping to use when constructing dataset URNs. Maps the GX 'data source' name to a platform instance on DataHub. e.g. platform_instance_map: { "datasource_name": "warehouse" }
      • graceful_exceptions (defaults to true): If set to true, most runtime errors in the lineage backend will be suppressed and will not cause the overall checkpoint to fail. Note that configuration issues will still throw exceptions.
      • token (optional): Bearer token used for authentication.
      • timeout_sec (optional): Per-HTTP request timeout.
      • retry_status_codes (optional): Retry HTTP request also on these status codes.
      • retry_max_times (optional): Maximum times to retry if HTTP request fails. The delay between retries is increased exponentially.
      • extra_headers (optional): Extra headers which will be added to the datahub request.
      • parse_table_names_from_sql (defaults to false): The integration can use an SQL parser to try to parse the datasets being asserted. This parsing is disabled by default, but can be enabled by setting parse_table_names_from_sql: True. The parser is based on the sqllineage package.
      • convert_urns_to_lowercase (optional): Whether to convert dataset urns to lowercase.

    Debugging

    Set environment variable DATAHUB_DEBUG (default false) to true to enable debug logging for DataHubValidationAction.

    Learn more

    To see the Great Expectations in action, check out this demo from the Feb 2022 townhall.

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/schedule_docs/airflow/index.html b/docs/metadata-ingestion/schedule_docs/airflow/index.html index 48d5d198fedc9..6d5cb233252ac 100644 --- a/docs/metadata-ingestion/schedule_docs/airflow/index.html +++ b/docs/metadata-ingestion/schedule_docs/airflow/index.html @@ -8,13 +8,13 @@ - +

    Using Airflow

    If you are using Apache Airflow for your scheduling then you might want to also use it for scheduling your ingestion recipes. For any Airflow specific questions you can go through Airflow docs for more details.

    We've provided a few examples of how to configure your DAG:

    • mysql_sample_dag embeds the full MySQL ingestion configuration inside the DAG.

    • snowflake_sample_dag avoids embedding credentials inside the recipe, and instead fetches them from Airflow's Connections feature. You must configure your connections in Airflow to use this approach.

    tip

    These example DAGs use the PythonVirtualenvOperator to run the ingestion. This is the recommended approach, since it guarantees that there will not be any conflicts between DataHub and the rest of your Airflow environment.

    When configuring the task, it's important to specify the requirements with your source and set the system_site_packages option to false.

    ingestion_task = PythonVirtualenvOperator(
    task_id="ingestion_task",
    requirements=[
    "acryl-datahub[<your-source>]",
    ],
    system_site_packages=False,
    python_callable=your_callable,
    )
    Advanced: loading a recipe file

    In more advanced cases, you might want to store your ingestion recipe in a file and load it from your task.

    • Ensure the recipe file is in a folder accessible to your airflow workers. You can either specify absolute path on the machines where Airflow is installed or a path relative to AIRFLOW_HOME.
    • Ensure DataHub CLI is installed in your airflow environment.
    • Create a DAG task to read your DataHub ingestion recipe file and run it. See the example below for reference.
    • Deploy the DAG file into airflow for scheduling. Typically this involves checking in the DAG file into your dags folder which is accessible to your Airflow instance.

    Example: generic_recipe_sample_dag

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/schedule_docs/cron/index.html b/docs/metadata-ingestion/schedule_docs/cron/index.html index 50128942226d8..9ad16875a4057 100644 --- a/docs/metadata-ingestion/schedule_docs/cron/index.html +++ b/docs/metadata-ingestion/schedule_docs/cron/index.html @@ -8,13 +8,13 @@ - +

    Using Cron

    Assume you have a recipe file /home/ubuntu/datahub_ingest/mysql_to_datahub.yml on your machine

    source:
    type: mysql
    config:
    # Coordinates
    host_port: localhost:3306
    database: dbname

    # Credentials
    username: root
    password: example

    sink:
    type: datahub-rest
    config:
    server: http://localhost:8080

    We can use crontab to schedule ingestion to run five minutes after midnight, every day using DataHub CLI.

    5 0 * * * datahub ingest -c /home/ubuntu/datahub_ingest/mysql_to_datahub.yml

    Read through crontab docs for more options related to scheduling.

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/schedule_docs/datahub/index.html b/docs/metadata-ingestion/schedule_docs/datahub/index.html index b071d7746f771..3ce85ab6968fc 100644 --- a/docs/metadata-ingestion/schedule_docs/datahub/index.html +++ b/docs/metadata-ingestion/schedule_docs/datahub/index.html @@ -8,13 +8,13 @@ - +
    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/schedule_docs/intro/index.html b/docs/metadata-ingestion/schedule_docs/intro/index.html index a30d576a3ae0f..cb7aa845265e8 100644 --- a/docs/metadata-ingestion/schedule_docs/intro/index.html +++ b/docs/metadata-ingestion/schedule_docs/intro/index.html @@ -8,13 +8,13 @@ - +

    Introduction to Scheduling Metadata Ingestion

    Given a recipe file /home/ubuntu/datahub_ingest/mysql_to_datahub.yml.

    source:
    type: mysql
    config:
    # Coordinates
    host_port: localhost:3306
    database: dbname

    # Credentials
    username: root
    password: example

    sink:
    type: datahub-rest
    config:
    server: http://localhost:8080

    We can do ingestion of our metadata using DataHub CLI as follows

    datahub ingest -c /home/ubuntu/datahub_ingest/mysql_to_datahub.yml

    This will ingest metadata from the mysql source which is configured in the recipe file. This does ingestion once. As the source system changes we would like to have the changes reflected in DataHub. To do this someone will need to re-run the ingestion command using a recipe file.

    As an alternative to running the command manually, we can schedule the ingestion to run on a regular basis. In this section we give some examples of how ingestion of metadata into DataHub can be scheduled.

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/schedule_docs/kubernetes/index.html b/docs/metadata-ingestion/schedule_docs/kubernetes/index.html index 2a08d26bec22e..c8505bdaf3959 100644 --- a/docs/metadata-ingestion/schedule_docs/kubernetes/index.html +++ b/docs/metadata-ingestion/schedule_docs/kubernetes/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    Using Kubernetes

    If you have deployed DataHub using our official helm charts you can use the datahub ingestion cron subchart to schedule ingestions.

    Here is an example of what that configuration would look like in your values.yaml:

    datahub-ingestion-cron:
    enabled: true
    crons:
    mysql:
    schedule: "0 * * * *" # Every hour
    recipe:
    configmapName: recipe-config
    fileName: mysql_recipe.yml

    This assumes the pre-existence of a Kubernetes ConfigMap which holds all recipes being scheduled in the same namespace as where the cron jobs will be running.

    An example could be:

    apiVersion: v1
    kind: ConfigMap
    metadata:
    name: recipe-config
    data:
    mysql_recipe.yml: |-
    source:
    type: mysql
    config:
    # Coordinates
    host_port: <MYSQL HOST>:3306
    database: dbname

    # Credentials
    username: root
    password: example

    sink:
    type: datahub-rest
    config:
    server: http://<GMS_HOST>:8080

    For more information, please see the documentation of this sub-chart.

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/sink_docs/console/index.html b/docs/metadata-ingestion/sink_docs/console/index.html index a8ebd14531b67..735020e190c1a 100644 --- a/docs/metadata-ingestion/sink_docs/console/index.html +++ b/docs/metadata-ingestion/sink_docs/console/index.html @@ -8,13 +8,13 @@ - +

    Console

    For context on getting started with ingestion, check out our metadata ingestion guide.

    Setup

    Works with acryl-datahub out of the box.

    Capabilities

    Simply prints each metadata event to stdout. Useful for experimentation and debugging purposes.

    Quickstart recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    # source configs

    sink:
    type: "console"

    Config details

    None!

    Questions

    If you've got any questions on configuring this sink, feel free to ping us on our Slack!

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/sink_docs/datahub/index.html b/docs/metadata-ingestion/sink_docs/datahub/index.html index 0a76e8133e1bb..39125d7e2aff0 100644 --- a/docs/metadata-ingestion/sink_docs/datahub/index.html +++ b/docs/metadata-ingestion/sink_docs/datahub/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    DataHub

    DataHub Rest

    For context on getting started with ingestion, check out our metadata ingestion guide.

    Setup

    To install this plugin, run pip install 'acryl-datahub[datahub-rest]'.

    Capabilities

    Pushes metadata to DataHub using the GMS REST API. The advantage of the REST-based interface is that any errors can immediately be reported.

    Quickstart recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide. This should point to the GMS server.

    source:
    # source configs
    sink:
    type: "datahub-rest"
    config:
    server: "http://localhost:8080"

    If you are running the ingestion in a container in docker and your GMS is also running in docker then you should use the internal docker hostname of the GMS pod. Usually it would look something like

    source:
    # source configs
    sink:
    type: "datahub-rest"
    config:
    server: "http://datahub-gms:8080"

    If GMS is running in a kubernetes pod deployed through the helm charts and you are trying to connect to it from within the kubernetes cluster then you should use the Kubernetes service name of GMS. Usually it would look something like

    source:
    # source configs
    sink:
    type: "datahub-rest"
    config:
    server: "http://datahub-datahub-gms.datahub.svc.cluster.local:8080"

    If you are using UI based ingestion then where GMS is deployed decides what hostname you should use.

    Config details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldRequiredDefaultDescription
    serverURL of DataHub GMS endpoint.
    timeout_sec30Per-HTTP request timeout.
    retry_max_times1Maximum times to retry if HTTP request fails. The delay between retries is increased exponentially
    retry_status_codes[429, 502, 503, 504]Retry HTTP request also on these status codes
    tokenBearer token used for authentication.
    extra_headersExtra headers which will be added to the request.
    max_threads15Experimental: Max parallelism for REST API calls
    ca_certificate_pathPath to server's CA certificate for verification of HTTPS communications
    client_certificate_pathPath to client's CA certificate for HTTPS communications
    disable_ssl_verificationfalseDisable ssl certificate validation

    DataHub Kafka

    For context on getting started with ingestion, check out our metadata ingestion guide.

    Setup

    To install this plugin, run pip install 'acryl-datahub[datahub-kafka]'.

    Capabilities

    Pushes metadata to DataHub by publishing messages to Kafka. The advantage of the Kafka-based interface is that it's asynchronous and can handle higher throughput.

    Quickstart recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    # source configs

    sink:
    type: "datahub-kafka"
    config:
    connection:
    bootstrap: "localhost:9092"
    schema_registry_url: "http://localhost:8081"

    Config details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldRequiredDefaultDescription
    connection.bootstrapKafka bootstrap URL.
    connection.producer_config.<option>Passed to https://docs.confluent.io/platform/current/clients/confluent-kafka-python/html/index.html#confluent_kafka.SerializingProducer
    connection.schema_registry_urlURL of schema registry being used.
    connection.schema_registry_config.<option>Passed to https://docs.confluent.io/platform/current/clients/confluent-kafka-python/html/index.html#confluent_kafka.schema_registry.SchemaRegistryClient
    topic_routes.MetadataChangeEventMetadataChangeEventOverridden Kafka topic name for the MetadataChangeEvent
    topic_routes.MetadataChangeProposalMetadataChangeProposalOverridden Kafka topic name for the MetadataChangeProposal

    The options in the producer config and schema registry config are passed to the Kafka SerializingProducer and SchemaRegistryClient respectively.

    For a full example with a number of security options, see this example recipe.

    DataHub Lite (experimental)

    A sink that provides integration with DataHub Lite for local metadata exploration and serving.

    Setup

    To install this plugin, run pip install 'acryl-datahub[datahub-lite]'.

    Capabilities

    Pushes metadata to a local DataHub Lite instance.

    Quickstart recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    # source configs
    sink:
    type: "datahub-lite"

    By default, datahub-lite uses a DuckDB database and will write to a database file located under ~/.datahub/lite/.

    To configure the location, you can specify it directly in the config:

    source:
    # source configs
    sink:
    type: "datahub-lite"
    config:
    type: "duckdb"
    config:
    file: "<path_to_duckdb_file>"
    note

    DataHub Lite currently doesn't support stateful ingestion, so you'll have to turn off stateful ingestion in your recipe to use it. This will be fixed shortly.

    Config details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldRequiredDefaultDescription
    typeduckdbType of DataHub Lite implementation to use
    config{"file": "~/.datahub/lite/datahub.duckdb"}Config dictionary to pass through to the DataHub Lite implementation. See below for fields accepted by the DuckDB implementation

    DuckDB Config Details

    FieldRequiredDefaultDescription
    file"~/.datahub/lite/datahub.duckdb"File to use for DuckDB storage
    options{}Options dictionary to pass through to DuckDB library. See the official spec for the options supported by DuckDB.

    Questions

    If you've got any questions on configuring this sink, feel free to ping us on our Slack!

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/sink_docs/file/index.html b/docs/metadata-ingestion/sink_docs/file/index.html index 0c18946bc1502..0c9901bdd44f5 100644 --- a/docs/metadata-ingestion/sink_docs/file/index.html +++ b/docs/metadata-ingestion/sink_docs/file/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    File

    For context on getting started with ingestion, check out our metadata ingestion guide.

    Setup

    Works with acryl-datahub out of the box.

    Capabilities

    Outputs metadata to a file. This can be used to decouple metadata sourcing from the process of pushing it into DataHub, and is particularly useful for debugging purposes. Note that the file source can read files generated by this sink.

    Quickstart recipe

    Check out the following recipe to get started with ingestion! See below for full configuration options.

    For general pointers on writing and running a recipe, see our main recipe guide.

    source:
    # source configs

    sink:
    type: file
    config:
    filename: ./path/to/mce/file.json

    Config details

    Note that a . is used to denote nested fields in the YAML recipe.

    FieldRequiredDefaultDescription
    filenamePath to file to write to.

    Questions

    If you've got any questions on configuring this sink, feel free to ping us on our Slack!

    - + \ No newline at end of file diff --git a/docs/metadata-ingestion/source-docs-template/index.html b/docs/metadata-ingestion/source-docs-template/index.html index a001d78802a9d..38ed29dbd6ee0 100644 --- a/docs/metadata-ingestion/source-docs-template/index.html +++ b/docs/metadata-ingestion/source-docs-template/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    Source Name

    Certified Incubating Testing

    Integration Details

    Concept Mapping

    This ingestion source maps the following Source System Concepts to DataHub Concepts:

    Source ConceptDataHub ConceptNotes
    Data Platform
    Dataset
    Data Job
    Data Flow
    Chart
    Dashboard
    User (a.k.a CorpUser)
    CorpGroup
    Domain
    Container
    Tag
    GlossaryTerm
    GlossaryNode
    Assertion
    DataProcess
    MlFeature
    MlFeatureTable
    MlModel
    MlModelDeployment
    MlPrimaryKey
    SchemaField
    DataHubPolicy
    DataHubIngestionSource
    DataHubSecret
    DataHubExecutionRequest
    DataHubRetention

    Supported Capabilities

    CapabilityStatusNotes
    Data ContainerEnabled by default
    Detect Deleted EntitiesRequires recipe configuration
    Data DomainRequires transformer
    Dataset ProfilingRequires acryl-datahub[source-usage-name]
    Dataset UsageRequires acryl-datahub[source-usage-name]
    Extract DescriptionsEnabled by default
    Extract LineageEnabled by default
    Extract OwnershipEnabled by default
    Extract TagsRequires transformer
    Partition SupportNot applicable to source
    Platform InstanceNot applicable to source
    ...

    Metadata Ingestion Quickstart

    Prerequisites

    In order to ingest metadata from [Source Name], you will need:

    • eg. Python version, source version, source access requirements
    • eg. Steps to configure source access
    • ...

    Install the Plugin(s)

    Run the following commands to install the relevant plugin(s):

    pip install 'acryl-datahub[source-name]'

    pip install 'acryl-datahub[source-usage-name]'

    Configure the Ingestion Recipe(s)

    Use the following recipe(s) to get started with ingestion.

    For general pointers on writing and running a recipe, see our main recipe guide.

    'acryl-datahub[source-name]'

    source:
    type: source_name
    config:
    # Required fields
    option1: value1

    sink:
    # sink configs
    View All Recipe Configuration Options
    | Field | Required | Default | Description | | --- | :-: | :-: | --- | | `field1` | ✅ | `default_value` | A required field with a default value | | `field2` | ❌ | `default_value` | An optional field with a default value | | `field3` | ❌ | | An optional field without a default value | | ... | | |

    'acryl-datahub[source-usage-name]'

    source:
    type: source-usage-name
    config:
    # Required Fields
    option1: value1

    # Options
    top_n_queries: 10

    sink:
    # sink configs
    View All Recipe Configuration Options
    | Field | Required | Default | Description | | --- | :-: | :-: | --- | | `field1` | ✅ | `default_value` | A required field with a default value | | `field2` | ❌ | `default_value` | An optional field with a default value | | `field3` | ❌ | | An optional field without a default value | | ... | | |

    Troubleshooting

    [Common Issue]

    [Provide description of common issues with this integration and steps to resolve]

    - + \ No newline at end of file diff --git a/docs/metadata-integration/java/as-a-library/index.html b/docs/metadata-integration/java/as-a-library/index.html index 8f55de92b8380..ff1874a678710 100644 --- a/docs/metadata-integration/java/as-a-library/index.html +++ b/docs/metadata-integration/java/as-a-library/index.html @@ -8,14 +8,14 @@ - +

    Java Emitter

    In some cases, you might want to construct Metadata events directly and use programmatic ways to emit that metadata to DataHub. Use-cases are typically push-based and include emitting metadata events from CI/CD pipelines, custom orchestrators etc.

    The io.acryl:datahub-client Java package offers REST emitter API-s, which can be easily used to emit metadata from your JVM-based systems. For example, the Spark lineage integration uses the Java emitter to emit metadata events from Spark jobs.

    Pro Tip! Throughout our API guides, we have examples of using the Java API SDK. Look out for the | Java | tab within our tutorials.

    Installation

    Follow the specific instructions for your build system to declare a dependency on the appropriate version of the package.

    Note: Check the Maven repository for the latest version of the package before following the instructions below.

    Gradle

    Add the following to your build.gradle.

    implementation 'io.acryl:datahub-client:__version__'

    Maven

    Add the following to your pom.xml.

    <!-- https://mvnrepository.com/artifact/io.acryl/datahub-client -->
    <dependency>
    <groupId>io.acryl</groupId>
    <artifactId>datahub-client</artifactId>
    <!-- replace __version__ with the latest version number -->
    <version>__version__</version>
    </dependency>

    REST Emitter

    The REST emitter is a thin wrapper on top of the Apache HttpClient library. It supports non-blocking emission of metadata and handles the details of JSON serialization of metadata aspects over the wire.

    Constructing a REST Emitter follows a lambda-based fluent builder pattern. The config parameters mirror the Python emitter configuration for the most part. In addition, you can also customize the HttpClient that is constructed under the hood by passing in customizations to the HttpClient builder.

    import datahub.client.rest.RestEmitter;
    //...
    RestEmitter emitter = RestEmitter.create(b -> b
    .server("http://localhost:8080")
    //Auth token for Managed DataHub .token(AUTH_TOKEN_IF_NEEDED)
    //Override default timeout of 10 seconds .timeoutSec(OVERRIDE_DEFAULT_TIMEOUT_IN_SECONDS)
    //Add additional headers .extraHeaders(Collections.singletonMap("Session-token", "MY_SESSION"))
    // Customize HttpClient's connection ttl .customizeHttpAsyncClient(c -> c.setConnectionTimeToLive(30, TimeUnit.SECONDS))
    );

    Usage

    import com.linkedin.dataset.DatasetProperties;
    import com.linkedin.events.metadata.ChangeType;
    import datahub.event.MetadataChangeProposalWrapper;
    import datahub.client.rest.RestEmitter;
    import datahub.client.Callback;
    // ... followed by

    // Creates the emitter with the default coordinates and settings
    RestEmitter emitter = RestEmitter.createWithDefaults();

    MetadataChangeProposalWrapper mcpw = MetadataChangeProposalWrapper.builder()
    .entityType("dataset")
    .entityUrn("urn:li:dataset:(urn:li:dataPlatform:bigquery,my-project.my-dataset.user-table,PROD)")
    .upsert()
    .aspect(new DatasetProperties().setDescription("This is the canonical User profile dataset"))
    .build();

    // Blocking call using future
    Future<MetadataWriteResponse> requestFuture = emitter.emit(mcpw, null).get();

    // Non-blocking using callback
    emitter.emit(mcpw, new Callback() {
    @Override
    public void onCompletion(MetadataWriteResponse response) {
    if (response.isSuccess()) {
    System.out.println(String.format("Successfully emitted metadata event for %s", mcpw.getEntityUrn()));
    } else {
    // Get the underlying http response
    HttpResponse httpResponse = (HttpResponse) response.getUnderlyingResponse();
    System.out.println(String.format("Failed to emit metadata event for %s, aspect: %s with status code: %d",
    mcpw.getEntityUrn(), mcpw.getAspectName(), httpResponse.getStatusLine().getStatusCode()));
    // Print the server side exception if it was captured
    if (response.getServerException() != null) {
    System.out.println(String.format("Server side exception was %s", response.getServerException()));
    }
    }
    }

    @Override
    public void onFailure(Throwable exception) {
    System.out.println(
    String.format("Failed to emit metadata event for %s, aspect: %s due to %s", mcpw.getEntityUrn(),
    mcpw.getAspectName(), exception.getMessage()));
    }
    });

    REST Emitter Code

    If you're interested in looking at the REST emitter code, it is available here.

    Kafka Emitter

    The Kafka emitter is a thin wrapper on top of the SerializingProducer class from confluent-kafka and offers a non-blocking interface for sending metadata events to DataHub. Use this when you want to decouple your metadata producer from the uptime of your datahub metadata server by utilizing Kafka as a highly available message bus. For example, if your DataHub metadata service is down due to planned or unplanned outages, you can still continue to collect metadata from your mission critical systems by sending it to Kafka. Also use this emitter when throughput of metadata emission is more important than acknowledgement of metadata being persisted to DataHub's backend store.

    Note: The Kafka emitter uses Avro to serialize the Metadata events to Kafka. Changing the serializer will result in unprocessable events as DataHub currently expects the metadata events over Kafka to be serialized in Avro.

    Usage



    import java.io.IOException;
    import java.util.concurrent.ExecutionException;
    import com.linkedin.dataset.DatasetProperties;
    import datahub.client.kafka.KafkaEmitter;
    import datahub.client.kafka.KafkaEmitterConfig;
    import datahub.event.MetadataChangeProposalWrapper;

    // ... followed by

    // Creates the emitter with the default coordinates and settings
    KafkaEmitterConfig.KafkaEmitterConfigBuilder builder = KafkaEmitterConfig.builder(); KafkaEmitterConfig config = builder.build();
    KafkaEmitter emitter = new KafkaEmitter(config);

    //Test if topic is available

    if(emitter.testConnection()){

    MetadataChangeProposalWrapper mcpw = MetadataChangeProposalWrapper.builder()
    .entityType("dataset")
    .entityUrn("urn:li:dataset:(urn:li:dataPlatform:bigquery,my-project.my-dataset.user-table,PROD)")
    .upsert()
    .aspect(new DatasetProperties().setDescription("This is the canonical User profile dataset"))
    .build();

    // Blocking call using future
    Future<MetadataWriteResponse> requestFuture = emitter.emit(mcpw, null).get();

    // Non-blocking using callback
    emitter.emit(mcpw, new Callback() {

    @Override
    public void onFailure(Throwable exception) {
    System.out.println("Failed to send with: " + exception);
    }
    @Override
    public void onCompletion(MetadataWriteResponse metadataWriteResponse) {
    if (metadataWriteResponse.isSuccess()) {
    RecordMetadata metadata = (RecordMetadata) metadataWriteResponse.getUnderlyingResponse();
    System.out.println("Sent successfully over topic: " + metadata.topic());
    } else {
    System.out.println("Failed to send with: " + metadataWriteResponse.getUnderlyingResponse());
    }
    }
    });

    }
    else {
    System.out.println("Kafka service is down.");
    }

    Kafka Emitter Code

    If you're interested in looking at the Kafka emitter code, it is available here.

    File Emitter

    The File emitter writes metadata change proposal events (MCPs) into a JSON file that can be later handed off to the Python File source for ingestion. This works analogously to the File sink in Python. This mechanism can be used when the system producing metadata events doesn't have a direct connection to DataHub's REST server or Kafka brokers. The generated JSON file can be transferred later and then ingested into DataHub using the File source.

    Usage



    import datahub.client.file.FileEmitter;
    import datahub.client.file.FileEmitterConfig;
    import datahub.event.MetadataChangeProposalWrapper;

    // ... followed by


    // Define output file co-ordinates
    String outputFile = "/my/path/output.json";

    //Create File Emitter
    FileEmitter emitter = new FileEmitter(FileEmitterConfig.builder().fileName(outputFile).build());

    // A couple of sample metadata events
    MetadataChangeProposalWrapper mcpwOne = MetadataChangeProposalWrapper.builder()
    .entityType("dataset")
    .entityUrn("urn:li:dataset:(urn:li:dataPlatform:bigquery,my-project.my-dataset.user-table,PROD)")
    .upsert()
    .aspect(new DatasetProperties().setDescription("This is the canonical User profile dataset"))
    .build();

    MetadataChangeProposalWrapper mcpwTwo = MetadataChangeProposalWrapper.builder()
    .entityType("dataset")
    .entityUrn("urn:li:dataset:(urn:li:dataPlatform:bigquery,my-project.my-dataset.fact-orders-table,PROD)")
    .upsert()
    .aspect(new DatasetProperties().setDescription("This is the canonical Fact table for orders"))
    .build();

    MetadataChangeProposalWrapper[] mcpws = { mcpwOne, mcpwTwo };
    for (MetadataChangeProposalWrapper mcpw : mcpws) {
    emitter.emit(mcpw);
    }
    emitter.close(); // calling close() is important to ensure file gets closed cleanly

    File Emitter Code

    If you're interested in looking at the File emitter code, it is available here.

    Support for S3, GCS etc.

    The File emitter only supports writing to the local filesystem currently. If you're interested in adding support for S3, GCS etc., contributions are welcome!

    Other Languages

    Emitter API-s are also supported for:

    - + \ No newline at end of file diff --git a/docs/metadata-integration/java/datahub-protobuf/index.html b/docs/metadata-integration/java/datahub-protobuf/index.html index 8f942a5ed2fb2..6248546198f6f 100644 --- a/docs/metadata-integration/java/datahub-protobuf/index.html +++ b/docs/metadata-integration/java/datahub-protobuf/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ The standard "Deprecation" aspect is used for a dataset generated from a protobuf message. Field deprecation adds a tag with the following urn urn:li:tag:deprecated (red, #FF000).

       message msg {
    extend google.protobuf.MessageOptions {
    repeated string deprecation_note = 5620 [(meta.fld.type) = DEPRECATION];
    uint64 deprecation_time = 5621 [(meta.fld.type) = DEPRECATION];
    }
    }

    message Message {
    option deprecated = true;
    option (meta.msg.deprecation_note) = "Deprecated for this other message.";
    option (meta.msg.deprecation_note) = "Drop in replacement.";
    option (meta.msg.deprecation_time) = 1649689387;
    }

    The field deprecation tag works without definition in meta.proto using the native protobuf option.

    message Message {
    uint32 my_field = 1 [deprecated = true];
    }

    Installation

    Follow the specific instructions for your build system to declare a dependency on the appropriate version of the package.

    Note: Check the Maven repository for the latest version of the package before following the instructions below.

    Gradle

    Add the following to your build.gradle.

    implementation 'io.acryl:datahub-protobuf:__version__'

    Maven

    Add the following to your pom.xml.

    <!-- https://mvnrepository.com/artifact/io.acryl/datahub-protobuf -->
    <dependency>
    <groupId>io.acryl</groupId>
    <artifactId>datahub-protobuf</artifactId>
    <!-- replace __version__ with the latest version number -->
    <version>__version__</version>
    </dependency>

    Example Application (embedded)

    An example application Proto2DataHub is included as part of this project. You can also set up a standalone project that works with the protobuf-gradle-plugin, see the standalone example project as an example of such a project.

    Usage

    Standalone Application: Proto2DataHub

    shell
    java -jar build/libs/datahub-protobuf-0.8.45-SNAPSHOT.jar --help
    usage: Proto2DataHub
    --datahub_api <arg> [Optional] The API endpoint for DataHub GMS.
    (defaults to https://localhost:8080)
    --datahub_token <arg> [Optional] The authentication token for
    DataHub API access. (defaults to empty)
    --datahub_user <arg> [Optional] The datahub user to attribute this
    ingestion to. (defaults to ..)
    --descriptor <arg> [Required] The generated protobuf descriptor
    file. Typically a single .dsc file for the
    repo or a .protoc file (1:1 with each src
    file)
    --directory <arg> [Optional if using --file] The root directory
    containing protobuf source files.
    --env <arg> [Optional] The environment to attach all
    entities to. Typically, DEV, PROD etc.
    (defaults to DEV)
    --exclude <arg> [Optional] Exclude patterns to avoid
    processing all source files, separated by ,.
    Typically used with --directory option.
    Follows glob patterns: e.g. --exclude
    "build/**,generated/**" will exclude all files
    in the build and generated directories under
    the rootDirectory given by the --directory
    option
    --file <arg> [Optional if using --directory] The protobuf
    source file. Typically a .proto file.
    --filename <arg> [Required if using transport file] Filename to
    write output to.
    --github_org <arg> [Optional] The GitHub organization that this
    schema repository belongs to. We will
    translate comments in your protoc files like
    @datahub-project/data-team to GitHub team urls
    like:
    https://github.com/orgs/datahub-project/teams/
    data-team
    --help Print this help message
    --platform <arg> [Optional] The data platform to produce
    schemas for. e.g. kafka, snowflake, etc.
    (defaults to kafka)
    --slack_id <arg> [Optional] The Slack team id if your protobuf
    files contain comments with references to
    channel names. We will translate comments like
    #data-eng in your protobuf file to slack urls
    like:
    https://slack.com/app_redirect?channel=data-en
    g&team=T1234 following the documentation at
    (https://api.slack.com/reference/deep-linking#
    deep-linking-into-your-slack-app__opening-a-ch
    annel-by-name-or-id) The easiest way to find
    your Slack team id is to open your workspace
    in your browser. It should look something
    like:
    https://app.slack.com/client/TUMKD5EGJ/... In
    this case, the team-id is TUMKD5EGJ.
    --subtype [Optional] A custom subtype to attach to all
    entities produced. e.g. event, schema, topic
    etc.(Default is schema)
    --transport <arg> [Optional] What transport to use to
    communicate with DataHub. Options are: rest
    (default), kafka and file.

    You can run it like a standard java jar application:


    java -jar build/libs/datahub-protobuf-0.8.45-SNAPSHOT.jar --descriptor ../datahub-protobuf-example/build/descriptors/main.dsc --directory ../datahub-protobuf-example/schema/protobuf/ --transport rest

    or using gradle

    ../../../gradlew run --args="--descriptor ../datahub-protobuf-example/build/descriptors/main.dsc --directory ../datahub-protobuf-example/schema/protobuf/ --transport rest"

    Result:

    java -jar build/libs/datahub-protobuf-0.8.45-SNAPSHOT.jar --descriptor ../datahub-protobuf-example/build/descriptors/main.dsc --directory ../datahub-protobuf-example/schema/protobuf/ --transport rest
    SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
    SLF4J: Defaulting to no-operation (NOP) logger implementation
    SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
    ✅ Successfully emitted 90 events for 5 files to DataHub REST

    You can also route results to a file by using the --transport file --filename events.json options.

    Important Flags

    Here are a few important flags to use with this command

    • --env : Defaults to DEV, you should use PROD once you have ironed out all the issues with running this command.
    • --platform: Defaults to Kafka (as most people use protobuf schema repos with Kafka), but you can provide a custom platform name for this e.g. (schema_repo or <company_name>_schemas). If you use a custom platform, make sure to provision the custom platform on your DataHub instance with a logo etc, to get a native experience. See how to use the put platform command to accomplish this.
    • --subtype : This gives your entities a more descriptive category than Dataset in the UI. Defaults to schema, but you might find topic, event or message more descriptive.

    Example Application (separate project)

    The standalone example project shows you how you can create an independent project that uses this as part of a build task.

    Sample Usage:

    export DATAHUB_API=...
    export DATAHUB_TOKEN=...

    # Optional parameters
    # export DATAHUB_ENV=PROD
    # export DATAHUB_GITHUBORG=datahub-project
    # export DATAHUB_SLACKID=

    # publishSchema task will publish all the protobuf files into DataHub
    ./gradlew publishSchema
    - + \ No newline at end of file diff --git a/docs/metadata-integration/java/spark-lineage/index.html b/docs/metadata-integration/java/spark-lineage/index.html index ab4216c8bf2f3..eec865ed795f8 100644 --- a/docs/metadata-integration/java/spark-lineage/index.html +++ b/docs/metadata-integration/java/spark-lineage/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ This library has also been tested to work with Spark versions(2.2.0 - 2.4.8) and Scala versions(2.10 - 2.12). For the Spark 3.x series, this has been tested to work with Spark 3.1.2 and 3.2.0 with Scala 2.12. Other combinations are not guaranteed to work currently. Support for other Spark versions is planned in the very near future.

    Environments tested with

    This initial release has been tested with the following environments:

    • spark-submit of Python/Java applications to local and remote servers
    • Jupyter notebooks with pyspark code
    • Standalone Java applications
    • Databricks Standalone Cluster

    Testing with Databricks Standard and High-concurrency Cluster is not done yet.

    Spark commands supported

    Below is a list of Spark commands that are parsed currently:

    • InsertIntoHadoopFsRelationCommand
    • SaveIntoDataSourceCommand (jdbc)
    • SaveIntoDataSourceCommand (Delta Lake)
    • CreateHiveTableAsSelectCommand
    • InsertIntoHiveTable

    Effectively, these support data sources/sinks corresponding to Hive, HDFS, JDBC, and Delta Lake.

    DataFrame.persist command is supported for below LeafExecNodes:

    • FileSourceScanExec
    • HiveTableScanExec
    • RowDataSourceScanExec
    • InMemoryTableScanExec

    Spark commands not yet supported

    • View related commands
    • Cache commands and implications on lineage
    • RDD jobs

    Important notes on usage

    • It is advisable to ensure appName is used appropriately to ensure you can trace lineage from a pipeline back to your source code.
    • If multiple apps with the same appName run concurrently, dataset-lineage will be captured correctly but the custom-properties e.g. app-id, SQLQueryId would be unreliable. We expect this to be quite rare.
    • If spark execution fails, then an empty pipeline would still get created, but it may not have any tasks.
    • For HDFS sources, the folder (name) is regarded as the dataset (name) to align with typical storage of parquet/csv formats.

    Debugging

    • Following info logs are generated

    On Spark context startup

    YY/MM/DD HH:mm:ss INFO DatahubSparkListener: DatahubSparkListener initialised.
    YY/MM/DD HH:mm:ss INFO SparkContext: Registered listener datahub.spark.DatahubSparkListener

    On application start

    YY/MM/DD HH:mm:ss INFO DatahubSparkListener: Application started: SparkListenerApplicationStart(AppName,Some(local-1644489736794),1644489735772,user,None,None)
    YY/MM/DD HH:mm:ss INFO McpEmitter: REST Emitter Configuration: GMS url <rest.server>
    YY/MM/DD HH:mm:ss INFO McpEmitter: REST Emitter Configuration: Token XXXXX

    On pushing data to server

    YY/MM/DD HH:mm:ss INFO McpEmitter: MetadataWriteResponse(success=true, responseContent={"value":"<URN>"}, underlyingResponse=HTTP/1.1 200 OK [Date: day, DD month year HH:mm:ss GMT, Content-Type: application/json, X-RestLi-Protocol-Version: 2.0.0, Content-Length: 97, Server: Jetty(9.4.46.v20220331)] [Content-Length: 97,Chunked: false])

    On application end

    YY/MM/DD HH:mm:ss INFO DatahubSparkListener: Application ended : AppName AppID
    • To enable debugging logs, add below configuration in log4j.properties file
    log4j.logger.datahub.spark=DEBUG
    log4j.logger.datahub.client.rest=DEBUG

    Known limitations

    • Only postgres is supported for JDBC sources in this initial release. Support for other driver URL formats will be added in the future.
    • Behavior with cached datasets is not fully specified/defined in context of lineage.
    • There is a possibility that very short-lived jobs that run within a few milliseconds may not be captured by the listener. This should not cause an issue for realistic Spark applications.
    - + \ No newline at end of file diff --git a/docs/metadata-jobs/index.html b/docs/metadata-jobs/index.html index 15284f668f6ea..dfaef791a0561 100644 --- a/docs/metadata-jobs/index.html +++ b/docs/metadata-jobs/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    MXE Processing Jobs

    DataHub uses Kafka as the pub-sub message queue in the backend. There are 2 Kafka topics used by DataHub which are MetadataChangeEvent and MetadataAuditEvent.

    • MetadataChangeEvent: This message is emitted by any data platform or crawler in which there is a change in the metadata.
    • MetadataAuditEvent: This message is emitted by DataHub GMS to notify that metadata change is registered.

    To be able to consume from these two topics, there are two Spring jobs DataHub uses:

    - + \ No newline at end of file diff --git a/docs/metadata-jobs/mae-consumer-job/index.html b/docs/metadata-jobs/mae-consumer-job/index.html index 39e9dfcdb3b4b..eed69c57df552 100644 --- a/docs/metadata-jobs/mae-consumer-job/index.html +++ b/docs/metadata-jobs/mae-consumer-job/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ Elasticsearch, and Neo4j Docker containers are up and running.

    Start via Docker image

    The quickest way to try out Metadata Audit Event Consumer Job is running the Docker image.

    Start via command line

    If you do modify things and want to try it out quickly without building the Docker image, you can also run the application directly from command line after a successful build:

    MCL_CONSUMER_ENABLED=true ./gradlew :metadata-jobs:mae-consumer-job:bootRun

    Endpoints

    Spring boot actuator has been enabled for MAE Application. healthcheck, metrics and info web endpoints are enabled by default.

    healthcheck - http://localhost:9091/actuator/health

    metrics - http://localhost:9091/actuator/metrics

    To retrieve a specific metric - http://localhost:9091/actuator/metrics/process.uptime

    - + \ No newline at end of file diff --git a/docs/metadata-jobs/mce-consumer-job/index.html b/docs/metadata-jobs/mce-consumer-job/index.html index 808cec8b08efb..bc94e28880210 100644 --- a/docs/metadata-jobs/mce-consumer-job/index.html +++ b/docs/metadata-jobs/mce-consumer-job/index.html @@ -8,7 +8,7 @@ - + @@ -27,7 +27,7 @@ will not increase ingestion performance.

    Additional Endpoints

    /* - Restli service endpoints (standalone consumer mode)

    The Restli service endpoints are exposed for use locally (not via a K8 service) by the MCE consumer job itself. This is only true in standalone mode. When run within GMS, the Restli endpoints are already available and accessed via the K8 service.

    - + \ No newline at end of file diff --git a/docs/metadata-modeling/extending-the-metadata-model/index.html b/docs/metadata-modeling/extending-the-metadata-model/index.html index 4b8c8543d4e62..7a8cde0c2de4b 100644 --- a/docs/metadata-modeling/extending-the-metadata-model/index.html +++ b/docs/metadata-modeling/extending-the-metadata-model/index.html @@ -8,7 +8,7 @@ - + @@ -92,7 +92,7 @@ replaces it with its own- this has a different boostScore and a different fieldName.

     /**
    * Tags associated with the field
    */
    @Searchable = {
    "/tags/*/tag": {
    "fieldName": "fieldTags",
    "fieldType": "URN_WITH_PARTIAL_MATCHING",
    "queryByDefault": true,
    "boostScore": 0.5
    }
    }
    globalTags: optional GlobalTags

    As a result, you can issue a query specifically for tags on Schema Fields via fieldTags:<tag_name> or tags directly applied to an entity via tags:<tag_name>. Since both have queryByDefault set to true, you can also search for entities with either of these properties just by searching for the tag name.

    • To fork or not to fork?
    • This Guide
    • Defining an Entity
      • Step 1: Define the Entity Key Aspect
      • Step 2: Create the new entity with its key aspect
      • Step 3: Define custom aspects or attach existing aspects to your entity
      • Step 4: Choose a place to store your model extension
      • Step 5: Attaching your non-key Aspect(s) to the Entity
      • Step 6 (Oss-Fork approach): Re-build DataHub to have access to your new or updated entity
      • (Optional) Step 7: Extend the DataHub frontend to view your entity in GraphQL & React
    • Metadata Annotations
    - + \ No newline at end of file diff --git a/docs/metadata-modeling/metadata-model/index.html b/docs/metadata-modeling/metadata-model/index.html index ab3abe1f627a7..3c36104c7e0f5 100644 --- a/docs/metadata-modeling/metadata-model/index.html +++ b/docs/metadata-modeling/metadata-model/index.html @@ -8,7 +8,7 @@ - + @@ -81,7 +81,7 @@ the group-by/aggregate query, in addition to echoing the input params.

    • columnNames: the names of the table columns. The group-by key names appear in the same order as they are specified in the request. Aggregation specifications follow the grouping fields in the same order as specified in the request, and will be named <agg_name>_<fieldPath>.
    • columnTypes: the data types of the columns.
    • rows: the data values, each row corresponding to the respective bucket(s).

    Example: Latest unique user count for each day.

    # QUERY
    curl --location --request POST 'http://localhost:8080/analytics?action=getTimeseriesStats' \
    --header 'X-RestLi-Protocol-Version: 2.0.0' \
    --header 'Content-Type: application/json' \
    --data-raw '{
    "entityName": "dataset",
    "aspectName": "datasetUsageStatistics",
    "filter": {
    "criteria": []
    },
    "metrics": [
    {
    "fieldPath": "uniqueUserCount",
    "aggregationType": "LATEST"
    }
    ],
    "buckets": [
    {
    "key": "timestampMillis",
    "type": "DATE_GROUPING_BUCKET",
    "timeWindowSize": {
    "multiple": 1,
    "unit": "DAY"
    }
    }
    ]
    }'

    # SAMPLE RESPONSE
    {
    "value": {
    "filter": {
    "criteria": []
    },
    "aspectName": "datasetUsageStatistics",
    "entityName": "dataset",
    "groupingBuckets": [
    {
    "type": "DATE_GROUPING_BUCKET",
    "timeWindowSize": {
    "multiple": 1,
    "unit": "DAY"
    },
    "key": "timestampMillis"
    }
    ],
    "aggregationSpecs": [
    {
    "fieldPath": "uniqueUserCount",
    "aggregationType": "LATEST"
    }
    ],
    "table": {
    "columnNames": [
    "timestampMillis",
    "latest_uniqueUserCount"
    ],
    "rows": [
    [
    "1631491200000",
    "1"
    ]
    ],
    "columnTypes": [
    "long",
    "int"
    ]
    }
    }
    }

    For more examples on the complex types of group-by/aggregations, refer to the tests in the group getAggregatedStats of ElasticSearchTimeseriesAspectServiceTest.java.

    - + \ No newline at end of file diff --git a/docs/metadata-models-custom/index.html b/docs/metadata-models-custom/index.html index 7610a1e2558d5..cb49affae0620 100644 --- a/docs/metadata-models-custom/index.html +++ b/docs/metadata-models-custom/index.html @@ -8,7 +8,7 @@ - + @@ -25,7 +25,7 @@ https://github.com/acryldata/datahub-helm/blob/master/charts/datahub/subcharts/datahub-gms/templates/deployment.yaml#L182 You can do this by setting the datahub-gms.extraVolumeMounts in values.yaml

    at the end your values.yaml should have something like:

    datahub-gms:
    ...
    extraVolumes:
    - name: custom-model
    configMap:
    name: custom-model ## should match configmap name above
    extraVolumeMounts:
    - name: custom-model-dir
    mountPath: /etc/plugins/models/<registry-name>/<registry-version>

    The mountPath can be configured using ENTITY_REGISTRY_PLUGIN_PATH and defaults to /etc/datahub/plugins/models.

    Check if your model got loaded successfully

    Assuming that you are running DataHub on localhost, you can curl the config endpoint to see the model load status.

    curl -s http://localhost:8080/config | jq .
    {
    "models": {
    "mycompany-dq-model": {
    "0.0.1": {
    "loadResult": "SUCCESS",
    "registryLocation": "/Users/username/.datahub/plugins/models/mycompany-dq-model/0.0.1",
    "failureCount": 0
    }
    }
    },
    "noCode": "true"
    }

    Alternatively, you could type in http://localhost:8080/config in your browser.

    Add some metadata with your new model

    We have included some sample scripts that you can modify to upload data corresponding to your new data model. The scripts/insert_one.sh script takes the scripts/data/dq_rule.json file and attaches it to the dataset_urn entity using the datahub cli.

    cd scripts
    ./insert_one.sh

    results in

    Update succeeded with status 200

    The scripts/insert_custom_aspect.py script shows you how to accomplish the same using the Python SDK. Note that we are just using a raw dictionary here to represent the dq_rule aspect and not a strongly-typed class.

    cd scripts
    python3 insert_custom_aspect.py

    results in

    Successfully wrote to DataHub

    Advanced Guide

    A few things that you will likely do as you start creating new models and creating metadata that conforms to those models.

    Deleting metadata associated with a model

    The datahub cli supports deleting metadata associated with a model as a customization of the delete command.

    e.g. datahub delete by-registry --registry-id=mycompany-dq-model:0.0.1 --hard will delete all data written using this registry name and version pair.

    Evolve the metadata model

    As you evolve the metadata model, you can publish new versions of the repository and deploy it into DataHub as well using the same steps outlined above. DataHub will check whether your new models are backwards compatible with the previous versioned model and decline loading models that are backwards incompatible.

    The Future

    Hopefully this repository shows you how easily you can extend and customize DataHub's metadata model!

    We will be continuing to make the experience less reliant on core changes to DataHub and reducing the need to fork the main repository.

    - + \ No newline at end of file diff --git a/docs/metadata-service/index.html b/docs/metadata-service/index.html index 40f0bc5bb1866..e030327b87814 100644 --- a/docs/metadata-service/index.html +++ b/docs/metadata-service/index.html @@ -8,7 +8,7 @@ - + @@ -43,7 +43,7 @@ the dataset-specific aspects are located under metadata-models/src/main/pegasus/com/linkedin/metadata/dataset.

    3. How do I find the valid set of Relationship names?

    All relationships are defined on foreign-key fields inside Aspect PDLs. They are reflected by fields bearing the @Relationship annotation. Inside this annotation is a "name" field that defines the standardized name of the Relationship to be used when querying.

    By convention, all entity PDLs live under metadata-models/src/main/pegasus/com/linkedin/metadata/common or metadata-models/src/main/pegasus/com/linkedin/metadata/<entity-name>. For example, the dataset-specific aspects are located under metadata-models/src/main/pegasus/com/linkedin/metadata/dataset.

    - + \ No newline at end of file diff --git a/docs/metadata-service/services/index.html b/docs/metadata-service/services/index.html index f2b3f72d6e234..957b44a0d6480 100644 --- a/docs/metadata-service/services/index.html +++ b/docs/metadata-service/services/index.html @@ -8,7 +8,7 @@ - + @@ -16,7 +16,7 @@

    Service Layer

    Module to abstract away business logic from implementation specific libraries to make them lighter weight from a dependency perspective. Service classes should be here unless they require direct usage of implementation specific libraries (i.e. ElasticSearch, Ebean, Neo4J, etc.).

    - + \ No newline at end of file diff --git a/docs/ownership/ownership-types/index.html b/docs/ownership/ownership-types/index.html index 0f5a6e754b1fd..51e1842e18955 100644 --- a/docs/ownership/ownership-types/index.html +++ b/docs/ownership/ownership-types/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ This allows stakeholders to discover what relationships an owner of an entity has using the language already in-use at organizations.

    How Can You Use Custom Ownership Types?

    Custom Ownership types have been implemented as a net-new entity in DataHub's Metadata Model meaning all entity-related APIs can be used for them. Additionally, they can be managed through DataHub's Admin UI and then used for ownership across the system in the same way pre-existing ownership types are.

    Custom Ownership Types Setup, Prerequisites, and Permissions

    What you need to create and add ownership types:

    • Manage Ownership Types metadata privilege to create/delete/update Ownership Types at the platform level. These can be granted by a Platform Policy.
    • Edit Owners metadata privilege to add or remove an owner with an associated custom ownership type for a given entity.

    You can grant these privileges by creating a new Metadata Policy.

    Using Custom Ownership Types

    Custom Ownership Types can be managed using the UI, via a graphQL command or ingesting an MCP which can be managed using software engineering (GitOps) practices.

    Managing Custom Ownership Types

    To manage a Custom Ownership type, first navigate to the DataHub Admin page:

    Then navigate to the `Ownership Types` tab under the `Management` section.

    To create a new type simply click '+ Create new Ownership Type'.

    This will open a new modal where you can configure your Ownership Type.

    Inside the form, you can choose a name for your Ownership Type. You can also add descriptions for your ownership types to help other users more easily understand their meaning.

    Don't worry, this can be changed later.

    Once you've chosen a name and a description, click 'Save' to create the new Ownership Type.

    You can also edit and delete types in this UI by clicking the ellipsis in the management view for the type you wish to change or delete.

    Assigning a Custom Ownership Type to an Entity (UI)

    You can assign an owner with a custom ownership type to an entity using the Entity's page as the starting point.

    On an Entity's profile page, use the right sidebar to locate the Owners section.

    Click 'Add Owners', select the owner you want and then search for the Custom Ownership Type you'd like to add this asset to. When you're done, click 'Add'.

    To remove ownership from an asset, click the 'x' icon on the Owner label.

    Notice: Adding or removing an Owner to an asset requires the Edit Owners Metadata Privilege, which can be granted by a Policy.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/perf-test/index.html b/docs/perf-test/index.html index a9978d8fa6a1b..f3102fec11c25 100644 --- a/docs/perf-test/index.html +++ b/docs/perf-test/index.html @@ -8,7 +8,7 @@ - + @@ -23,7 +23,7 @@ locally, it should be http://localhost:8080). Click on the "Start swarming" button to start the load test.

    The web interface should give you statistics on number of requests, latency, response rate, etc.

    Command Line

    To run on the command line, run the following

    locust -f <<path-to-locustfile>> --headless -H <<host>> -u <<num-users>> -r <<spawn-rate>>

    For instance, to replicate the setting in the previous section, run the following

    locust -f perf-test/locustfiles/ingest.py --headless -H http://localhost:8080 -u 100 -r 100

    It should start the load test and print out statistics on the command line.

    Reference

    For more details on how to run locust and various configs, refer to this doc

    To customize the user behavior by modifying the locustfiles, refer to this doc

    - + \ No newline at end of file diff --git a/docs/platform-instances/index.html b/docs/platform-instances/index.html index 670237cc71c1e..38fdf90ccdf8d 100644 --- a/docs/platform-instances/index.html +++ b/docs/platform-instances/index.html @@ -8,14 +8,14 @@ - +

    Working With Platform Instances

    DataHub's metadata model for Datasets supports a three-part key currently:

    • Data Platform (e.g. urn:li:dataPlatform:mysql)
    • Name (e.g. db.schema.name)
    • Env or Fabric (e.g. DEV, PROD, etc.)

    This naming scheme unfortunately does not allow for easy representation of the multiplicity of platforms (or technologies) that might be deployed at an organization within the same environment or fabric. For example, an organization might have multiple Redshift instances in Production and would want to see all the data assets located in those instances inside the DataHub metadata repository.

    As part of the v0.8.24+ releases, we are unlocking the first phase of supporting Platform Instances in the metadata model. This is done via two main additions:

    • The dataPlatformInstance aspect that has been added to Datasets which allows datasets to be associated to an instance of a platform
    • Enhancements to all ingestion sources that allow them to attach a platform instance to the recipe that changes the generated urns to go from urn:li:dataset:(urn:li:dataPlatform:<platform>,<name>,ENV) format to urn:li:dataset:(urn:li:dataPlatform:<platform>,<instance.name>,ENV) format. Sources that produce lineage to datasets in other platforms (e.g. Looker, Superset etc) also have specific configuration additions that allow the recipe author to specify the mapping between a platform and the instance name that it should be mapped to.

    ./imgs/platform-instances-for-ingestion.png

    Naming Platform Instances

    When configuring a platform instance, choose an instance name that is understandable and will be stable for the foreseeable future. e.g. core_warehouse or finance_redshift are allowed names, as are pure guids like a37dc708-c512-4fe4-9829-401cd60ed789. Remember that whatever instance name you choose, you will need to specify it in more than one recipe to ensure that the identifiers produced by different sources will line up.

    Enabling Platform Instances

    Read the Ingestion source specific guides for how to enable platform instances in each of them. The general pattern is to add an additional optional configuration parameter called platform_instance.

    e.g. here is how you would configure a recipe to ingest a mysql instance that you want to call core_finance

    source:
    type: mysql
    config:
    # Coordinates
    host_port: localhost:3306
    platform_instance: core_finance
    database: dbname

    # Credentials
    username: root
    password: example

    sink:
    # sink configs

    - + \ No newline at end of file diff --git a/docs/plugins/index.html b/docs/plugins/index.html index 74aa18e205e19..b174496756af8 100644 --- a/docs/plugins/index.html +++ b/docs/plugins/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ Choose your Identity Management System and write custom authentication plugin as per detail mentioned in this section.

    Currently, custom authenticators cannot be used to authenticate users of DataHub's web UI. This is because the DataHub web app expects the presence of 2 special cookies PLAY_SESSION and actor which are explicitly set by the server when a login action is performed. Instead, custom authenticators are useful for authenticating API requests to DataHub's backend (GMS), and can stand in addition to the default Authentication performed by DataHub, which is based on DataHub-minted access tokens.

    The sample authenticator implementation can be found at Authenticator Sample

    Implementing an Authentication Plugin

    1. Add datahub-auth-api as compileOnly dependency: Maven coordinates of datahub-auth-api can be found at Maven

      Example of gradle dependency is given below.

       dependencies {

      def auth_api = 'io.acryl:datahub-auth-api:0.9.3-3rc3'
      compileOnly "${auth_api}"
      testImplementation "${auth_api}"

      }
    2. Implement the Authenticator interface: Refer Authenticator Sample

      Sample class which implements the Authenticator interface
      public class GoogleAuthenticator implements Authenticator {

      @Override
      public void init(@Nonnull Map<String, Object> authenticatorConfig, @Nullable AuthenticatorContext context) {
      // Plugin initialization code will go here
      // DataHub will call this method on boot time
      }

      @Nullable
      @Override
      public Authentication authenticate(@Nonnull AuthenticationRequest authenticationRequest)
      throws AuthenticationException {
      // DataHub will call this method whenever authentication decisions need to be made
      // Authenticate the request and return Authentication
      }
      }
    3. Use getResourceAsStream to read files: If your plugin reads any configuration file, such as properties, YAML, JSON, or XML, then use this.getClass().getClassLoader().getResourceAsStream("<file-name>") to read that file from the DataHub GMS plugin's class-path. For DataHub GMS resource look-up behavior, please refer to the Plugin Installation section. Sample code of getResourceAsStream is available in the sample Authenticator plugin TestAuthenticator.java.

    1. Bundle your Jar: Use com.github.johnrengelman.shadow gradle plugin to create an uber jar.

      To see an example of building an uber jar, check out the build.gradle file for the apache-ranger-plugin file of Apache Ranger Plugin for reference.

      Exclude signature files as shown in below shadowJar task.

        apply plugin: 'com.github.johnrengelman.shadow';
      shadowJar {
      // Exclude com.datahub.plugins package and files related to jar signature
      exclude "META-INF/*.RSA", "META-INF/*.SF","META-INF/*.DSA"
      }
    2. Refer to the Plugin Installation section for plugin installation in the DataHub environment

    Enable GMS Authentication

    By default, authentication is disabled in DataHub GMS.

    Follow below steps to enable GMS authentication

    1. Download docker-compose.quickstart.yml: Download docker compose file docker-compose.quickstart.yml

    2. Set environment variable: Set METADATA_SERVICE_AUTH_ENABLED environment variable to true

    3. Redeploy DataHub GMS: Below is quickstart command to redeploy DataHub GMS

      datahub docker quickstart -f docker-compose.quickstart.yml

    Authorization

    Note: This is in BETA version

    It is recommended that you do not do this unless you really know what you are doing

    Custom authorization plugin makes it possible to authorize DataHub users against any Access Management System. Choose your Access Management System and write custom authorization plugin as per detail mentioned in this section.

    The sample authorizer implementation can be found at Authorizer Sample

    Implementing an Authorization Plugin

    1. Add datahub-auth-api as compileOnly dependency: Maven coordinates of datahub-auth-api can be found at Maven

      Example of gradle dependency is given below.

       dependencies {

      def auth_api = 'io.acryl:datahub-auth-api:0.9.3-3rc3'
      compileOnly "${auth_api}"
      testImplementation "${auth_api}"

      }
    2. Implement the Authorizer interface: Authorizer Sample

      Sample class which implements the Authorization interface
       public class ApacheRangerAuthorizer implements Authorizer {
      @Override
      public void init(@Nonnull Map<String, Object> authorizerConfig, @Nonnull AuthorizerContext ctx) {
      // Plugin initialization code will go here
      // DataHub will call this method on boot time
      }

      @Override
      public AuthorizationResult authorize(@Nonnull AuthorizationRequest request) {
      // DataHub will call this method whenever authorization decisions need to be made
      // Authorize the request and return AuthorizationResult
      }

      @Override
      public AuthorizedActors authorizedActors(String privilege, Optional<ResourceSpec> resourceSpec) {
      // Need to add doc
      }
      }
    3. Use getResourceAsStream to read files: If your plugin reads any configuration file, such as properties, YAML, JSON, or XML, then use this.getClass().getClassLoader().getResourceAsStream("<file-name>") to read that file from the DataHub GMS plugin's class-path. For DataHub GMS resource look-up behavior, please refer to the Plugin Installation section. Sample code of getResourceAsStream is available in the sample Authenticator plugin TestAuthenticator.java.

    4. Bundle your Jar: Use com.github.johnrengelman.shadow gradle plugin to create an uber jar.

      To see an example of building an uber jar, check out the build.gradle file for the apache-ranger-plugin file of Apache Ranger Plugin for reference.

      Exclude signature files as shown in below shadowJar task.

        apply plugin: 'com.github.johnrengelman.shadow';
      shadowJar {
      // Exclude com.datahub.plugins package and files related to jar signature
      exclude "META-INF/*.RSA", "META-INF/*.SF","META-INF/*.DSA"
      }
    5. Install the Plugin: Refer to the section [Plugin Installation](#plugin_installation) for plugin installation in the DataHub environment

    Plugin Installation

    DataHub's GMS Service searches for the plugins in container's local directory at location /etc/datahub/plugins/auth/. This location will be referred as plugin-base-directory hereafter.

    For docker, we set docker-compose to mount ${HOME}/.datahub directory to /etc/datahub directory within the GMS containers.

    Docker

    Follow below steps to install plugins:

    Let's assume you have created an uber jar for the authorizer plugin, that the jar is named apache-ranger-authorizer.jar, and that the class com.abc.RangerAuthorizer implements the Authorizer interface.

    1. Create a plugin configuration file: Create a config.yml file at ${HOME}/.datahub/plugins/auth/. For more detail on configuration refer Config Detail section

    2. Create a plugin directory: Create plugin directory as apache-ranger-authorizer, this directory will be referred as plugin-home hereafter

       mkdir -p ${HOME}/.datahub/plugins/auth/apache-ranger-authorizer
    3. Copy plugin jar to plugin-home: Copy apache-ranger-authorizer.jar to plugin-home

       copy apache-ranger-authorizer.jar ${HOME}/.datahub/plugins/auth/apache-ranger-authorizer
    4. Update the plugin configuration file: Add the entry below to the config.yml file. The plugin can take any arbitrary configuration under the "configs" block; in our example, this is a username and password

         plugins:
      - name: "apache-ranger-authorizer"
      type: "authorizer"
      enabled: "true"
      params:
      className: "com.abc.RangerAuthorizer"
      configs:
      username: "foo"
      password: "fake"

    5. Restart datahub-gms container:

      On startup DataHub GMS service performs below steps

      1. Load config.yml
      2. Prepare a list of plugins where enabled is set to true
      3. Look for directory equivalent to plugin name in plugin-base-directory. In this case it is /etc/datahub/plugins/auth/apache-ranger-authorizer/, this directory will become plugin-home
      4. Look for params.jarFileName attribute otherwise look for jar having name as <plugin-name>.jar. In this case it is /etc/datahub/plugins/auth/apache-ranger-authorizer/apache-ranger-authorizer.jar
      5. Load class given in plugin params.className attribute from the jar, here load class com.abc.RangerAuthorizer from apache-ranger-authorizer.jar
      6. Call init method of plugin

      On method call of `getResourceAsStream` DataHub GMS service looks for the resource in below order. 1. Look for the requested resource in plugin-jar file. if found then return the resource as InputStream. 2. Look for the requested resource in `plugin-home` directory. if found then return the resource as InputStream. 3. Look for the requested resource in application class-loader. if found then return the resource as InputStream. 4. Return `null` as requested resource is not found.

    By default, authentication is disabled in DataHub GMS. Please follow section Enable GMS Authentication to enable authentication.

    Kubernetes

    Helm support is coming soon.

    Config Detail

    A sample config.yml can be found at config.yml.

    config.yml structure:

    FieldRequiredTypeDefaultDescription
    plugins[].namestringname of the plugin
    plugins[].typeenum[authenticator, authorizer]type of plugin, possible values are authenticator or authorizer
    plugins[].enabledbooleanwhether this plugin is enabled or disabled. DataHub GMS wouldn't process disabled plugin
    plugins[].params.classNamestringAuthenticator or Authorizer implementation class' fully qualified class name
    plugins[].params.jarFileNamestringdefault to plugins[].name.jarjar file name in plugin-home
    plugins[].params.configsmap<string,object>default to empty mapRuntime configuration required for plugin

    plugins[] is an array of plugins, where you can define multiple authenticator and authorizer plugins. Plugin names should be unique in the plugins array.

    Plugin Permissions

    Adhere to below plugin access control to keep your plugin forward compatible.

    • Plugin should read/write file to and from plugin-home directory only. Refer Plugin Installation step2 for plugin-home definition
    • Plugin should access port 80 or 443 or port higher than 1024

    All other access is forbidden for the plugin.

    Disclaimer: In BETA version your plugin can access any port and can read/write to any location on file system, however you should implement the plugin as per above access permission to keep your plugin compatible with upcoming release of DataHub.

    Migration Of Plugins From application.yml

    If you have any custom Authentication or Authorization plugins defined in the authorization or authentication section of application.yml, then migrate them as per the below steps.

    1. Implement Plugin: For Authentication Plugin follow steps of Implementing an Authentication Plugin and for Authorization Plugin follow steps of Implementing an Authorization Plugin

    2. Install Plugin: Install the plugins as per steps mentioned in Plugin Installation. Here you need to map the configuration from application.yml to configuration in config.yml. This mapping from application.yml to config.yml is described below

      Mapping for Authenticators

      a. In config.yml set plugins[].type to authenticator

      b. authentication.authenticators[].type is mapped to plugins[].params.className

      c. authentication.authenticators[].configs is mapped to plugins[].params.configs

      Example Authenticator Plugin configuration in config.yml

      plugins:
      - name: "apache-ranger-authenticator"
      type: "authenticator"
      enabled: "true"
      params:
      className: "com.abc.RangerAuthenticator"
      configs:
      username: "foo"
      password: "fake"

      Mapping for Authorizer

      a. In config.yml set plugins[].type to authorizer

      b. authorization.authorizers[].type is mapped to plugins[].params.className

      c. authorization.authorizers[].configs is mapped to plugins[].params.configs

      Example Authorizer Plugin configuration in config.yml

      plugins:
      - name: "apache-ranger-authorizer"
      type: "authorizer"
      enabled: "true"
      params:
      className: "com.abc.RangerAuthorizer"
      configs:
      username: "foo"
      password: "fake"

    3. Move any other configuration files of your plugin to the plugin-home directory. The detail about plugin-home is mentioned in the Plugin Installation section.

    - + \ No newline at end of file diff --git a/docs/posts/index.html b/docs/posts/index.html index 4bab03d11e92e..aabde15e0f0ee 100644 --- a/docs/posts/index.html +++ b/docs/posts/index.html @@ -8,13 +8,13 @@ - +

    About DataHub Posts

    Feature Availability
    Self-Hosted DataHub
    Managed DataHub
    DataHub allows users to make Posts that can be displayed on the app. Currently, Posts are only supported on the Home Page, but may be extended to other surfaces of the app in the future. Posts can be used to accomplish the following:
    • Allowing Admins to post announcements on the home page
    • Pinning important DataHub assets or pages
    • Pinning important external links

    Posts Setup, Prerequisites, and Permissions

    Anyone can view Posts on the home page. To create Posts, a user must either have the Create Global Announcements Privilege, or possess the Admin DataHub Role.

    Using Posts

    To create a post, users must use the createPost GraphQL mutation. There is currently no way to create posts using the UI, though this will come in the future.

    There is only one type of Post that can be currently made, and that is a Home Page Announcement. This may be extended in the future to other surfaces.

    DataHub currently supports two types of Post content. Posts can either contain TEXT or can be a LINK. When creating a post through GraphQL, users will have to supply the post content.

    For TEXT posts, the following pieces of information are required in the content object (of type UpdatePostContentInput) of the GraphQL input (of type CreatePostInput). TEXT posts cannot be clicked.

    • contentType: TEXT
    • title
    • description

    The link and media attributes are currently unused for TEXT posts.

    For LINK posts, the following pieces of information are required in the content object (of type UpdatePostContentInput) of the GraphQL input (of type CreatePostInput). LINK posts redirect to the provided link when clicked.

    • contentType: LINK
    • title
    • link
    • media. Currently only the IMAGE type is supported, and the URL of the image must be provided

    The description attribute is currently unused for LINK posts.

    Here are some examples of Posts displayed on the home page, with one TEXT post and two LINK posts.

    GraphQL

    Examples

    Create Post
    mutation test {
      createPost(
        input: {
    postType: HOME_PAGE_ANNOUNCEMENT,
    content: {
    contentType: TEXT,
    title: "Planed Upgrade 2023-03-23 20:05 - 2023-03-23 23:05",
    description: "datahub upgrade to v0.10.1"
    }
    }
      )
    }

    List Post
    query listPosts($input: ListPostsInput!) {
      listPosts(input: $input) {
        start
        count
        total
        posts {
          urn
          type
          postType
          content {
            contentType
            title
            description
            link
            media {
              type
              location
              __typename
            }
            __typename
          }
          __typename
        }
        __typename
      }
    }

    Input for list post
    {
      "input": {
        "start": 0,
        "count": 10
      }
    }
    Delete Post
    mutation deletePosting { 
    deletePost (
      urn: "urn:li:post:61dd86fa-9e76-4924-ad45-3a533671835e"
    )
    }

    FAQ and Troubleshooting

    Need more help with Posts? Join the conversation in Slack! Please post in the #ui channel!

    - + \ No newline at end of file diff --git a/docs/python-sdk/builder/index.html b/docs/python-sdk/builder/index.html index 3cdc24df53211..b87a072c381e2 100644 --- a/docs/python-sdk/builder/index.html +++ b/docs/python-sdk/builder/index.html @@ -8,7 +8,7 @@ - + @@ -1332,7 +1332,7 @@ - + \ No newline at end of file diff --git a/docs/python-sdk/clients/index.html b/docs/python-sdk/clients/index.html index f9721fe2d1224..15849837ad455 100644 --- a/docs/python-sdk/clients/index.html +++ b/docs/python-sdk/clients/index.html @@ -8,7 +8,7 @@ - + @@ -915,7 +915,7 @@ - + \ No newline at end of file diff --git a/docs/python-sdk/models/index.html b/docs/python-sdk/models/index.html index 44e2c82279b0a..c57bcea98171a 100644 --- a/docs/python-sdk/models/index.html +++ b/docs/python-sdk/models/index.html @@ -8,7 +8,7 @@ - + @@ -11225,7 +11225,7 @@ - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/bigquery/configuration/index.html b/docs/quick-ingestion-guides/bigquery/configuration/index.html index 1aa24c152d30f..6c967bb00c90c 100644 --- a/docs/quick-ingestion-guides/bigquery/configuration/index.html +++ b/docs/quick-ingestion-guides/bigquery/configuration/index.html @@ -8,13 +8,13 @@ - +

    Configuring Your BigQuery Connector to DataHub

    Now that you have created a Service Account and Service Account Key in BigQuery in the prior step, it's now time to set up a connection via the DataHub UI.

    Configure Secrets

    1. Within DataHub, navigate to the Ingestion tab in the top, right corner of your screen

    Navigate to the "Ingestion Tab"

    note

    If you do not see the Ingestion tab, please contact your DataHub admin to grant you the correct permissions

    1. Navigate to the Secrets tab and click Create new secret

    Secrets Tab

    1. Create a Private Key secret

    This will securely store your BigQuery Service Account Private Key within DataHub

    • Enter a name like BIGQUERY_PRIVATE_KEY - we will use this later to refer to the secret
    • Copy and paste the private_key value from your Service Account Key
    • Optionally add a description
    • Click Create

    Private Key Secret

    1. Create a Private Key ID secret

    This will securely store your BigQuery Service Account Private Key ID within DataHub

    • Click Create new secret again
    • Enter a name like BIGQUERY_PRIVATE_KEY_ID - we will use this later to refer to the secret
    • Copy and paste the private_key_id value from your Service Account Key
    • Optionally add a description
    • Click Create

    Private Key Id Secret

    Configure Recipe

    1. Navigate to the Sources tab and click Create new source

    Click "Create new source"

    1. Select BigQuery

    Select BigQuery from the options

    1. Fill out the BigQuery Recipe

    You can find the following details in your Service Account Key file:

    • Project ID
    • Client Email
    • Client ID

    Populate the Secret Fields by selecting the Private Key and Private Key ID secrets you created in steps 3 and 4.

    Fill out the BigQuery Recipe

    1. Click Test Connection

    This step will ensure you have configured your credentials accurately and confirm you have the required permissions to extract all relevant metadata.

    Test BigQuery connection

    After you have successfully tested your connection, click Next.

    Schedule Execution

    Now it's time to schedule a recurring ingestion pipeline to regularly extract metadata from your BigQuery instance.

    1. Decide how regularly you want this ingestion to run-- day, month, year, hour, minute, etc. Select from the dropdown

      schedule selector

    2. Ensure you've configured your correct timezone

      timezone_selector

    3. Click Next when you are done

    Finish Up

    1. Name your ingestion source, then click Save and Run

      Name your ingestion

    You will now find your new ingestion source running

    ingestion_running

    Validate Ingestion Runs

    1. View the latest status of ingestion runs on the Ingestion page

    ingestion succeeded

    1. Click the plus sign to expand the full list of historical runs and outcomes; click Details to see the outcomes of a specific run

    ingestion_details

    1. From the Ingestion Run Details page, pick View All to see which entities were ingested

    ingestion_details_view_all

    1. Pick an entity from the list to manually validate if it contains the detail you expected

    ingestion_details_view_all

    Congratulations! You've successfully set up BigQuery as an ingestion source for DataHub!

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/bigquery/overview/index.html b/docs/quick-ingestion-guides/bigquery/overview/index.html index 3455402645ec0..3c1f7ce841cec 100644 --- a/docs/quick-ingestion-guides/bigquery/overview/index.html +++ b/docs/quick-ingestion-guides/bigquery/overview/index.html @@ -8,13 +8,13 @@ - +

    BigQuery Ingestion Guide: Overview

    What You Will Get Out of This Guide

    This guide will help you set up the BigQuery connector through the DataHub UI to begin ingesting metadata into DataHub.

    Upon completing this guide, you will have a recurring ingestion pipeline that will extract metadata from BigQuery and load it into DataHub. This will include the following BigQuery asset types:

    This recurring ingestion pipeline will also extract:

    • Usage statistics to help you understand recent query activity
    • Table-level lineage (where available) to automatically define interdependencies between datasets
    • Table- and column-level profile statistics to help you understand the shape of the data
    caution

    You will NOT have extracted Routines or Search Indexes from BigQuery, as the connector does not support ingesting these assets

    Next Steps

    If that all sounds like what you're looking for, navigate to the next page, where we'll talk about prerequisites

    Advanced Guides and Reference

    If you're looking to do something more in-depth, want to use CLI instead of the DataHub UI, or just need to look at the reference documentation for this connector, use these links:

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/bigquery/setup/index.html b/docs/quick-ingestion-guides/bigquery/setup/index.html index 275c2b506b562..43bce164ffa44 100644 --- a/docs/quick-ingestion-guides/bigquery/setup/index.html +++ b/docs/quick-ingestion-guides/bigquery/setup/index.html @@ -8,13 +8,13 @@ - +

    BigQuery Ingestion Guide: Setup & Prerequisites

    To configure ingestion from BigQuery, you'll need a Service Account configured with the proper permission sets and an associated Service Account Key.

    This setup guide will walk you through the steps you'll need to take via your Google Cloud Console.

    BigQuery Prerequisites

    If you do not have an existing Service Account and Service Account Key, please work with your BigQuery Admin to ensure you have the appropriate permissions and/or roles to continue with this setup guide.

    When creating and managing new Service Accounts and Service Account Keys, we have found the following permissions and roles to be required:

    • Create a Service Account: iam.serviceAccounts.create permission
    • Assign roles to a Service Account: serviceusage.services.enable permission
    • Set permission policy to the project: resourcemanager.projects.setIamPolicy permission
    • Generate Key for Service Account: Service Account Key Admin (roles/iam.serviceAccountKeyAdmin) IAM role
    note

    Please refer to the BigQuery Permissions and IAM Roles references for details

    BigQuery Setup

    1. To set up a new Service Account follow this guide

    2. When you are creating a Service Account, assign the following predefined Roles:

    note

    You can always add/remove roles to Service Accounts later on. Please refer to the BigQuery Manage access to projects, folders, and organizations guide for more details.

    1. Create and download a Service Account Key. We will use this to set up authentication within DataHub.

    The key file looks like this:

    {
    "type": "service_account",
    "project_id": "project-id-1234567",
    "private_key_id": "d0121d0000882411234e11166c6aaa23ed5d74e0",
    "private_key": "-----BEGIN PRIVATE KEY-----\nMIIyourkey\n-----END PRIVATE KEY-----",
    "client_email": "test@suppproject-id-1234567.iam.gserviceaccount.com",
    "client_id": "113545814931671546333",
    "auth_uri": "https://accounts.google.com/o/oauth2/auth",
    "token_uri": "https://oauth2.googleapis.com/token",
    "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
    "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/test%suppproject-id-1234567.iam.gserviceaccount.com"
    }

    Next Steps

    Once you've confirmed all of the above in BigQuery, it's time to move on to configure the actual ingestion source within the DataHub UI.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/powerbi/configuration/index.html b/docs/quick-ingestion-guides/powerbi/configuration/index.html index d31fc62536d44..b2bf6a644cdbc 100644 --- a/docs/quick-ingestion-guides/powerbi/configuration/index.html +++ b/docs/quick-ingestion-guides/powerbi/configuration/index.html @@ -8,13 +8,13 @@ - +

    Configuring Your PowerBI Connector to DataHub

    Now that you have created a DataHub specific Azure AD app with the relevant access in the prior step, it's now time to set up a connection via the DataHub UI.

    Configure Secrets

    1. Within DataHub, navigate to the Ingestion tab in the top, right corner of your screen

    Navigate to the "Ingestion Tab"

    note

    If you do not see the Ingestion tab, please contact your DataHub admin to grant you the correct permissions

    1. Navigate to the Secrets tab and click Create new secret.

    Secrets Tab

    1. Create a client id secret

      This will securely store your PowerBI Application (client) ID within DataHub

      • Enter a name like POWER_BI_CLIENT_ID - we will use this later to refer to the Application (client) ID
      • Enter the Application (client) ID
      • Optionally add a description
      • Click Create

    Application (client) ID

    1. Create a secret to store the Azure AD Client Secret

      This will securely store your client secret

      • Enter a name like POWER_BI_CLIENT_SECRET - we will use this later to refer to the client secret
      • Enter the client secret
      • Optionally add a description
      • Click Create

    Azure AD app Secret

    Configure Recipe

    1. Navigate to the Sources tab and click Create new source

      Click "Create new source"

    2. Choose PowerBI

      Select PowerBI from the options

    3. Enter details into the PowerBI Recipe

      You need to set a minimum of 3 fields in the recipe:

      a. tenant_id: This is the unique identifier (GUID) of the Azure Active Directory instance. Tenant Id can be found at: PowerBI Portal -> Click on ? at top-right corner -> Click on About PowerBI

      Select PowerBI from the options

      On About PowerBI window copy ctid:

      copy ctid

    b. **client_id:** Use the secret POWER_BI_CLIENT_ID with the format "${POWER_BI_CLIENT_ID}".

    c. **client_secret:** Use the secret POWER_BI_CLIENT_SECRET with the format "${POWER_BI_CLIENT_SECRET}".

    Optionally, use the workspace_id_pattern field to filter for specific workspaces.

    config:
    ...
    workspace_id_pattern:
    allow:
    - "258829b1-82b1-4bdb-b9fb-6722c718bbd3"

    Your recipe should look something like this:

    tenant id

    After completing the recipe, click Next.

    Schedule Execution

    Now it's time to schedule a recurring ingestion pipeline to regularly extract metadata from your PowerBI instance.

    1. Decide how regularly you want this ingestion to run-- day, month, year, hour, minute, etc. Select from the dropdown

    schedule selector

    1. Ensure you've configured your correct timezone

      timezone_selector

    2. Click Next when you are done

    Finish Up

    1. Name your ingestion source, then click Save and Run

      Name your ingestion

    You will now find your new ingestion source running

    ingestion_running

    Validate Ingestion Runs

    1. View the latest status of ingestion runs on the Ingestion page

    ingestion succeeded

    1. Click the plus sign to expand the full list of historical runs and outcomes; click Details to see the outcomes of a specific run

    ingestion_details

    1. From the Ingestion Run Details page, pick View All to see which entities were ingested

    ingestion_details_view_all

    1. Pick an entity from the list to manually validate if it contains the detail you expected

    ingestion_details_view_all

    Congratulations! You've successfully set up PowerBI as an ingestion source for DataHub!

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/powerbi/overview/index.html b/docs/quick-ingestion-guides/powerbi/overview/index.html index 29ad1141578ca..09cf0501a1534 100644 --- a/docs/quick-ingestion-guides/powerbi/overview/index.html +++ b/docs/quick-ingestion-guides/powerbi/overview/index.html @@ -8,13 +8,13 @@ - +

    PowerBI Ingestion Guide: Overview

    What You Will Get Out of This Guide

    This guide will help you set up the PowerBI connector to begin ingesting metadata into DataHub.

    Upon completing this guide, you will have a recurring ingestion pipeline that will extract metadata from PowerBI and load it into DataHub. This will include the following PowerBI asset types:

    • Dashboards
    • Tiles
    • Reports
    • Pages
    • Datasets
    • Lineage

    To learn more about setting these advanced values, check out the PowerBI Ingestion Source.

    Next Steps

    Continue to the setup guide, where we'll describe the prerequisites.

    Advanced Guides and Reference

    If you want to ingest metadata from PowerBI using the DataHub CLI, check out the following resources:

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/powerbi/setup/index.html b/docs/quick-ingestion-guides/powerbi/setup/index.html index 212c26657ef0f..74fdd845a91d1 100644 --- a/docs/quick-ingestion-guides/powerbi/setup/index.html +++ b/docs/quick-ingestion-guides/powerbi/setup/index.html @@ -8,13 +8,13 @@ - +

    PowerBI Ingestion Guide: Setup & Prerequisites

    In order to configure ingestion from PowerBI, you'll first have to ensure you have an Azure AD app with permission to access the PowerBI resources.

    PowerBI Prerequisites

    1. Create an Azure AD app: Follow below steps to create an Azure AD app

      a. Login to https://portal.azure.com

      b. Go to Azure Active Directory

      c. Navigate to App registrations

      d. Click on + New registration

      e. On Register an application window fill the Name of application says powerbi-app-connector and keep other default as is

      app_registration

      f. On Register an application window click on Register

      g. The Azure portal will open up the powerbi-app-connector window as shown below. On this screen note down the Application (client) ID and click on Add a certificate or secret to generate a secret for the Application (client) ID

      powerbi_app_connector

      h. On powerbi-connector-app | Certificates & secrets window generate the client secret and note down the Secret

    2. Create an Azure AD Security Group: You need to add the Azure AD app into the security group to control resource permissions for the Azure AD app. Follow below steps to create an Azure AD Security Group.

      a. Go to Azure Active Directory

      b. Navigate to Groups and click on New group

      c. On the New group window fill out the Group type, Group name, and Group description. Group type should be set to Security. The New group window is shown in the below screenshot.

      powerbi_app_connector

      d. On New group window click on No members selected and add Azure AD app i.e. powerbi-connector-app as member

      e. On New group window click on Create to create the security group powerbi-connector-app-security-group.

    3. Assign privileges to powerbi-connector-app-security-group: You need to add the created security group into PowerBI portal to grant resource access. Follow below steps to assign privileges to your security group.

      a. Login to https://app.powerbi.com/

      b. Go to Settings -> Admin Portal

      c. On Admin Portal navigate to Tenant settings as shown in below screenshot.

      powerbi_admin_portal

      d. Enable PowerBI API: Under Tenant settings -> Developer settings -> Allow service principals to use Power BI APIs add the previously created security group i.e. powerbi-connector-app-security-group into Specific security groups (Recommended)

      e. Enable Admin API Settings: Under Tenant settings -> Admin API settings enable the following options

      • Allow service principals to use read-only admin APIs
      • Enhance admin APIs responses with detailed metadata
      • Enhance admin APIs responses with DAX and mashup expressions

      f. Add Security Group to Workspace: Navigate to Workspaces window and open workspace which you want to ingest as shown in below screenshot and click on Access and add powerbi-connector-app-security-group as member

      workspace-window-underlined

    Next Steps

    Once you've done all of the above steps, it's time to move on to configuring the actual ingestion source within DataHub.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/redshift/configuration/index.html b/docs/quick-ingestion-guides/redshift/configuration/index.html index 8018462d4242e..fb832f65d9a35 100644 --- a/docs/quick-ingestion-guides/redshift/configuration/index.html +++ b/docs/quick-ingestion-guides/redshift/configuration/index.html @@ -8,13 +8,13 @@ - +

    Configuring Your Redshift Connector to DataHub

    Now that you have created a DataHub user in Redshift in the prior step, it's time to set up a connection via the DataHub UI.

    Configure Secrets

    1. Within DataHub, navigate to the Ingestion tab in the top, right corner of your screen

    Navigate to the "Ingestion Tab"

    note

    If you do not see the Ingestion tab, please contact your DataHub admin to grant you the correct permissions

    1. Navigate to the Secrets tab and click Create new secret

    Secrets Tab

    1. Create a Redshift User's Password secret

    This will securely store your Redshift User's password within DataHub

    • Click Create new secret again
    • Enter a name like REDSHIFT_PASSWORD - we will use this later to refer to the secret
    • Enter your datahub redshift user's password
    • Optionally add a description
    • Click Create

    Redshift Password Secret

    Configure Recipe

    1. Navigate to the Sources tab and click Create new source

    Click "Create new source"

    1. Select Redshift

    Select Redshift from the options

    1. Fill out the Redshift Recipe

    Populate the Password field by selecting the Redshift Password secret you created in the previous step.

    Fill out the Redshift Recipe

    Schedule Execution

    Now it's time to schedule a recurring ingestion pipeline to regularly extract metadata from your Redshift instance.

    1. Decide how regularly you want this ingestion to run-- day, month, year, hour, minute, etc. Select from the dropdown

    schedule selector

    1. Ensure you've configured your correct timezone

    timezone_selector

    1. Click Next when you are done

    Finish Up

    1. Name your ingestion source, then click Save and Run

    Name your ingestion

    You will now find your new ingestion source running

    ingestion_running

    Validate Ingestion Runs

    1. View the latest status of ingestion runs on the Ingestion page

    ingestion succeeded

    1. Click the plus sign to expand the full list of historical runs and outcomes; click Details to see the outcomes of a specific run

    ingestion_details

    1. From the Ingestion Run Details page, pick View All to see which entities were ingested

    ingestion_details_view_all

    1. Pick an entity from the list to manually validate if it contains the detail you expected

    ingestion_details_view_all

    Congratulations! You've successfully set up Redshift as an ingestion source for DataHub!

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/redshift/overview/index.html b/docs/quick-ingestion-guides/redshift/overview/index.html index 2e5def44fd72e..48119f15e0b2d 100644 --- a/docs/quick-ingestion-guides/redshift/overview/index.html +++ b/docs/quick-ingestion-guides/redshift/overview/index.html @@ -8,13 +8,13 @@ - +

    Redshift Ingestion Guide: Overview

    What You Will Get Out of This Guide

    This guide will help you set up the Redshift connector through the DataHub UI to begin ingesting metadata into DataHub.

    Upon completing this guide, you will have a recurring ingestion pipeline that will extract metadata from Redshift and load it into DataHub. This will include the following Redshift asset types:

    • Database
    • Schemas (External and Internal)
    • Tables (External and Internal)
    • Views

    This recurring ingestion pipeline will also extract:

    • Usage statistics to help you understand recent query activity
    • Table-level lineage (where available) to automatically define interdependencies between datasets
    • Table- and column-level profile statistics to help you understand the shape of the data
    caution

    The source currently can ingest one database with one recipe

    Next Steps

    If that all sounds like what you're looking for, navigate to the next page, where we'll talk about prerequisites

    Advanced Guides and Reference

    If you're looking to do something more in-depth, want to use CLI instead of the DataHub UI, or just need to look at the reference documentation for this connector, use these links:

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/redshift/setup/index.html b/docs/quick-ingestion-guides/redshift/setup/index.html index 7fa6f45caf7ee..ee72ee81e35d2 100644 --- a/docs/quick-ingestion-guides/redshift/setup/index.html +++ b/docs/quick-ingestion-guides/redshift/setup/index.html @@ -8,14 +8,14 @@ - +

    Redshift Ingestion Guide: Setup & Prerequisites

    To configure ingestion from Redshift, you'll need a User configured with the proper permission sets, and an associated password.

    This setup guide will walk you through the steps you'll need to take in your Amazon Redshift cluster.

    Redshift Prerequisites

    1. Connect to your Amazon Redshift cluster using an SQL client such as SQL Workbench/J or Amazon Redshift Query Editor with your Admin user.
    2. Create a Redshift User that will be used to perform the metadata extraction if you don't have one already. For example:
    CREATE USER datahub WITH PASSWORD 'Datahub1234';

    Redshift Setup

    1. Grant the following permission to your datahub user:
    ALTER USER datahub WITH SYSLOG ACCESS UNRESTRICTED;
    GRANT SELECT ON pg_catalog.svv_table_info to datahub;
    GRANT SELECT ON pg_catalog.svl_user_info to datahub;

    Next Steps

    Once you've confirmed all of the above in Redshift, it's time to move on to configure the actual ingestion source within the DataHub UI.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/snowflake/configuration/index.html b/docs/quick-ingestion-guides/snowflake/configuration/index.html index e000789d390d5..7a92f0294e2ff 100644 --- a/docs/quick-ingestion-guides/snowflake/configuration/index.html +++ b/docs/quick-ingestion-guides/snowflake/configuration/index.html @@ -8,13 +8,13 @@ - +

    Configuring Your Snowflake Connector to DataHub

    Now that you have created a DataHub-specific user with the relevant roles in Snowflake in the prior step, it's now time to set up a connection via the DataHub UI.

    Configure Secrets

    1. Within DataHub, navigate to the Ingestion tab in the top, right corner of your screen

    Navigate to the "Ingestion Tab"

    note

    If you do not see the Ingestion tab, please contact your DataHub admin to grant you the correct permissions

    1. Navigate to the Secrets tab and click Create new secret

    Secrets Tab

    1. Create a Password secret

      This will securely store your Snowflake password within DataHub

      • Enter a name like SNOWFLAKE_PASSWORD - we will use this later to refer to the secret
      • Enter the password configured for the DataHub user in the previous step
      • Optionally add a description
      • Click Create

    Snowflake Password Secret

    Configure Recipe

    1. Navigate to the Sources tab and click Create new source

    Click "Create new source"

    1. Select Snowflake

    Select Snowflake from the options

    1. Fill out the Snowflake Recipe

    Enter the Snowflake Account Identifier in the Account ID field. The account identifier is the part before .snowflakecomputing.com in your Snowflake host URL:

    Account Id Field

    Learn more about Snowflake Account Identifiers here

    Add the previously added Password secret to Password field:

    • Click on the Password input field
    • Select SNOWFLAKE_PASSWORD secret

    Password field

    Populate the relevant fields using the same Username, Role, and Warehouse you created and/or specified in Snowflake Prerequisites.

    Warehouse Field

    1. Click Test Connection

    This step will ensure you have configured your credentials accurately and confirm you have the required permissions to extract all relevant metadata.

    Test Snowflake connection

    After you have successfully tested your connection, click Next.

    Schedule Execution

    Now it's time to schedule a recurring ingestion pipeline to regularly extract metadata from your Snowflake instance.

    1. Decide how regularly you want this ingestion to run-- day, month, year, hour, minute, etc. Select from the dropdown

    schedule selector

    1. Ensure you've configured your correct timezone

      timezone_selector

    2. Click Next when you are done

    Finish Up

    1. Name your ingestion source, then click Save and Run

      Name your ingestion

    You will now find your new ingestion source running

    ingestion_running

    Validate Ingestion Runs

    1. View the latest status of ingestion runs on the Ingestion page

    ingestion succeeded

    1. Click the plus sign to expand the full list of historical runs and outcomes; click Details to see the outcomes of a specific run

    ingestion_details

    1. From the Ingestion Run Details page, pick View All to see which entities were ingested

    ingestion_details_view_all

    1. Pick an entity from the list to manually validate if it contains the detail you expected

    ingestion_details_view_all

    Congratulations! You've successfully set up Snowflake as an ingestion source for DataHub!

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/snowflake/overview/index.html b/docs/quick-ingestion-guides/snowflake/overview/index.html index 705dafb5684be..b9b8501c70592 100644 --- a/docs/quick-ingestion-guides/snowflake/overview/index.html +++ b/docs/quick-ingestion-guides/snowflake/overview/index.html @@ -8,13 +8,13 @@ - +

    Snowflake Ingestion Guide: Overview

    What You Will Get Out of This Guide

    This guide will help you set up the Snowflake connector to begin ingesting metadata into DataHub.

    Upon completing this guide, you will have a recurring ingestion pipeline that will extract metadata from Snowflake and load it into DataHub. This will include the following Snowflake asset types:

    • Databases
    • Schemas
    • Tables
    • External Tables
    • Views
    • Materialized Views

    The pipeline will also extract:

    • Usage statistics to help you understand recent query activity (available if using Snowflake Enterprise edition or above)
    • Table- and Column-level lineage to automatically define interdependencies between datasets and columns (available if using Snowflake Enterprise edition or above)
    • Table-level profile statistics to help you understand the shape of the data
    caution

    You will NOT have extracted Stages, Snowpipes, Streams, Tasks, Procedures from Snowflake, as the connector does not support ingesting these assets yet.

    Caveats

    By default, DataHub only profiles datasets that have changed in the past 1 day. This can be changed in the YAML editor by setting the value of profile_if_updated_since_days to something greater than 1.

    Additionally, DataHub only extracts usage and lineage information based on operations performed in the last 1 day. This can be changed by setting a custom value for start_time and end_time in the YAML editor.

    To learn more about setting these advanced values, check out the Snowflake Ingestion Source.

    Next Steps

    If that all sounds like what you're looking for, navigate to the next page, where we'll talk about prerequisites.

    Advanced Guides and Reference

    If you want to ingest metadata from Snowflake using the DataHub CLI, check out the following resources:

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/snowflake/setup/index.html b/docs/quick-ingestion-guides/snowflake/setup/index.html index a290a8f8c58b0..641d391284bbc 100644 --- a/docs/quick-ingestion-guides/snowflake/setup/index.html +++ b/docs/quick-ingestion-guides/snowflake/setup/index.html @@ -8,13 +8,13 @@ - +

    Snowflake Ingestion Guide: Setup & Prerequisites

    In order to configure ingestion from Snowflake, you'll first have to ensure you have a Snowflake user with the ACCOUNTADMIN role or MANAGE GRANTS privilege.

    Snowflake Prerequisites

    1. Create a DataHub-specific role by executing the following queries in Snowflake. Replace <your-warehouse> with an existing warehouse that you wish to use for DataHub ingestion.

      create or replace role datahub_role;
      -- Grant access to a warehouse to run queries to view metadata
      grant operate, usage on warehouse "<your-warehouse>" to role datahub_role;

      Make note of this role and warehouse. You'll need this in the next step.

    2. Create a DataHub-specific user by executing the following queries. Replace <your-password> with a strong password. Replace <your-warehouse> with the same warehouse used above.

      create user datahub_user display_name = 'DataHub' password='<your-password>' default_role = datahub_role default_warehouse = '<your-warehouse>';
      -- Grant access to the DataHub role created above
      grant role datahub_role to user datahub_user;

      Make note of the user and its password. You'll need this in the next step.

    3. Assign privileges to read metadata about your assets by executing the following queries. Replace <your-database> with an existing database. Repeat for all databases from your Snowflake instance that you wish to integrate with DataHub.

      set db_var = '"<your-database>"';
      -- Grant access to view database and schema in which your tables/views exist
      grant usage on DATABASE identifier($db_var) to role datahub_role;
      grant usage on all schemas in database identifier($db_var) to role datahub_role;
      grant usage on future schemas in database identifier($db_var) to role datahub_role;

      -- Grant Select access to enable Data Profiling
      grant select on all tables in database identifier($db_var) to role datahub_role;
      grant select on future tables in database identifier($db_var) to role datahub_role;
      grant select on all external tables in database identifier($db_var) to role datahub_role;
      grant select on future external tables in database identifier($db_var) to role datahub_role;
      grant select on all views in database identifier($db_var) to role datahub_role;
      grant select on future views in database identifier($db_var) to role datahub_role;

      -- Grant access to view tables and views
      grant references on all tables in database identifier($db_var) to role datahub_role;
      grant references on future tables in database identifier($db_var) to role datahub_role;
      grant references on all external tables in database identifier($db_var) to role datahub_role;
      grant references on future external tables in database identifier($db_var) to role datahub_role;
      grant references on all views in database identifier($db_var) to role datahub_role;
      grant references on future views in database identifier($db_var) to role datahub_role;

      -- Assign privileges to extract lineage and usage statistics from Snowflake by executing the below query.
      grant imported privileges on database snowflake to role datahub_role;

      If you have imported databases in your Snowflake instance that you wish to integrate with DataHub, you'll need to use the below query for them.

      grant IMPORTED PRIVILEGES on database "<your-database>" to role datahub_role;  

    Next Steps

    Once you've done all of the above in Snowflake, it's time to move on to configuring the actual ingestion source within DataHub.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/tableau/configuration/index.html b/docs/quick-ingestion-guides/tableau/configuration/index.html index 76a8d8882e55d..71eda218177ba 100644 --- a/docs/quick-ingestion-guides/tableau/configuration/index.html +++ b/docs/quick-ingestion-guides/tableau/configuration/index.html @@ -8,13 +8,13 @@ - +

    Configuring Your Tableau Connector to DataHub

    Now that you have created a DataHub-specific user with the relevant access in Tableau in the prior step, it's now time to set up a connection via the DataHub UI.

    Configure Secrets

    1. Within DataHub, navigate to the Ingestion tab in the top, right corner of your screen

    Navigate to the "Ingestion Tab"

    note

    If you do not see the Ingestion tab, please contact your DataHub admin to grant you the correct permissions

    1. Navigate to the Secrets tab and click Create new secret

    Secrets Tab

    1. Create a username secret

      This will securely store your Tableau username within DataHub

      • Enter a name like TABLEAU_USERNAME - we will use this later to refer in recipe
      • Enter the username, setup in the setup guide
      • Optionally add a description
      • Click Create

      Tableau Username Secret

    2. Create a password secret

      This will securely store your Tableau password within DataHub

      • Enter a name like TABLEAU_PASSWORD - we will use this later to refer in recipe
      • Enter the password of the user, setup in the setup guide
      • Optionally add a description
      • Click Create

      Tableau Password Secret

    Configure Recipe

    1. Navigate to on the Sources tab and then Create new source

    Click "Create new source"

    1. Select Tableau

    Select Tableau from the options

    1. Fill in the Tableau Recipe form:

      You need to set minimum following fields in the recipe:

      a. Host URL: URL of your Tableau instance (e.g., https://15az.online.tableau.com/). It is available in browser address bar on Tableau Portal.

      b. Username: Use the TABLEAU_USERNAME secret (e.g., "${TABLEAU_USERNAME}").

      c. Password: Use the TABLEAU_PASSWORD secret (e.g., "${TABLEAU_PASSWORD}").

      d. Site: Required only if using Tableau Cloud / Tableau Online

    To filter specific projects, use the project_pattern field.

    config:
    ...
    project_pattern:
    allow:
    - "SalesProject"

    Your recipe should look something like this:

    tableau recipe in form format

    Click Next when you're done.

    Schedule Execution

    Now it's time to schedule a recurring ingestion pipeline to regularly extract metadata from your Tableau instance.

    1. Decide how regularly you want this ingestion to run-- day, month, year, hour, minute, etc. Select from the dropdown

    schedule selector

    1. Ensure you've configured your correct timezone

      timezone_selector

    2. Click Next when you are done

    Finish Up

    1. Name your ingestion source, then click Save and Run

      Name your ingestion

    You will now find your new ingestion source running

    ingestion_running

    Validate Ingestion Runs

    1. View the latest status of ingestion runs on the Ingestion page

    ingestion succeeded

    1. Click the plus sign to expand the full list of historical runs and outcomes; click Details to see the outcomes of a specific run

    ingestion_details

    1. From the Ingestion Run Details page, pick View All to see which entities were ingested

    ingestion_details_view_all

    1. Pick an entity from the list to manually validate if it contains the detail you expected

    ingestion_details_view_all

    Congratulations! You've successfully set up Tableau as an ingestion source for DataHub!

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/tableau/overview/index.html b/docs/quick-ingestion-guides/tableau/overview/index.html index 3d071708b71c8..2029771cebc09 100644 --- a/docs/quick-ingestion-guides/tableau/overview/index.html +++ b/docs/quick-ingestion-guides/tableau/overview/index.html @@ -8,13 +8,13 @@ - +

    Tableau Ingestion Guide: Overview

    What You Will Get Out of This Guide

    This guide will help you set up the Tableau connector to begin ingesting metadata into DataHub.

    Upon completing this guide, you will have a recurring ingestion pipeline that will extract metadata from Tableau and load it into DataHub. This will include the following Tableau asset types:

    • Dashboards
    • Sheets
    • Embedded DataSource
    • Published DataSource
    • Custom SQL Table
    • Embedded or External Tables
    • User
    • Workbook
    • Tag

    The pipeline will also extract:

    • Usage statistics to help you understand the top viewed Dashboards/Charts
    • Table- and Column-level lineage to automatically index relationships between datasets and columns

    Next Steps

    Continue to the setup guide, where we'll describe the prerequisites.

    Advanced Guides and Reference

    If you want to ingest metadata from Tableau using the DataHub CLI, check out the following resources:

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quick-ingestion-guides/tableau/setup/index.html b/docs/quick-ingestion-guides/tableau/setup/index.html index ab7de69848df4..75e5d0f12d8c4 100644 --- a/docs/quick-ingestion-guides/tableau/setup/index.html +++ b/docs/quick-ingestion-guides/tableau/setup/index.html @@ -8,13 +8,13 @@ - +

    Tableau Ingestion Guide: Setup & Prerequisites

    In order to configure ingestion from Tableau, you'll first have to enable Tableau Metadata API and you should have a user with Site Administrator Explorer permissions.

    Tableau Prerequisites

    1. Grant Site Administrator Explorer permissions to a user

      A. Log in to Tableau Cloud https://sso.online.tableau.com/public/idp/SSO.

      B. Navigate to Users.

      Navigate to the Users tab

      C. For New User: Follow below steps to grant permission for new user.

      • Click Add Users -> Add Users by Email

        Navigate to the Users tab

      • Fill Enter email addresses, set Site role to Site Administrator Explorer and Click Add Users

        Navigate to the Users tab

      D. For Existing User: Follow below steps to grant permission for existing user.

      • Select a user and click Actions -> Site Role

        Actions Site Role

      • Change user role to Site Administrator Explorer

        tableau site role

    1. Enable Tableau Metadata API: This step is required only for Tableau Server. The Metadata API is installed with Tableau Server but disabled by default.

      • Open a command prompt as an admin on the initial node (where TSM is installed) in the cluster
      • Run the command: tsm maintenance metadata-services enable

    Next Steps

    Once you've done all of the above in Tableau, it's time to move on to configuring the actual ingestion source within DataHub.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/quickstart/index.html b/docs/quickstart/index.html index 8ea7c7df8e890..4deb8382ad7a0 100644 --- a/docs/quickstart/index.html +++ b/docs/quickstart/index.html @@ -8,7 +8,7 @@ - + @@ -26,7 +26,7 @@ design choice to make development easier and is not best practice for a production environment.

    Exposed Ports

    DataHub's services, and its backend data stores, use the Docker default behavior of binding to all interface addresses. This makes it useful for development but is not recommended in a production environment.

    Performance & Management

    • quickstart is limited by the resources available on a single host, there is no ability to scale horizontally.
    • Rollout of new versions requires downtime.
    • The configuration is largely pre-determined and not easily managed.
    • quickstart, by default, follows the most recent builds forcing updates to the latest released and unreleased builds.

    Other Common Operations

    Stopping DataHub

    To stop DataHub's quickstart, you can issue the following command.

    datahub docker quickstart --stop

    Resetting DataHub (a.k.a factory reset)

    To cleanse DataHub of all of its state (e.g. before ingesting your own), you can use the CLI nuke command.

    datahub docker nuke

    Backing up your DataHub Quickstart (experimental)

    The quickstart image is not recommended for use as a production instance. See Moving to production for recommendations on setting up your production cluster. However, in case you want to take a backup of your current quickstart state (e.g. you have a demo to your company coming up and you want to create a copy of the quickstart data so you can restore it at a future date), you can supply the --backup flag to quickstart.

    datahub docker quickstart --backup

    will take a backup of your MySQL image and write it by default to your ~/.datahub/quickstart/ directory as the file backup.sql. You can customize this by passing a --backup-file argument. e.g.

    datahub docker quickstart --backup --backup-file /home/my_user/datahub_backups/quickstart_backup_2002_22_01.sql
    note

    Note that the Quickstart backup does not include any timeseries data (dataset statistics, profiles, etc.), so you will lose that information if you delete all your indexes and restore from this backup.

    Restoring your DataHub Quickstart (experimental)

    As you might imagine, these backups are restore-able. The following section describes a few different options you have to restore your backup.

    Restoring a backup (primary + index) [most common]

    To restore a previous backup, run the following command:

    datahub docker quickstart --restore

    This command will pick up the backup.sql file located under ~/.datahub/quickstart and restore your primary database as well as the elasticsearch indexes with it.

    To supply a specific backup file, use the --restore-file option.

    datahub docker quickstart --restore --restore-file /home/my_user/datahub_backups/quickstart_backup_2002_22_01.sql

    Restoring only the index [to deal with index out of sync / corruption issues]

    Another situation that can come up is the index can get corrupt, or be missing some update. In order to re-bootstrap the index from the primary store, you can run this command to sync the index with the primary store.

    datahub docker quickstart --restore-indices

    Restoring a backup (primary but NO index) [rarely used]

    Sometimes, you might want to just restore the state of your primary database (MySQL), but not re-index the data. To do this, you have to explicitly disable the restore-indices capability.

    datahub docker quickstart --restore --no-restore-indices

    Upgrading your local DataHub

    If you have been testing DataHub locally, a new version of DataHub got released and you want to try the new version then you can just issue the quickstart command again. It will pull down newer images and restart your instance without losing any data.

    datahub docker quickstart

    Customization

    If you would like to customize the DataHub installation further, please download the docker-compose.yaml used by the cli tool, modify it as necessary and deploy DataHub by passing the downloaded docker-compose file:

    datahub docker quickstart --quickstart-compose-file <path to compose file>
    - + \ No newline at end of file diff --git a/docs/releases/index.html b/docs/releases/index.html index efb0d8e693277..5d352a5367ac6 100644 --- a/docs/releases/index.html +++ b/docs/releases/index.html @@ -8,14 +8,14 @@ - +

    DataHub Releases

    Summary

    VersionRelease DateLinks
    v0.10.52023-08-02Release Notes, View on GitHub
    v0.10.42023-06-09Release Notes, View on GitHub
    v0.10.32023-05-25View on GitHub
    v0.10.22023-04-13View on GitHub
    v0.10.12023-03-23View on GitHub
    v0.10.02023-02-07View on GitHub
    v0.9.6.12023-01-31View on GitHub
    v0.9.62023-01-13View on GitHub
    v0.9.52022-12-23View on GitHub
    v0.9.42022-12-20View on GitHub
    v0.9.32022-11-30View on GitHub
    v0.9.22022-11-04View on GitHub
    v0.9.12022-10-31View on GitHub
    v0.9.02022-10-11View on GitHub
    v0.8.452022-09-23View on GitHub
    v0.8.442022-09-01View on GitHub
    v0.8.432022-08-09View on GitHub
    v0.8.422022-08-03View on GitHub
    v0.8.412022-07-15View on GitHub
    v0.8.402022-06-30View on GitHub
    v0.8.392022-06-24View on GitHub
    v0.8.382022-06-09View on GitHub
    v0.8.372022-06-09View on GitHub
    v0.8.362022-06-02View on GitHub
    v0.8.352022-05-18View on GitHub
    v0.8.342022-05-04View on GitHub
    v0.8.332022-04-15View on GitHub
    v0.8.322022-04-04View on GitHub
    v0.8.312022-03-17View on GitHub
    v0.8.302022-03-17View on GitHub

    v0.10.5

    Released on 2023-08-02 by @david-leifker.

    Release Highlights

    NEW: Unified Search and Browse Experience

    It’s here, it’s here! We are incredibly excited to roll out our re-designed, streamlined Search and Browse experience. End-users now have a one-stop-shop to search for specific data entities and browse across systems, making it easier than ever to find the most relevant and meaningful resources within DataHub.

    Check out the screenshot below and get a full walk-through in this video!

    <img width="1041" alt="CleanShot 2023-08-03 at 14 47 55@2x" src="https://github.com/datahub-project/datahub/assets/15873986/2f47d033-6c2b-483a-951d-e6d6b807f0d0">

    User Experience

    • Column-Level Lineage (CLL) visualization update: you can now visualize CLL relationships through DataJobs (i.e. Airflow DAGs)
    • Unique Glossary Terms: We now prevent creating duplicate Glossary Term names within a Term Group
    • Domains: You can now configure the Documentation tab to be the default landing page within a Domain
    • Formatting updates to Row Count to make large numbers more human readable (ie. 3283337 > 3.2M)
    • Stats Tab: Y-axis scale now dynamically set to reflect the minimum & maximum values, improving readability

    Metadata ingestion

    Ingestion Enhancements:

    • BigQuery: Set platform_instance using project_id
    • PowerBI: Ingest datasets not used in visualizations (tiles/pages)
    • Kafka Connect: Ability to set platform_instance
    • Nifi: Support for basic auth
    • Presto on Hive: Extract all table properties from Hive Metastore
    • Elasticsearch: Support for basic profiling
    • Add advanced configuration for LDAP manager ingestion

    Lineage Improvements:

    • Schema-aware SQL parsing to derive column-level lineage
    • Column-level lineage support for BigQuery, Tableau, and Snowflake View definitions
    • Snowflake: Extract Snowpipe S3 lineage

    Developer Experience

    • Fine-grained ownership policies
    • PATCH support for DataJob Inputs/Outputs
    • New endpoints to extract size of time-series indices and truncate/cleanup time-series indices in Elasticsearch; support for bulk-deletes
    • Initial support for exception reporting via Sentry
    • New OpenAPI endpoint to get Task Status
    • SDK: Easily generate container URNs

    Docs

    • Improvements to our File-Based Lineage doc, specifically focused on Fine-Grained Lineage config components (link)
    • Code examples of how to manage Posts within DataHub (link)
    • Guide to generating custom browse paths for the new search experience (link)

    What's Changed

    New Contributors

    Full Changelog: https://github.com/datahub-project/datahub/compare/v0.10.4...v0.10.5

    v0.10.4

    Released on 2023-06-09 by @pedro93.

    Release Highlights

    User Experience
    Metadata ingestion
    • You can now define column-level lineage (aka fine-grained lineage) via our file-based lineage source
    • Looker: Ingest Looks that are not part of a Dashboard
    • Glue: Error reporting now includes lineage failures
    • BigQuery: Now support deduplicating LogEntries based on insertId, timestamp, and logName
    Docs
    • CSV Enricher: improvements to sample CSV and recipe
    • Guide for changing default DataHub credentials
    • Updated guide to apply time-based filters on Lineage

    What's Changed

    New Contributors

    Full Changelog: https://github.com/datahub-project/datahub/compare/v0.10.3...v0.10.4

    v0.10.3

    Released on 2023-05-25 by @iprentic.

    View the release notes for v0.10.3 on GitHub.

    DataHub v0.10.2

    Released on 2023-04-13 by @iprentic.

    View the release notes for DataHub v0.10.2 on GitHub.

    DataHub v0.10.1

    Released on 2023-03-23 by @aditya-radhakrishnan.

    View the release notes for DataHub v0.10.1 on GitHub.

    DataHub v0.10.0

    Released on 2023-02-07 by @david-leifker.

    View the release notes for DataHub v0.10.0 on GitHub.

    DataHub v0.9.6.1

    Released on 2023-01-31 by @david-leifker.

    View the release notes for DataHub v0.9.6.1 on GitHub.

    DataHub v0.9.6

    Released on 2023-01-13 by @maggiehays.

    View the release notes for DataHub v0.9.6 on GitHub.

    DataHub v0.9.5

    Released on 2022-12-23 by @jjoyce0510.

    View the release notes for DataHub v0.9.5 on GitHub.

    [Known Issues] DataHub v0.9.4

    Released on 2022-12-20 by @maggiehays.

    View the release notes for [Known Issues] DataHub v0.9.4 on GitHub.

    DataHub v0.9.3

    Released on 2022-11-30 by @maggiehays.

    View the release notes for DataHub v0.9.3 on GitHub.

    DataHub v0.9.2

    Released on 2022-11-04 by @maggiehays.

    View the release notes for DataHub v0.9.2 on GitHub.

    DataHub v0.9.1

    Released on 2022-10-31 by @maggiehays.

    View the release notes for DataHub v0.9.1 on GitHub.

    DataHub v0.9.0

    Released on 2022-10-11 by @szalai1.

    View the release notes for DataHub v0.9.0 on GitHub.

    DataHub v0.8.45

    Released on 2022-09-23 by @gabe-lyons.

    View the release notes for DataHub v0.8.45 on GitHub.

    DataHub v0.8.44

    Released on 2022-09-01 by @jjoyce0510.

    View the release notes for DataHub v0.8.44 on GitHub.

    DataHub v0.8.43

    Released on 2022-08-09 by @maggiehays.

    View the release notes for DataHub v0.8.43 on GitHub.

    v0.8.42

    Released on 2022-08-03 by @gabe-lyons.

    View the release notes for v0.8.42 on GitHub.

    v0.8.41

    Released on 2022-07-15 by @anshbansal.

    View the release notes for v0.8.41 on GitHub.

    v0.8.40

    Released on 2022-06-30 by @gabe-lyons.

    View the release notes for v0.8.40 on GitHub.

    v0.8.39

    Released on 2022-06-24 by @maggiehays.

    View the release notes for v0.8.39 on GitHub.

    [!] DataHub v0.8.38

    Released on 2022-06-09 by @jjoyce0510.

    View the release notes for [!] DataHub v0.8.38 on GitHub.

    [!] DataHub v0.8.37

    Released on 2022-06-09 by @jjoyce0510.

    View the release notes for [!] DataHub v0.8.37 on GitHub.

    DataHub V0.8.36

    Released on 2022-06-02 by @treff7es.

    View the release notes for DataHub V0.8.36 on GitHub.

    [!] DataHub v0.8.35

    Released on 2022-05-18 by @dexter-mh-lee.

    View the release notes for [!] DataHub v0.8.35 on GitHub.

    v0.8.34

    Released on 2022-05-04 by @maggiehays.

    View the release notes for v0.8.34 on GitHub.

    DataHub v0.8.33

    Released on 2022-04-15 by @dexter-mh-lee.

    View the release notes for DataHub v0.8.33 on GitHub.

    DataHub v0.8.32

    Released on 2022-04-04 by @dexter-mh-lee.

    View the release notes for DataHub v0.8.32 on GitHub.

    DataHub v0.8.31

    Released on 2022-03-17 by @dexter-mh-lee.

    View the release notes for DataHub v0.8.31 on GitHub.

    Datahub v0.8.30

    Released on 2022-03-17 by @rslanka.

    View the release notes for Datahub v0.8.30 on GitHub.

    - + \ No newline at end of file diff --git a/docs/rfc/index.html b/docs/rfc/index.html index 05d0c6de7d63f..59ac67e2551f2 100644 --- a/docs/rfc/index.html +++ b/docs/rfc/index.html @@ -8,7 +8,7 @@ - + @@ -50,7 +50,7 @@ team member believes an RFC PR is ready to be accepted into active status, they can approve the PR using GitHub's review feature to signal their approval of the RFCs.

    DataHub's RFC process is inspired by many others, including Vue.js and Ember.

    - + \ No newline at end of file diff --git a/docs/roadmap/index.html b/docs/roadmap/index.html index c311f9927432c..f2a165a4d9a92 100644 --- a/docs/roadmap/index.html +++ b/docs/roadmap/index.html @@ -8,13 +8,13 @@ - +

    DataHub Roadmap

    The DataHub Roadmap has a new home!

    Please refer to the new DataHub Roadmap for the most up-to-date details of what we are working on!

    If you have suggestions about what we should consider in future cycles, feel free to submit a feature request and/or upvote existing feature requests so we can get a sense of level of importance!

    Historical Roadmap

    The following represents the progress made on historical roadmap items as of January 2022. For incomplete roadmap items, we have created Feature Requests to gauge current community interest & impact to be considered in future cycles. If you see something that is still of high interest to you, please up-vote via the Feature Request portal link and subscribe to the post for updates as we progress through the work in future cycles.

    Q4 2021 [Oct - Dec 2021]

    Data Lake Ecosystem Integration

    Metadata Trigger Framework

    View in Feature Request Portal

    • Stateful sensors for Airflow
    • Receive events for you to send alerts, email
    • Slack integration

    ML Ecosystem

    Metrics Ecosystem

    View in Feature Request Portal

    • Measures, Dimensions
    • Relationships to Datasets and Dashboards

    Data Mesh oriented features

    • Data Product modeling
    • Analytics to enable Data Meshification

    Collaboration

    View in Feature Request Portal

    • Conversations on the platform
    • Knowledge Posts (Gdocs, Gslides, Gsheets)

    Q3 2021 [Jul - Sept 2021]

    Data Profiling and Dataset Previews

    Use Case: See sample data for a dataset and statistics on the shape of the data (column distribution, nullability etc.)

    • Support for data profiling and preview extraction through ingestion pipeline (column samples, not rows)

    Data Quality

    Included in Q1 2022 Roadmap - Display Data Quality Checks in the UI

    • Support for data profiling and time-series views
    • Support for data quality visualization
    • Support for data health score based on data quality results and pipeline observability
    • Integration with systems like Great Expectations, AWS deequ, dbt test etc.

    Fine-grained Access Control for Metadata

    • Support for role-based access control to edit metadata
    • Scope: Access control on entity-level, aspect-level and within aspects as well.

    Column-level lineage

    Included in Q1 2022 Roadmap - Column Level Lineage

    • Metadata Model
    • SQL Parsing

    Operational Metadata

    Q2 2021 (Apr - Jun 2021)

    Cloud Deployment

    • Production-grade Helm charts for Kubernetes-based deployment
    • How-to guides for deploying DataHub to all the major cloud providers
      • AWS
      • Azure
      • GCP

    Product Analytics for DataHub

    • Helping you understand how your users are interacting with DataHub
    • Integration with common systems like Google Analytics etc.

    Usage-Based Insights

    • Display frequently used datasets, etc.
    • Improved search relevance through usage data

    Role-based Access Control

    • Support for fine-grained access control for metadata operations (read, write, modify)
    • Scope: Access control on entity-level, aspect-level and within aspects as well.
    • This provides the foundation for Tag Governance, Dataset Preview access control etc.

    No-code Metadata Model Additions

    Use Case: Developers should be able to add new entities and aspects to the metadata model easily

    • No need to write any code (in Java or Python) to store, retrieve, search and query metadata
    • No need to write any code (in GraphQL or UI) to visualize metadata

    Q1 2021 [Jan - Mar 2021]

    React UI

    • Build a new UI based on React
    • Deprecate open-source support for Ember UI

    Python-based Metadata Integration

    • Build a Python-based Ingestion Framework
    • Support common people repositories (LDAP)
    • Support common data repositories (Kafka, SQL databases, AWS Glue, Hive)
    • Support common transformation sources (dbt, Looker)
    • Support for push-based metadata emission from Python (e.g. Airflow DAGs)

    Dashboards and Charts

    • Support for dashboard and chart entity page
    • Support browse, search and discovery

    SSO for Authentication

    • Support for Authentication (login) using OIDC providers (Okta, Google etc)

    Tags

    Use-Case: Support for free-form global tags for social collaboration and aiding discovery

    • Edit / Create new tags
    • Attach tags to relevant constructs (e.g. datasets, dashboards, users, schema_fields)
    • Search using tags (e.g. find all datasets with this tag, find all entities with this tag)

    Business Glossary

    • Support for business glossary model (definition + storage)
    • Browse taxonomy
    • UI support for attaching business terms to entities and fields

    Jobs, Flows / Pipelines

    Use case: Search and Discover your Pipelines (e.g. Airflow DAGs) and understand lineage with datasets

    • Support for Metadata Models + Backend Implementation
    • Metadata Integrations with systems like Airflow.

    Data Profiling and Dataset Previews

    Use Case: See sample data for a dataset and statistics on the shape of the data (column distribution, nullability etc.)

    • Support for data profiling and preview extraction through ingestion pipeline
    • Out of scope for Q1: Access control of data profiles and sample data
    - + \ No newline at end of file diff --git a/docs/saas/index.html b/docs/saas/index.html index 8c5d803f627db..9b1c7007f4fb0 100644 --- a/docs/saas/index.html +++ b/docs/saas/index.html @@ -8,13 +8,13 @@ - +
    - + \ No newline at end of file diff --git a/docs/schema-history/index.html b/docs/schema-history/index.html index e8ffa6be79e58..afa0e86f2da77 100644 --- a/docs/schema-history/index.html +++ b/docs/schema-history/index.html @@ -8,7 +8,7 @@ - + @@ -23,7 +23,7 @@ fields.

    In addition to this, you can also toggle the Audit view that shows you when the most recent changes were made to each field. You can activate this by clicking on the Audit icon you see above the top right of the table.

    You can see here that some of these fields were added at the oldest dataset version, while some were added only at this latest version. Some fields were even modified and had a type change at the latest version!

    GraphQL

    FAQ and Troubleshooting

    What updates are planned for the Schema History feature?

    In the future, we plan on adding the following features

    • Supporting a linear timeline view where you can see what changes were made to various schema fields over time
    • Adding a diff viewer that highlights the differences between two versions of a Dataset
    - + \ No newline at end of file diff --git a/docs/slack/index.html b/docs/slack/index.html index c33bc6f4d06ed..2a8002938563e 100644 --- a/docs/slack/index.html +++ b/docs/slack/index.html @@ -8,13 +8,13 @@ - +

    Slack

    The DataHub Slack is a thriving and rapidly growing community - we can't wait for you to join us!

    Sign up here to join us on Slack and to subscribe to the DataHub Community newsletter. Already a member? Log in here.

    Slack Guidelines

    In addition to our Code of Conduct, we expect all Slack Community Members to respect the following guidelines:

    Avoid using DMs and @mentions

    Whenever possible, post your questions and responses in public channels so other Community Members can benefit from the conversation and outcomes. Limit the use of @mentions of other Community Members to be considerate of notification noise.

    Make use of threads

    Threads help us keep conversations contained and help us ensure we help you find a resolution and get you the support you need.

    Use threads when posting long messages and large blocks of code and/or stack trace - it is a MASSIVE help for us to keep track of the large volume of questions across our various support channels.

    Do not post the same question across multiple channels

    If you're having a tough time getting the support you need (or aren't sure where to go!), please DM @Maggie for support

    Do not solicit members of our Slack

    The DataHub Community exists to collaborate with, learn from, and support one another. It is not a space to pitch your products or services directly to our members via public channels, private channels, or direct messages.

    We are excited to have a growing presence from vendors to help answer questions from Community Members as they may arise, but we have a strict 3-strike policy against solicitation:

    1. First occurrence: We'll give you a friendly but public reminder that the behavior is inappropriate according to our guidelines.
    2. Second occurrence: We'll send you a DM warning that any additional violations will result in removal from the community.
    3. Third occurrence: We'll delete or ban your account.

    We reserve the right to ban users without notice if they are clearly spamming our Community Members.

    Let's get you settled in --

    • Head over to #introduce-yourself to, well, introduce yourself! We'd love to learn more about you, what brings you here, and how we can support you
    • Not sure how to start? You guessed it, check out #getting-started - we'll point you in the right direction
    • Looking for general debugging help? #troubleshoot is the place to go
    • Need some live support from the Core DataHub Team? Join us during our 2xWeek Office Hours via Zoom! Check out #office-hours for more details
    • Looking for ways to contribute to the DataHub project? Tell us all about it in #contribute
    • Have suggestions on how to make DataHub better? We can't wait to hear them in #feature-requests
    • Excited to share your experience working with DataHub? #show-and-tell is the perfect channel for you
    • Need something else? Reach out to @Maggie - our Community Product Manager
    - + \ No newline at end of file diff --git a/docs/sync-status/index.html b/docs/sync-status/index.html index a499ebf188d17..30b8cb116503b 100644 --- a/docs/sync-status/index.html +++ b/docs/sync-status/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ using metadata ingestion or deleting it if it no longer exists.

    Sync Status Setup, Prerequisites, and Permissions

    The sync status feature is enabled by default and does not require any special setup.

    Using Sync Status

    The DataHub UI will display the sync status in the top right corner of the page.

    The last synchronized date is basically the last time an ingestion run saw an entity. It is computed as the most recent update to the entity, excluding changes done through the UI. If an ingestion run restates an entity but doesn't actually cause any changes, we still count that as an update for the purposes of sync status.

    Technical details: computing the last synchronized timestamp

    To compute the last synchronized timestamp, we look at the system metadata of all aspects associated with the entity. We exclude any aspects where the system metadata runId value is unset or equal to no-run-id-provided, as this is what filters out changes made through the UI. Finally, we take the most recent system metadata lastObserved timestamp across the aspects and use that as the last synchronized timestamp.

    We'll automatically assign a color based on the sync status recency:

    • Green: last synchronized in the past week
    • Yellow: last synchronized in the past month
    • Red: last synchronized more than a month ago

    You can hover over the sync status message in the UI to view the exact timestamp of the most recent sync.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/tags/index.html b/docs/tags/index.html index d297de011112e..33c43c2fd3ae0 100644 --- a/docs/tags/index.html +++ b/docs/tags/index.html @@ -8,14 +8,14 @@ - +

    About DataHub Tags

    Feature Availability
    Self-Hosted DataHub
    Managed DataHub

    Tags are informal, loosely controlled labels that help in search & discovery. They can be added to datasets, dataset schemas, or containers, for an easy way to label or categorize entities – without having to associate them to a broader business glossary or vocabulary.

    Tags can help you in:

    • Querying: Tagging a dataset with a phrase that a co-worker can use to query the same dataset
    • Mapping assets to a category or group of your choice

    Tags Setup, Prerequisites, and Permissions

    What you need to add tags:

    • Edit Tags metadata privilege to add tags at the entity level
    • Edit Dataset Column Tags to edit tags at the column level

    You can create these privileges by creating a new Metadata Policy.

    Using DataHub Tags

    Adding a Tag

    To add a tag at the dataset or container level, simply navigate to the page for that entity and click on the Add Tag button.

    Type in the name of the tag you want to add. You can add a new tag, or add a tag that already exists (the autocomplete will pull up the tag if it already exists).

    Click on the "Add" button and you'll see the tag has been added!

    If you would like to add a tag at the schema level, hover over the "Tags" column for a schema until the "Add Tag" button shows up, and then follow the same flow as above.

    Removing a Tag

    To remove a tag, simply click on the "X" button in the tag. Then click "Yes" when prompted to confirm tag removal.

    Searching by a Tag

    You can search for a tag in the search bar, and even filter entities by the presence of a specific tag.

    Additional Resources

    Videos

    Add Ownership, Tags, Terms, and more to DataHub via CSV!

    GraphQL

    You can easily fetch the Tags for an entity given its URN using the tags property. Check out Working with Metadata Entities for an example.

    DataHub Blog

    FAQ and Troubleshooting

    What is the difference between DataHub Tags and Glossary Terms?

    DataHub Tags are informal, loosely controlled labels while Terms are part of a controlled vocabulary, with optional hierarchy. Tags have no element of formal, central management.

    Usage and applications:

    • An asset may have multiple tags.
    • Tags serve as a tool for search & discovery while Terms are typically used to standardize types of leaf-level attributes (i.e. schema fields) for governance. E.g. (EMAIL_PLAINTEXT)

    How are DataHub Tags different from Domains?

    Domains are a set of top-level categories usually aligned to business units/disciplines to which the assets are most relevant. They rely on central or distributed management. A single domain is assigned per data asset.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/tests/metadata-tests/index.html b/docs/tests/metadata-tests/index.html index 95742a0dc9514..d7b1d9af6c59a 100644 --- a/docs/tests/metadata-tests/index.html +++ b/docs/tests/metadata-tests/index.html @@ -8,7 +8,7 @@ - + @@ -59,7 +59,7 @@ a Metadata Test may verify that ALL tables in Snowflake have at least 1 assigned owner, and a human-authored description. Metadata Tests allow you to manage broad policies across your entire data ecosystem driven by metadata, for example to augment a larger scale Data Governance initiative.

    Need more help? Join the conversation in Slack!

    - + \ No newline at end of file diff --git a/docs/townhall-history/index.html b/docs/townhall-history/index.html index 5ea4f2d90d177..5b9d1b90dbae1 100644 --- a/docs/townhall-history/index.html +++ b/docs/townhall-history/index.html @@ -8,13 +8,13 @@ - +

    Town Hall History

    A list of previous Town Halls, their planned schedule, and the recording of the meeting.

    03/23/2023

    Full YouTube video

    Agenda

    • Community & Roadmap Update
    • Recent Releases
    • Community Case Study — Jumio’s DataHub adoption journey
    • DataHub 201: Data Debugging
    • Sneak Peek: Streamlined Filtering Experience

    02/23/2023

    Full YouTube video

    Agenda

    • Community & Roadmap Update
    • Recent Releases
    • Community Case Study - How the Hurb Team successfully implemented and adopted DataHub within their organization
    • Sneak Peek: Subscriptions and Notifications
    • Search Improvements - API support for pagination
    • New Feature - Custom Queries
    • Simplifying Metadata Ingestion
    • DataHub 201: Rolling Out DataHub

    01/26/2023

    Full YouTube video

    Agenda

    • What’s to Come - Q1 2023 Roadmap: Data Products, Data Contracts and more
    • Community Case Study - Notion: Automating annotations and metadata propagation
    • Community Contribution - Grab: Improvements to documentation editing
    • Simplifying DataHub - Removing Schema Registry requirement and introducing DataHub Lite

    01/05/2023

    Full YouTube video

    Agenda

    • DataHub Community: 2022 in Review - Our Community of Data Practitioners is one of a kind. We’ll take the time to celebrate who we are, what we’ve built, and how we’ve collaborated in the past 12 months.
    • Search Improvements - Learn how we’re making the Search experience smarter and faster to connect you with the most relevant resources during data discovery.
    • Removing Schema Registry Requirement - Hear all about ongoing work to simplify the DataHub deployment process.
    • Smart Data Profiling - We’re making big improvements to data profiling! Smart data profiling will reduce processing time by only scanning datasets that have recently changed.
    • Sneak Peek: Time-based Lineage - Get a preview of how you’ll soon be able to trace lineage between datasets across different points in time to understand how interdependencies have evolved.
    • Sneak Peek: Chrome Extension - Soon, you’ll be able to quickly access rich metadata from DataHub while exploring resources in Looker via our upcoming Chrome Extension.

    12/01/2022

    Full YouTube video

    Agenda

    November Town Hall (in December!)

    • Community Case Study - The Pinterest Team will share how they have integrated DataHub + Thrift and extended the Metadata Model with a Data Element entity to capture semantic types.
    • NEW! Ingestion Quickstart Guides - DataHub newbies, this one is for you! We’re rolling out ingestion quickstart guides to help you quickly get up and running with DataHub + Snowflake, BigQuery, and more!
    • NEW! In-App Product Tours - We’re making it easier than ever for end-users to get familiar with all that DataHub has to offer - hear all about the in-product onboarding resources we’re rolling out soon!
    • DataHub UI Navigation and Performance - Learn all about upcoming changes to our user experience to make it easier (and faster!) for end users to work within DataHub.
    • Sneak Peek! Manual Lineage via the UI - The Community asked and we’re delivering! Soon you’ll be able to manually add lineage connections between Entities in DataHub.
    • NEW! Slack + Microsoft Teams Integrations - Send automated alerts to Slack and/or Teams to keep track of critical events and changes within DataHub.
    • Hacktoberfest Winners Announced - We’ll recap this year’s Hacktoberfest and announce three winners of a $250 Amazon gift card & DataHub Swag.

    10/27/2022

    Full YouTube video

    Agenda

    • Conquer Data Governance with Acryl Data’s Metadata Tests - Learn how to tackle Data Governance with incremental, automation-driven governance using Metadata Tests provided in Acryl Data’s managed DataHub offering
    • Community Case Study - The Grab Team shares how they are using DataHub for data discoverability, automated classification and governance workflows, data quality observability, and beyond!
    • Upcoming Ingestion Sources - We’ll tell you the ins and outs of our upcoming dbt Cloud and Unity Catalog connectors
    • Sneak Peek! Saved Views - Learn how you can soon use Saved Views to help end-users navigate entities in DataHub with more precision and focus
    • Performance Improvements - Hear about the latest upgrades to DataHub performance

    9/29/2022

    Full YouTube video

    Agenda

    • Column Level Lineage is here! - Demo of column-level lineage and impact analysis in the DataHub UI
    • Community Case Study - The Stripe Team shares how they leverage DataHub to power observability within their Airflow-based ecosystem
    • Sneak Peek! Automated PII Classification - Preview upcoming functionality to automatically identify data fields that likely contain sensitive data
    • Ingestion Improvements Galore - Improved performance and functionality for dbt, Looker, Tableau, and Presto ingestion sources

    8/25/2022

    Full YouTube video

    Agenda

    • Community Case Study - The Etsy Team shares their journey of adopting DataHub
    • Looker & DataHub Improvements - surface the most relevant Looks and Dashboards
    • Home Page Improvements to tailor the Browse experience
    • Unified Ingestion Summaries - View live logs for UI-based ingestion and see historical ingestion reports across CLI and UI-based ingestion
    • Patch Support - Native support for PATCH in the metadata protocol to support efficient updates to add & remove owners, lineage, tags and more
    • Sneak Peek! Advanced Search

    7/28/2022

    Full YouTube video

    Agenda

    • Community Updates
    • Project Updates
    • Improvements to UI-Based Ingestion
    • Sneak Preview - Bulk Edits via the UI
    • Streamlined Metadata Ingestion
    • DataHub 201: Metadata Enrichment

    6/30/2022

    Full YouTube video

    Agenda

    • Community Updates
    • Project Updates
    • dbt Integration Updates
    • CSV Ingestion Support
    • DataHub 201 - Glossary Term Deep Dive

    5/26/2022

    Full YouTube video

    Agenda

    • Community Case Study: Hear how the G-Research team is using Cassandra as DataHub’s Backend
    • Creating & Editing Glossary Terms from the DataHub UI
    • DataHub User Onboarding via the UI
    • DataHub 201: Impact Analysis
    • Sneak Peek: Data Reliability with DataHub
    • Metadata Day Hackathon Winners

    4/28/2022

    Full YouTube video

    Agenda

    • Community Case Study: Hear from Included Health about how they are embedding external tools into the DataHub UI
    • New! Actions Framework: run custom code when changes happen within DataHub
    • UI Refresh for ML Entities
    • Improved deletion support for time-series aspects, tags, terms, & more
    • OpenAPI Improvements

    3/31/2022

    Full YouTube video

    Agenda

    • Community Case Study: Hear from Zendesk about how they are applying “shift left” principles by authoring metadata in their Protobuf schemas
    • RBAC Functionality: View-Based Policies
    • Schema Version History - surfacing the history of schema changes in DataHub's UI
    • Improvements to Airflow Ingestion, including Run History
    • Container/Domain-Level Property Inheritance
    • Delete API

    2/25/2022

    Full YouTube video

    Agenda

    • Lineage Impact Analysis - using DataHub to understand the impact of changes on downstream dependencies
    • Displaying Data Quality Checks in the UI
    • Roadmap update: Schema Version History & Column-Level Lineage
    • Community Case Study: Managing Lineage via YAML

    1/28/2022

    Full YouTube video

    Agenda

    • Community & Roadmap Updates by Maggie Hays (Acryl Data)
    • Project Updates by Shirshanka Das (Acryl Data)
    • Community Case Study: Adding Dataset Transformers by Eric Cooklin (Stash)
    • Demo: Data Domains & Containers by John Joyce (Acryl Data)
    • DataHub Basics — Data Profiling & Usage Stats 101 by Maggie Hays & Tamás Németh (Acryl Data)
    • Demo: Spark Lineage by Mugdha Hardikar (GS Lab) & Shirshanka Das

    12/17/2021

    Full YouTube video

    Agenda

    • Community & Roadmap Updates by Maggie Hays (Acryl Data)
    • Project Updates by Shirshanka Das (Acryl Data)
    • 2021 DataHub Community in Review by Maggie Hays
    • DataHub Basics -- Users, Groups, & Authentication 101 by Pedro Silva (Acryl Data)
    • Sneak Peek: UI-Based Ingestion by John Joyce (Acryl Data)
    • Case Study — DataHub at Grofers by Shubham Gupta
    • Top DataHub Contributors of 2021 - Maggie Hays
    • Final Surprise! We Interviewed a 10yo and a 70yo about DataHub

    11/19/2021

    Full YouTube video

    Agenda

    • Community & Roadmap Updates by Maggie Hays (Acryl Data)
    • Project Updates by Shirshanka Das (Acryl Data)
    • DataHub Basics -- Lineage 101 by John Joyce & Surya Lanka (Acryl Data)
    • Introducing No-Code UI by Gabe Lyons & Shirshanka Das (Acryl Data)
    • DataHub API Authentication by John Joyce (Acryl Data)
    • Case Study: LinkedIn pilot to extend the OSS UI by Aikepaer Abuduweili & Joshua Shinavier

    10/29/2021

    Full YouTube video

    Agenda

    • DataHub Community & Roadmap Update - Maggie Hays (Acryl Data)
    • October Project Updates - Shirshanka Das (Acryl Data)
    • Introducing Recommendations - John Joyce & Dexter Lee (Acryl Data)
    • Case Study: DataHub @ hipages - Chris Coulson (hipages)
    • Data Profiling Improvements - Surya Lanka & Harshal Sheth (Acryl Data)
    • Lineage Improvements & BigQuery Dataset Lineage by Gabe Lyons & Varun Bharill (Acryl Data)

    9/24/2021

    Full YouTube video

    Agenda

    • Project Updates and Callouts by Shirshanka
      • GraphQL Public API Announcement
    • Demo: Faceted Search by Gabe Lyons (Acryl Data)
    • Stateful Ingestion by Shirshanka Das & Surya Lanka (Acryl Data)
    • Case-Study: DataHub @ Adevinta by Martinez de Apellaniz
    • Recent Improvements to the Looker Connector by Shirshanka Das & Maggie Hays (Acryl Data)
    • Offline
      • Foreign Key and Related Term Mapping by Gabe Lyons (Acryl Data) video

    8/27/2021

    Full YouTube video

    Agenda

    • Project Updates and Callouts by Shirshanka
      • Business Glossary Demo
      • 0.8.12 Upcoming Release Highlights
      • Users and Groups Management (Okta, Azure AD)
    • Demo: Fine Grained Access Control by John Joyce (Acryl Data)
    • Community Case-Study: DataHub @ Warung Pintar and Redash integration by Taufiq Ibrahim (Bizzy Group)
    • New User Experience by John Joyce (Acryl Data)
    • Offline
      • Performance Monitoring by Dexter Lee (Acryl Data) video

    7/23/2021

    Full YouTube video

    Medium Post

    Agenda

    • Project Updates by Shirshanka
      • Release highlights
    • Deep Dive: Data Observability: Phase 1 by Harshal Sheth, Dexter Lee (Acryl Data)
    • Case Study: Building User Feedback into DataHub by Melinda Cardenas (NY Times)
    • Demo: AWS SageMaker integration for Models and Features by Kevin Hu (Acryl Data)

    6/25/2021

    Full YouTube video

    Medium Post

    Agenda

    • Project Updates by Shirshanka
      • Release notes
      • RBAC update
      • Roadmap for H2 2021
    • Demo: Table Popularity powered by Query Activity by Harshal Sheth (Acryl Data)
    • Case Study: Business Glossary in production at Saxo Bank by Sheetal Pratik (Saxo Bank), Madhu Podila (ThoughtWorks)
    • Developer Session: Simplified Deployment for DataHub by John Joyce, Gabe Lyons (Acryl Data)

    5/27/2021

    Full YouTube video

    Medium Post

    Agenda

    • Project Updates by Shirshanka - 10 mins
      • 0.8.0 Release
      • AWS Recipe by Dexter Lee (Acryl Data)
    • Demo: Product Analytics design sprint (Maggie Hays (SpotHero), Dexter Lee (Acryl Data)) - 10 mins
    • Use-Case: DataHub on GCP by Sharath Chandra (Confluent) - 10 mins
    • Deep Dive: No Code Metadata Engine by John Joyce (Acryl Data) - 20 mins
    • General Q&A and closing remarks

    4/23/2021

    Full YouTube video

    Medium Digest

    Agenda

    • Welcome - 5 mins
    • Project Updates by Shirshanka - 10 mins
      • 0.7.1 Release and callouts (dbt by Gary Lucas)
      • Product Analytics design sprint announcement (Maggie Hayes)
    • Use-Case: DataHub at DefinedCrowd (video) by Pedro Silva - 15 mins
    • Deep Dive + Demo: Lineage! Airflow, Superset integration (video) by Harshal Sheth and Gabe Lyons - 10 mins
    • Use-Case: DataHub Hackathon at Depop (video) by John Cragg - 10 mins
    • Observability Feedback share out - 5 mins
    • General Q&A and closing remarks - 5 mins

    3/19/2021

    YouTube video

    Medium Digest

    Agenda

    • Welcome - 5 mins
    • Project Updates (slides) by Shirshanka - 10 mins
      • 0.7.0 Release
      • Project Roadmap
    • Demo Time: Themes and Tags in the React App! by Gabe Lyons - 10 mins
    • Use-Case: DataHub at Wolt (slides) by Fredrik and Matti - 15 mins
    • Poll Time: Observability Mocks! (slides) - 5 mins
    • General Q&A from sign up sheet, slack, and participants - 10 mins
    • Closing remarks - 5 mins

    2/19/2021

    YouTube video

    Medium Digest

    Agenda

    • Welcome - 5 mins
    • Latest React App Demo! (video) by John Joyce and Gabe Lyons - 5 mins
    • Use-Case: DataHub at Geotab (slides,video) by John Yoon - 15 mins
    • Tech Deep Dive: Tour of new pull-based Python Ingestion scripts (slides,video) by Harshal Sheth - 15 mins
    • General Q&A from sign up sheet, slack, and participants - 15 mins
    • Closing remarks - 5 mins

    1/15/2021

    Full Recording

    Slide-deck

    Agenda

    • Announcements - 2 mins
    • Community Updates (video) - 10 mins
    • Use-Case: DataHub at Viasat (slides,video) by Anna Kepler - 15 mins
    • Tech Deep Dive: GraphQL + React RFCs readout and discussion (slides ,video) by John Joyce and Arun Vasudevan - 15 mins
    • General Q&A from sign up sheet, slack, and participants - 15 mins
    • Closing remarks - 3 mins
    • General Q&A from sign up sheet, slack, and participants - 15 mins
    • Closing remarks - 5 minutes

    12/04/2020

    Recording

    Agenda

    11/06/2020

    Recording

    Agenda

    09/25/2020

    Recording

    Agenda

    08/28/2020

    Recording

    Agenda

    07/31/20

    Recording

    Agenda

    • Quick intro - 5 mins
    • Showcasing new entities onboarded to internal LinkedIn DataHub (Data Concepts, Schemas) by Nagarjuna Kanamarlapudi (LinkedIn) - 15 mins
    • Showcasing new Lineage UI in internal LinkedIn DataHub By Ignacio Bona (LinkedIn) - 10 mins
    • New RFC Process by John Plaisted (LinkedIn) - 2 mins
    • Answering questions from the signup sheet - 13 mins
    • Questions from the participants - 10 mins
    • Closing remarks - 5 mins

    06/26/20

    Recording

    Agenda

    • Quick intro - 5 mins
    • Onboarding Data Process entity by Liangjun Jiang (Expedia) - 15 mins
    • How to onboard a new relationship to metadata graph by Kerem Sahin (Linkedin) - 15 mins
    • Answering questions from the signup sheet - 15 mins
    • Questions from the participants - 10 mins
    • Closing remarks - 5 mins

    05/29/20

    Recording

    Agenda

    • Quick intro - 5 mins
    • How to add a new aspect/feature for an existing entity in UI by Charlie Tran (LinkedIn) - 10 mins
    • How to search over a new field by Jyoti Wadhwani (LinkedIn) - 10 mins
    • Answering questions from the signup sheet - 15 mins
    • Questions from the participants - 10 mins
    • Closing remarks - 5 mins

    04/17/20

    Recording

    Agenda

    04/03/20

    Recording

    Q&A

    • Agenda
      • Quick intro - 5 mins
      • Creating Helm charts for deploying DataHub on Kubernetes by Bharat Akkinepalli (ThoughtWorks) - 10 mins
      • How to onboard a new metadata aspect by Mars Lan (LinkedIn) - 10 mins
      • Answering questions from the signup sheet - 15 mins
      • Questions from the participants - 10 mins
      • Closing remarks - 5 mins

    03/20/20

    Recording

    Q&A

    Agenda

    • Quick intro - 5 mins
    • Internal DataHub demo - 10 mins
    • What's coming up next for DataHub (what roadmap items we are working on) - 10 mins
    • Answering questions from the signup sheet - 15 mins
    • Questions from the participants - 10 mins
    • Closing remarks - 5 mins

    03/06/20

    Recording

    Q&A

    - + \ No newline at end of file diff --git a/docs/townhalls/index.html b/docs/townhalls/index.html index eaf6d7e0549bf..da435dd443a40 100644 --- a/docs/townhalls/index.html +++ b/docs/townhalls/index.html @@ -8,7 +8,7 @@ - + @@ -17,7 +17,7 @@ Currently it's held on the fourth Thursday of every month (with some exceptions such as holiday weekends). It's the perfect venue to meet the team behind DataHub and other users, as well as to ask higher-level questions, such as roadmap and product direction. From time to time we also use the opportunity to showcase upcoming features.

    Meeting Invite & Agenda

    You can join with this link https://zoom.datahubproject.io, or RSVP to get a calendar invite - this will always have the most up-to-date agenda for upcoming sessions.

    Past Meetings

    See Town Hall History for recordings of past town halls.

    - + \ No newline at end of file diff --git a/docs/troubleshooting/build/index.html b/docs/troubleshooting/build/index.html index 4b67ec667870d..025baf49da6ee 100644 --- a/docs/troubleshooting/build/index.html +++ b/docs/troubleshooting/build/index.html @@ -8,14 +8,14 @@ - +

    Build Debugging Guide

    For when Local Development did not work out smoothly.

    Getting Unsupported class file major version 57

    You're probably using a Java version that's too new for gradle. Run the following command to check your Java version

    java --version

    While it may be possible to build and run DataHub using newer versions of Java, we currently only support Java 11.

    Getting cannot find symbol error for javax.annotation.Generated

    Similar to the previous issue, please use Java 11 to build the project. You can install multiple versions of Java on a single machine and switch between them using the JAVA_HOME environment variable. See this document for more details.

    :metadata-models:generateDataTemplate task fails with java.nio.file.InvalidPathException: Illegal char <:> at index XX or Caused by: java.lang.IllegalArgumentException: 'other' has different root error

    This is a known issue when building the project on Windows due to a bug in the Pegasus plugin. Please refer to Windows Compatibility.

    As we generate quite a few files from the models, it is possible that old generated files may conflict with new model changes. When this happens, a simple ./gradlew clean should resolve the issue.

    Execution failed for task ':metadata-service:restli-servlet-impl:checkRestModel'

    This generally means that an incompatible change was introduced to the rest.li API in GMS. You'll need to rebuild the snapshots/IDL by running the following command once

    ./gradlew :metadata-service:restli-servlet-impl:build -Prest.model.compatibility=ignore

    java.io.IOException: No space left on device

    This means you're running out of space on your disk to build. Please free up some space or try a different disk.

    Build failed for task ./gradlew :datahub-frontend:dist -x yarnTest -x yarnLint

    This could mean that you need to update your Yarn version

    - + \ No newline at end of file diff --git a/docs/troubleshooting/general/index.html b/docs/troubleshooting/general/index.html index 3707211fb1633..8c0b81c12a9ab 100644 --- a/docs/troubleshooting/general/index.html +++ b/docs/troubleshooting/general/index.html @@ -8,13 +8,13 @@ - + - + \ No newline at end of file diff --git a/docs/troubleshooting/quickstart/index.html b/docs/troubleshooting/quickstart/index.html index a8de85b6c4663..3d07dcd6fd9f2 100644 --- a/docs/troubleshooting/quickstart/index.html +++ b/docs/troubleshooting/quickstart/index.html @@ -8,7 +8,7 @@ - + @@ -21,7 +21,7 @@ to deploying with a case-sensitive collation (utf8mb4_bin) by default. In order to update a deployment that was started before Oct 26, 2021 (v0.8.16 and below) to have the new collation, you must run this command against your SQL DB directly:

    ALTER TABLE metadata_aspect_v2 CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_bin;

    I've modified the default user.props file to include a custom username and password, but I don't see the new user(s) inside the Users & Groups tab. Why not?

    Currently, user.props is a file used by the JAAS PropertyFileLoginModule solely for the purpose of Authentication. The file is not used as a source from which to ingest additional metadata about the user. For that, you'll need to ingest some custom information about your new user using the Rest.li APIs or the File-based ingestion source.

    For an example of a file that ingests user information, check out single_mce.json, which ingests a single user object into DataHub. Notice that the "urn" field provided will need to align with the custom username you've provided in user.props file. For example, if your user.props file contains:

    my-custom-user:my-custom-password

    You'll need to ingest some metadata of the following form to see it inside the DataHub UI:

    {
    "auditHeader": null,
    "proposedSnapshot": {
    "com.linkedin.pegasus2avro.metadata.snapshot.CorpUserSnapshot": {
    "urn": "urn:li:corpuser:my-custom-user",
    "aspects": [
    {
    "com.linkedin.pegasus2avro.identity.CorpUserInfo": {
    "active": true,
    "displayName": {
    "string": "The name of the custom user"
    },
    "email": "my-custom-user-email@example.io",
    "title": {
    "string": "Engineer"
    },
    "managerUrn": null,
    "departmentId": null,
    "departmentName": null,
    "firstName": null,
    "lastName": null,
    "fullName": {
    "string": "My Custom User"
    },
    "countryCode": null
    }
    }
    ]
    }
    },
    "proposedDelta": null
    }

    I've configured OIDC, but I cannot login. I get continuously redirected. What do I do?

    Sorry to hear that!

    This phenomenon may be due to the size of a Cookie DataHub uses to authenticate its users. If it's too large ( > 4096), then you'll see this behavior. The cookie embeds an encoded version of the information returned by your OIDC Identity Provider - if they return a lot of information, this can be the root cause.

    One solution is to use Play Cache to persist this session information for a user. This means the attributes about the user (and their session info) will be stored in an in-memory store in the datahub-frontend service, instead of a browser-side cookie.

    To configure the Play Cache session store, you can set the env variable "PAC4J_SESSIONSTORE_PROVIDER" as "PlayCacheSessionStore" for the datahub-frontend container.

    Do note that there are downsides to using the Play Cache. Specifically, it will make datahub-frontend a stateful server. If you have multiple instances of datahub-frontend deployed, you'll need to ensure that the same user is deterministically routed to the same service container (since the sessions are stored in memory). If you're using a single instance of datahub-frontend (the default), then things should "just work".

    For more details, please refer to https://github.com/datahub-project/datahub/pull/5114

    - + \ No newline at end of file diff --git a/docs/ui-ingestion/index.html b/docs/ui-ingestion/index.html index 53283f480f83f..3b70d21446d88 100644 --- a/docs/ui-ingestion/index.html +++ b/docs/ui-ingestion/index.html @@ -8,7 +8,7 @@ - + @@ -50,7 +50,7 @@ that your executor (datahub-actions) container is down.

    This container is responsible for executing requests to run ingestion when they come in, either on demand or on a particular schedule. You can verify the health of the container using docker ps. Moreover, you can inspect the container logs by finding the container id for the datahub-actions container and running docker logs <container-id>.

    When should I NOT use UI Ingestion?

    There are valid cases for ingesting metadata without the UI-based ingestion scheduler. For example,

    • You have written a custom ingestion Source
    • Your data sources are not reachable on the network where DataHub is deployed
    • Your ingestion source requires context from a local filesystem (e.g. input files, environment variables, etc)
    • You want to distribute metadata ingestion among multiple producers / environments

    How do I attach policies to the actions pod to give it permissions to pull metadata from various sources?

    This varies across the underlying platform. For AWS, please refer to this guide.

    Demo

    Click here to see a full demo of the UI Ingestion feature.

    Feedback / Questions / Concerns

    We want to hear from you! For any inquiries, including Feedback, Questions, or Concerns, reach out on Slack!

    - + \ No newline at end of file diff --git a/docs/what-is-datahub/datahub-concepts/index.html b/docs/what-is-datahub/datahub-concepts/index.html index c10053cc2ef7d..bd5a49520c21b 100644 --- a/docs/what-is-datahub/datahub-concepts/index.html +++ b/docs/what-is-datahub/datahub-concepts/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ In orchestration systems, this is sometimes referred to as an individual "Task" within a "DAG". Examples include an Airflow Task.

    Data Flow

    An executable collection of Data Jobs with dependencies among them, or a DAG. Sometimes referred to as a "Pipeline". Examples include an Airflow DAG.

    Glossary Term

    Shared vocabulary within the data ecosystem.

    Glossary Term Group

    Glossary Term Group is similar to a folder, containing Terms and even other Term Groups to allow for a nested structure.

    Tag

    Tags are informal, loosely controlled labels that help in search & discovery. They can be added to datasets, dataset schemas, or containers, for an easy way to label or categorize entities – without having to associate them to a broader business glossary or vocabulary.

    Domain

    Domains are curated, top-level folders or categories where related assets can be explicitly grouped.

    Owner

    Owner refers to the users or groups that have ownership rights over entities. For example, an owner can be assigned to a dataset or to a column of a dataset.

    Users (CorpUser)

    CorpUser represents an identity of a person (or an account) in the enterprise.

    Groups (CorpGroup)

    CorpGroup represents an identity of a group of users in the enterprise.

    Metadata Model

    Entity

    An entity is the primary node in the metadata graph. For example, an instance of a Dataset or a CorpUser is an Entity.

    Aspect

    An aspect is a collection of attributes that describes a particular facet of an entity. Aspects can be shared across entities, for example "Ownership" is an aspect that is re-used across all the Entities that have owners.

    Relationships

    A relationship represents a named edge between 2 entities. They are declared via foreign key attributes within Aspects along with a custom annotation (@Relationship).

    - + \ No newline at end of file diff --git a/docs/what/aspect/index.html b/docs/what/aspect/index.html index cc614714c7d30..1c7a143c03ade 100644 --- a/docs/what/aspect/index.html +++ b/docs/what/aspect/index.html @@ -8,7 +8,7 @@ - + @@ -25,7 +25,7 @@ it’d follow this pattern naturally, which means one will end up with smaller, more modular, endpoints instead of giant ones.

    Here’s an example metadata aspect. Note that the admin and members fields are implicitly conveying a relationship between Group entity & User entity. It’s very natural to save such relationships as URNs in a metadata aspect. The relationship section explains how this relationship can be explicitly extracted and modelled.

    namespace com.linkedin.group

    import com.linkedin.common.AuditStamp
    import com.linkedin.common.CorpuserUrn

    /**
    * The membership metadata for a group
    */
    record Membership {

    /** Audit stamp for the last change */
    auditStamp: AuditStamp

    /** Admin of the group */
    admin: CorpuserUrn

    /** Members of the group, ordered in descending importance */
    members: array[CorpuserUrn]
    }
    - + \ No newline at end of file diff --git a/docs/what/delta/index.html b/docs/what/delta/index.html index f0682f4c22bd0..b0b50c09881e7 100644 --- a/docs/what/delta/index.html +++ b/docs/what/delta/index.html @@ -8,7 +8,7 @@ - + @@ -20,7 +20,7 @@ This is because the rest.li endpoint will have the logic that performs the corresponding partial update based on the information in the model. That said, it’s common to have fields that denote the list of items to be added or removed (e.g. membersToAdd & membersToRemove from below)
  • Similar to metadata snapshots, entity that supports metadata delta will add an entity-specific metadata delta (e.g. GroupDelta from below) that unions all supported partial update models.
  • The entity-specific metadata delta is then added to the global Delta typeref, which is added as part of Metadata Change Event and used during Metadata Ingestion.
  • namespace com.linkedin.group

    import com.linkedin.common.CorpuserUrn

    /**
    * A metadata delta for a specific group entity
    */
    record MembershipPartialUpdate {

    /** List of members to be added to the group */
    membersToAdd: array[CorpuserUrn]

    /** List of members to be removed from the group */
    membersToRemove: array[CorpuserUrn]
    }
    namespace com.linkedin.metadata.delta

    import com.linkedin.common.CorpGroupUrn
    import com.linkedin.group.MembershipPartialUpdate

    /**
    * A metadata delta for a specific group entity
    */
    record GroupDelta {

    /** URN for the entity the metadata delta is associated with */
    urn: CorpGroupUrn

    /** The specific type of metadata delta to apply */
    delta: union[MembershipPartialUpdate]
    }
    namespace com.linkedin.metadata.delta

    /**
    * A union of all supported metadata delta types.
    */
    typeref Delta = union[GroupDelta]
    - + \ No newline at end of file diff --git a/docs/what/entity/index.html b/docs/what/entity/index.html index 081418a47bd93..59cddf2390def 100644 --- a/docs/what/entity/index.html +++ b/docs/what/entity/index.html @@ -8,14 +8,14 @@ - +
    - + \ No newline at end of file diff --git a/docs/what/gma/index.html b/docs/what/gma/index.html index cef0724265e36..ec1f76eafb25d 100644 --- a/docs/what/gma/index.html +++ b/docs/what/gma/index.html @@ -8,14 +8,14 @@ - +

    What is Generalized Metadata Architecture (GMA)?

    GMA is the backend infrastructure for DataHub. Unlike existing architectures, GMA leverages multiple storage technologies to efficiently service the four most commonly used query patterns

    • Document-oriented CRUD
    • Complex queries (including joining distributed tables)
    • Graph traversal
    • Fulltext search and autocomplete

    GMA also embraces a distributed model, where each team owns, develops and operates their own metadata services (known as GMS), while the metadata are automatically aggregated to populate the central metadata graph and search indexes. This is made possible by standardizing the metadata models and the access layer.

    We strongly believe that GMA can bring tremendous leverage to any team that has a need to store and access metadata. Moreover, standardizing metadata modeling promotes a model-first approach to developments, resulting in a more concise, consistent, and highly connected metadata ecosystem that benefits all DataHub users.

    - + \ No newline at end of file diff --git a/docs/what/gms/index.html b/docs/what/gms/index.html index f0efce2735e63..eb8801879b7d6 100644 --- a/docs/what/gms/index.html +++ b/docs/what/gms/index.html @@ -8,13 +8,13 @@ - +

    What is Generalized Metadata Service (GMS)?

    Metadata for entities onboarded to GMA is served through microservices known as Generalized Metadata Service (GMS). GMS typically provides a Rest.li API and must access the metadata using GMA DAOs.

    While a GMS is completely free to define its public APIs, we do provide a list of resource base classes to leverage for common patterns.

    GMA is designed to support a distributed fleet of GMS, each serving a subset of the GMA graph. However, for simplicity we include a single centralized GMS (datahub-gms) that serves all entities.

    - + \ No newline at end of file diff --git a/docs/what/graph/index.html b/docs/what/graph/index.html index d257bcc9a8e44..abd5a6940aafd 100644 --- a/docs/what/graph/index.html +++ b/docs/what/graph/index.html @@ -8,7 +8,7 @@ - + @@ -18,7 +18,7 @@ However, as stated in the Metadata Modeling section, the graph is merely a derived view of all metadata aspects thus can always be rebuilt directly from historic MAEs. Consequently, it is possible to build a specific snapshot of the graph in time by replaying MAEs up to that point.

    In theory, the system can work with any generic OLTP graph DB that supports the following operations:

    • Dynamical creation, modification, and removal of nodes and edges
    • Dynamical attachment of key-value properties to each node and edge
    • Transactional partial updates of properties of a specific node or edge
    • Fast ID-based retrieval of nodes & edges
    • Efficient queries involving both graph traversal and properties value filtering
    • Support efficient bidirectional graph traversal
    - + \ No newline at end of file diff --git a/docs/what/mxe/index.html b/docs/what/mxe/index.html index ede94d5636c19..4189a54060b63 100644 --- a/docs/what/mxe/index.html +++ b/docs/what/mxe/index.html @@ -8,7 +8,7 @@ - + @@ -50,7 +50,7 @@ Use Metadata Change Log instead.

    MAEs are emitted once any metadata change has been successfully committed into DataHub's storage layer.

    The default Kafka topic name for MAEs is MetadataAuditEvent_v4.

    Consumption

    No active consumers.

    Schema

    The PDL schema can be found here.

    Examples

    An example of an MAE emitted representing a change made to the 'ownership' aspect for an Entity (owner removed):

    {
    "oldSnapshot": {
    "com.linkedin.pegasus2avro.metadata.snapshot.DatasetSnapshot": {
    "urn": "urn:li:dataset:(urn:li:dataPlatform:hive,SampleHiveDataset,PROD)",
    "aspects": [
    {
    "com.linkedin.pegasus2avro.common.Ownership": {
    "owners": [
    {
    "owner": "urn:li:corpuser:jdoe",
    "type": "DATAOWNER",
    "source": null
    },
    {
    "owner": "urn:li:corpuser:datahub",
    "type": "DATAOWNER",
    "source": null
    }
    ],
    "lastModified": {
    "time": 1581407189000,
    "actor": "urn:li:corpuser:jdoe",
    "impersonator": null
    }
    }
    }
    ]
    }
    },
    "newSnapshot": {
    "com.linkedin.pegasus2avro.metadata.snapshot.DatasetSnapshot": {
    "urn": "urn:li:dataset:(urn:li:dataPlatform:hive,SampleHiveDataset,PROD)",
    "aspects": [
    {
    "com.linkedin.pegasus2avro.common.Ownership": {
    "owners": [
    {
    "owner": "urn:li:corpuser:datahub",
    "type": "DATAOWNER",
    "source": null
    }
    ],
    "lastModified": {
    "time": 1581407189000,
    "actor": "urn:li:corpuser:jdoe",
    "impersonator": null
    }
    }
    }
    ]
    }
    }
    }

    Failed Metadata Change Event (FMCE)

    When a Metadata Change Event cannot be processed successfully, the event is written to a dead letter queue in an event called Failed Metadata Change Event (FMCE).

    The event simply wraps the original Metadata Change Event and an error message, which contains the reason for rejection. This event can be used for debugging any potential ingestion issues, as well as for re-playing any previous rejected proposal if necessary.

    Emission

    FMCEs are emitted when MCEs cannot be successfully committed to DataHub's storage layer.

    Consumption

    No active consumers.

    Schema

    The PDL schema can be found here.

    The default Kafka topic name for FMCEs is FailedMetadataChangeEvent_v4.

    - + \ No newline at end of file diff --git a/docs/what/relationship/index.html b/docs/what/relationship/index.html index d47a080fa7bd4..f3bb609f96a22 100644 --- a/docs/what/relationship/index.html +++ b/docs/what/relationship/index.html @@ -8,7 +8,7 @@ - + @@ -25,7 +25,7 @@ We also introduce a @pairings annotation to limit the allowed source and destination URN types.

    While it’s possible to model relationships in rest.li as association resources, which often get stored as mapping tables, it is far more common to model them as "foreign keys" field in a metadata aspect. For instance, the Ownership aspect is likely to contain an array of owner’s corpuser URNs.

    Below is an example of how a relationship is modeled in PDL. Note that:

    1. As the source and destination are of generic URN type, we’re able to factor them out to a common BaseRelationship model.
    2. Each model is expected to have a @pairings annotation that is an array of all allowed source-destination URN pairs.
    3. Unlike entity attributes, there’s no requirement on making all relationship attributes optional since relationships do not support partial updates.
    namespace com.linkedin.metadata.relationship

    import com.linkedin.common.Urn

    /**
    * Common fields that apply to all relationships
    */
    record BaseRelationship {

    /**
    * Urn for the source of the relationship
    */
    source: Urn

    /**
    * Urn for the destination of the relationship
    */
    destination: Urn
    }
    namespace com.linkedin.metadata.relationship

    /**
    * Data model for a has-member relationship
    */
    @pairings = [ {
    "destination" : "com.linkedin.common.urn.CorpGroupUrn",
    "source" : "com.linkedin.common.urn.CorpUserUrn"
    } ]
    record HasMembership includes BaseRelationship
    {
    /**
    * The importance of the membership
    */
    importance: int
    }

    Direction of Relationships

    As relationships are modeled as directed edges between nodes, it’s natural to ask which way should it be pointing, or should there be edges going both ways? The answer is, "doesn’t really matter." It’s an aesthetic choice rather than a technical one.

    For one, the actual direction doesn’t really impact the execution of graph queries. Most graph DBs are fully capable of traversing edges in reverse direction efficiently.

    That being said, generally there’s a more "natural way" to specify the direction of a relationship, which closely relates to how the metadata is stored. For example, the membership information for an LDAP group is generally stored as a list in group’s metadata. As a result, it’s more natural to model a HasMember relationship that points from a group to a member, instead of an IsMemberOf relationship pointing from member to group.

    Since all relationships are explicitly declared, it’s fairly easy for a user to discover what relationships are available and their directionality by inspecting the relationships directory. It’s also possible to provide a UI for the catalog of entities and relationships for analysts who are interested in building complex graph queries to gain insights into the metadata.

    High Cardinality Relationships

    See this doc for suggestions on how to best model relationships with high cardinality.

    - + \ No newline at end of file diff --git a/docs/what/search-document/index.html b/docs/what/search-document/index.html index fe3db5ca9ee9d..0665d7a3f4fc8 100644 --- a/docs/what/search-document/index.html +++ b/docs/what/search-document/index.html @@ -8,7 +8,7 @@ - + @@ -21,7 +21,7 @@ Since the document is also served as the main interface for the search API, the attributes can also be used to format the search snippet. As a result, one may be tempted to add as many attributes as needed. This is acceptable as the underlying search engine is designed to index a large number of fields.

    Below shows an example schema for the User search document. Note that:

    1. Each search document is required to have a type-specific urn field, generally maps to an entity in the graph.
    2. Similar to Entity, each document has an optional removed field for "soft deletion". This is captured in BaseDocument, which is expected to be included by all documents.
    3. Similar to Entity, all remaining fields are made optional to support partial updates.
    4. management shows an example of a string array field.
    5. ownedDataset shows an example on how a field can be derived from metadata aspects associated with other types of entity (in this case, Dataset).
    namespace com.linkedin.metadata.search

    /**
    * Common fields that may apply to all documents
    */
    record BaseDocument {

    /** Whether the entity has been removed or not */
    removed: optional boolean = false
    }
    namespace com.linkedin.metadata.search

    import com.linkedin.common.CorpuserUrn
    import com.linkedin.common.DatasetUrn

    /**
    * Data model for user entity search
    */
    record UserDocument includes BaseDocument {

    /** Urn for the user */
    urn: CorpuserUrn

    /** First name of the user */
    firstName: optional string

    /** Last name of the user */
    lastName: optional string

    /** The chain of management all the way to CEO */
    management: optional array[CorpuserUrn] = []

    /** Code for the cost center */
    costCenter: optional int

    /** The list of dataset the user owns */
    ownedDatasets: optional array[DatasetUrn] = []
    }
    - + \ No newline at end of file diff --git a/docs/what/search-index/index.html b/docs/what/search-index/index.html index 0586e624cdd32..78f4c3ed535de 100644 --- a/docs/what/search-index/index.html +++ b/docs/what/search-index/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ As the logic changes, a new version of the index will be created and populated from historic MAEs. Once it’s fully populated, the team can switch to the new version through a simple config change from their GMS. They can also rollback to an older version of index whenever needed.

    - + \ No newline at end of file diff --git a/docs/what/snapshot/index.html b/docs/what/snapshot/index.html index 5bc056622ccfd..df1682b7381df 100644 --- a/docs/what/snapshot/index.html +++ b/docs/what/snapshot/index.html @@ -8,14 +8,14 @@ - +

    What is a snapshot?

    A metadata snapshot models the current state of one or multiple metadata aspects associated with a particular entity. Each entity type is expected to have:

    1. An entity-specific aspect (e.g. CorpGroupAspect from below), which is a typeref containing a union of all possible metadata aspects for the entity.
    2. An entity-specific snapshot (e.g. CorpGroupSnapshot from below), which contains an array (aspects) of entity-specific aspects.
    namespace com.linkedin.metadata.aspect

    import com.linkedin.group.Membership
    import com.linkedin.group.SomeOtherMetadata

    /**
    * A union of all supported metadata aspects for a group
    */
    typeref CorpGroupAspect = union[Membership, SomeOtherMetadata]
    namespace com.linkedin.metadata.snapshot

    import com.linkedin.common.CorpGroupUrn
    import com.linkedin.metadata.aspect.CorpGroupAspect

    /**
    * A metadata snapshot for a specific Group entity.
    */
    record CorpGroupSnapshot {

    /** URN for the entity the metadata snapshot is associated with */
    urn: CorpGroupUrn

    /** The list of metadata aspects associated with the group */
    aspects: array[CorpGroupAspect]
    }

    The generic Snapshot typeref contains a union of all entity-specific snapshots and can therefore be used to represent the state of any metadata aspect for all supported entity types.

    namespace com.linkedin.metadata.snapshot

    /**
    * A union of all supported metadata snapshot types.
    */
    typeref Snapshot = union[DatasetSnapshot, CorpGroupSnapshot, CorpUserSnapshot]
    - + \ No newline at end of file diff --git a/docs/what/urn/index.html b/docs/what/urn/index.html index 6b7e5486d93c5..5d501d75244c6 100644 --- a/docs/what/urn/index.html +++ b/docs/what/urn/index.html @@ -8,7 +8,7 @@ - + @@ -19,7 +19,7 @@ any resource for which you need unique identifier for its each instance. While you can create URNs for GMA entities such as [DatasetUrn] with entity type dataset, you can also define URN for data platforms, [DataPlatformUrn].

    ID

    ID is the unique identifier part of a URN. It's unique for a specific entity type within a specific namespace. ID could contain a single field, or multiple fields in the case of complex URNs. A complex URN can even contain other URNs as ID fields. This type of URN is also referred to as nested URN. For non-URN ID fields, the value can be either a string, number, or Pegasus Enum.

    Here are some example URNs with a single ID field:

    urn:li:dataPlatform:kafka
    urn:li:corpuser:jdoe

    DatasetUrn is an example of a complex nested URN. It contains 3 ID fields: platform, name and fabric, where platform is another URN. Here are some examples

    urn:li:dataset:(urn:li:dataPlatform:kafka,PageViewEvent,PROD)
    urn:li:dataset:(urn:li:dataPlatform:hdfs,PageViewEvent,EI)
    - + \ No newline at end of file diff --git a/guild/index.html b/guild/index.html index 2cecc00041b2f..9a229d0a361d0 100644 --- a/guild/index.html +++ b/guild/index.html @@ -8,13 +8,13 @@ - +
    -
    DataHub Data Practitioners Guild

    Celebrating community members that have gone above and beyond to contribute to the collective success of DataHub

    Top Code Contributor

    Aezo Teo

    Aezo Teo

    Data Engineer, Grab

    Contributed 5 commits in 2022 to the main DataHub Project & Helm repos, including Stateful Ingestion support for Presto-on-Hive

    Arun Vasudevan

    Arun Vasudevan

    Staff Software Engineer, Peloton

    Contributed 9 commits in 2022 to the main DataHub Project, DataHub Actions; improvements to Kafka Connect

    Boyuan Zhang

    Boyuan Zhang

    Data Engineer, Credit Karma

    Contributed 8 commits in 2022, including improvements to dbt & Kafka ingestion and support for Glue profiling

    Bumsoo Kim

    Bumsoo Kim

    Software Engineer

    Contributed 4 commits in 2022, including improvements to Airflow logging and DataHub Helm charts

    David Haglund

    David Haglund

    Data Engineer, SSAB

    Contributed 15 commits in 2022, including improvements to DataHub Helm Charts, DataHub docs, and more

    David Sánchez

    David Sánchez

    Principal Data Engineer, Cabify

    Contributed 7 commits in 2022, improving BigQuery and Tableau connectors and expanding MLFeatureTable functionality

    Djordje Mijatovic

    Djordje Mijatovic

    Senior Java Developer

    Contributed 6 commits in 2022, including building support for Neo4j multi-hop queries

    Ebu

    Ebu

    Core Staff, KDDI

    Contributed 5 commits in 2022, including a new Vertica ingestion source and animated DataHub logos

    Eric Ladouceur

    Eric Ladouceur

    Technical Advisor, Canadian Centre for Cyber Security

    Contributed 6 commits in 2022, including the Iceberg ingestion source

    Felix Lüdin

    Felix Lüdin

    Process Consultant Business Analytics, Endress+Hauser

    Contributed 15 commits in 2022 to the main DataHub Project, DataHub Helm chart, and DataHub Actions repos

    Jordan Wolinsky

    Jordan Wolinsky

    Senior Software Engineer, Zephyr AI

    Contributed 5 commits in 2022, including improvements to data profiling functionality

    Marcin Szymanski

    Marcin Szymanski

    Data Engineering Manager, Esure

    Contributed 5 commits in 2022, including improvements to Trino and Unity Catalog ingestion

    Mert Tunc

    Mert Tunc

    Staff Software Engineer, Udemy

    Contributed 6 commits in 2022, including improvements to Kafka and MySQL ingestion

    Mike Schlosser

    Mike Schlosser

    Lead Software Engineer, Kyros

    Contributed 6 commits in 2022, including support for Snowflake auth and fixes to Docker Compose

    Parham Ghazanfari

    Parham Ghazanfari

    Software Development Engineer, Amazon

    Contributed 4 commits in 2022, including support for MSK IAM authentication

    Piotr Skrydalewicz

    Piotr Skrydalewicz

    Data Engineering Consultant

    Contributed 5 commits in 2022, including improvements to dbt and Glue ingestion sources and support for SparkSQL dialect

    Xu Wang

    Xu Wang

    Staff Software Engineer, Included Health

    Contributed 6 commits in 2022, including metadata model changes to support Notebook entities

    Community Supporter

    Mohamad Amzar

    Mohamad Amzar

    Analytics Engineer, CDX

    Consistently provides guidance to others in the #troubleshoot and #ingestion Slack channels

    Nguyen Tri Hieu

    Nguyen Tri Hieu

    Data Engineer, Fossil Vietnam

    Regularly helps others across all support channels in Slack

    Nicholas Shook

    Nicholas Shook

    Platform Engineer

    Frequently helps others navigate implementation questions in the #all-things-deployment and #troubleshoot Slack channels

    Pablo Ochoa

    Pablo Ochoa

    Big Data and Data Governance Consultant, Graphenus

    Consistently jumps in to address questions in the #troubleshoot and #all-things-deployment Slack channels

    Patrick Braz

    Patrick Braz

    Data Engineering, Hurb

    Regularly provides guidance to Community Members in the #troubleshoot and #ingestion Slack channels

    Pedro Aguiar

    Pedro Aguiar

    Data Analyst

    Routinely provides helpful support to others in the #troubleshoot Slack channel

    Steve Pham

    Steve Pham

    Principal Engineer

    Reliably provides direction to Community Members across all support channels in Slack

    Xianglong LIU

    Xianglong LIU

    Data Platform Engineer

    Continually goes above and beyond to answer any and all questions from Community Members across all support in Slack

    DataHub Champion

    Abhishek Sharma

    Abhishek Sharma

    Software Engineer

    Driving adoption of DataHub at his organization and featured in our inaugural Humans of DataHub post

    Alexander Dobrev

    Alexander Dobrev

    Product Manager, Grab

    Shared Grab's experience adopting and implementing DataHub during October 2022 Town Hall

    Amanda Ng

    Amanda Ng

    Senior Software Engineer, Grab

    Shared Grab's experience adopting and implementing DataHub during October 2022 Town Hall

    Atul Saurav

    Atul Saurav

    Data Governance Architect

    Demoed custom desktop app to search for Glossary Terms outside of DataHub during August 2022 Town Hall

    Divya Manohar

    Divya Manohar

    Software Engineer, Stripe

    Shared an in-depth look into how The Stripe Team addressed data pipeline observability and timeliness concerns with DataHub

    Edward Vaisman

    Edward Vaisman

    Staff Customer Innovation Engineer, Confluent

    Demoed his contribution to define lineage relationships via YAML during February 2022 Town Hall

    Eric Cooklin

    Eric Cooklin

    Sr Data Engineer, Stash

    Shared his experience contributing to the DataHub project during January 2022 Town Hall

    Fredrik Sannholm

    Fredrik Sannholm

    Staff Engineer

    Driving DataHub adoption at Wolt and featured in Humans of DataHub

    Gary Stafford

    Gary Stafford

    Principal Solutions Architect/Analytics TFC, AWS

    Wrote an in-depth overview of DataHub's features and implementation strategies on Medium

    Harvey Li

    Harvey Li

    Lead Data Engineer, Grab

    Shared Grab's experience adopting and implementing DataHub during October 2022 Town Hall

    Hyejin Yoon

    Hyejin Yoon

    Data Engineer, SOCAR

    Driving DataHub adoption at SOCAR, is a strong advocate for the DataHub Community, and featured in Humans of DataHub

    Imane Lafnoune

    Imane Lafnoune

    Data Engineer, Sicara

    Published a 5-min guide to integrate DataHub and Databricks

    Kartik Darapuneni

    Kartik Darapuneni

    Software Engineer, Included Health

    Demoed custom work to embed Looker resources in DataHub during April 2022 Town Hall

    Liangjun Jiang

    Liangjun Jiang

    Software Engineering Manager

    Published a DataHub deployment guide on Medium

    Mike Linthe

    Mike Linthe

    COO, Contiamo

    Strong advocate for adopting DataHub and featured in Humans of DataHub

    Nidhin Nandhakumar

    Nidhin Nandhakumar

    Senior Data Engineer, Coursera

    Driving DataHub adoption at Coursera

    Sergio Gómez

    Sergio Gómez

    Technical Lead, Adevinta

    Driving DataHub adoption at Adevinta and featured in Humans of DataHub

    Steven Po

    Steven Po

    Senior Data Engineer, Coursera

    Driving DataHub adoption at Coursera and featured in Humans of DataHub

    Vishal Shah

    Vishal Shah

    Senior Software Engineer, Etsy

    Shared Etsy's experience adopting and deploying DataHub at August 2022 Town Hall

    Zhong Xu

    Zhong Xu

    Software Engineer, Pinterest

    Shared Pinterest's journey adopting and deploying DataHub at December 2022 Town Hall

    - +
    DataHub Data Practitioners Guild

    Celebrating community members that have gone above and beyond to contribute to the collective success of DataHub

    Top Code Contributor

    Aezo Teo

    Aezo Teo

    Data Engineer, Grab

    Contributed 5 commits in 2022 to the main DataHub Project & Helm repos, including Stateful Ingestion support for Presto-on-Hive

    Arun Vasudevan

    Arun Vasudevan

    Staff Software Engineer, Peloton

    Contributed 9 commits in 2022 to the main DataHub Project, DataHub Actions; improvements to Kafka Connect

    Boyuan Zhang

    Boyuan Zhang

    Data Engineer, Credit Karma

    Contributed 8 commits in 2022, including improvements to dbt & Kafka ingestion and support for Glue profiling

    Bumsoo Kim

    Bumsoo Kim

    Software Engineer

    Contributed 4 commits in 2022, including improvements to Airflow logging and DataHub Helm charts

    David Haglund

    David Haglund

    Data Engineer, SSAB

    Contributed 15 commits in 2022, including improvements to DataHub Helm Charts, DataHub docs, and more

    David Sánchez

    David Sánchez

    Principal Data Engineer, Cabify

    Contributed 7 commits in 2022, improving BigQuery and Tableau connectors and expanding MLFeatureTable functionality

    Djordje Mijatovic

    Djordje Mijatovic

    Senior Java Developer

    Contributed 6 commits in 2022, including building support for Neo4j multi-hop queries

    Ebu

    Ebu

    Core Staff, KDDI

    Contributed 5 commits in 2022, including a new Vertica ingestion source and animated DataHub logos

    Eric Ladouceur

    Eric Ladouceur

    Technical Advisor, Canadian Centre for Cyber Security

    Contributed 6 commits in 2022, including the Iceberg ingestion source

    Felix Lüdin

    Felix Lüdin

    Process Consultant Business Analytics, Endress+Hauser

    Contributed 15 commits in 2022 to the main DataHub Project, DataHub Helm chart, and DataHub Actions repos

    Jordan Wolinsky

    Jordan Wolinsky

    Senior Software Engineer, Zephyr AI

    Contributed 5 commits in 2022, including improvements to data profiling functionality

    Marcin Szymanski

    Marcin Szymanski

    Data Engineering Manager, Esure

    Contributed 5 commits in 2022, including improvements to Trino and Unity Catalog ingestion

    Mert Tunc

    Mert Tunc

    Staff Software Engineer, Udemy

    Contributed 6 commits in 2022, including improvements to Kafka and MySQL ingestion

    Mike Schlosser

    Mike Schlosser

    Lead Software Engineer, Kyros

    Contributed 6 commits in 2022, including support for Snowflake auth and fixes to Docker Compose

    Parham Ghazanfari

    Parham Ghazanfari

    Software Development Engineer, Amazon

    Contributed 4 commits in 2022, including support for MSK IAM authentication

    Piotr Skrydalewicz

    Piotr Skrydalewicz

    Data Engineering Consultant

    Contributed 5 commits in 2022, including improvements to dbt and Glue ingestion sources and support for SparkSQL dialect

    Xu Wang

    Xu Wang

    Staff Software Engineer, Included Health

    Contributed 6 commits in 2022, including metadata model changes to support Notebook entities

    Community Supporter

    Mohamad Amzar

    Mohamad Amzar

    Analytics Engineer, CDX

    Consistently provides guidance to others in the #troubleshoot and #ingestion Slack channels

    Nguyen Tri Hieu

    Nguyen Tri Hieu

    Data Engineer, Fossil Vietnam

    Regularly helps others across all support channels in Slack

    Nicholas Shook

    Nicholas Shook

    Platform Engineer

    Frequently helps others navigate implementation questions in the #all-things-deployment and #troubleshoot Slack channels

    Pablo Ochoa

    Pablo Ochoa

    Big Data and Data Governance Consultant, Graphenus

    Consistently jumps in to address questions in the #troubleshoot and #all-things-deployment Slack channels

    Patrick Braz

    Patrick Braz

    Data Engineering, Hurb

    Regularly provides guidance to Community Members in the #troubleshoot and #ingestion Slack channels

    Pedro Aguiar

    Pedro Aguiar

    Data Analyst

    Routinely provides helpful support to others in the #troubleshoot Slack channel

    Steve Pham

    Steve Pham

    Principal Engineer

    Reliably provides direction to Community Members across all support channels in Slack

    Xianglong LIU

    Xianglong LIU

    Data Platform Engineer

    Continually goes above and beyond to answer any and all questions from Community Members across all support in Slack

    DataHub Champion

    Abhishek Sharma

    Abhishek Sharma

    Software Engineer

    Driving adoption of DataHub at his organization and featured in our inaugural Humans of DataHub post

    Alexander Dobrev

    Alexander Dobrev

    Product Manager, Grab

    Shared Grab's experience adopting and implementing DataHub during October 2022 Town Hall

    Amanda Ng

    Amanda Ng

    Senior Software Engineer, Grab

    Shared Grab's experience adopting and implementing DataHub during October 2022 Town Hall

    Atul Saurav

    Atul Saurav

    Data Governance Architect

    Demoed custom desktop app to search for Glossary Terms outside of DataHub during August 2022 Town Hall

    Divya Manohar

    Divya Manohar

    Software Engineer, Stripe

    Shared an in-depth look into how The Stripe Team addressed data pipeline observability and timeliness concerns with DataHub

    Edward Vaisman

    Edward Vaisman

    Staff Customer Innovation Engineer, Confluent

    Demoed his contribution to define lineage relationships via YAML during February 2022 Town Hall

    Eric Cooklin

    Eric Cooklin

    Sr Data Engineer, Stash

    Shared his experience contributing to the DataHub project during January 2022 Town Hall

    Fredrik Sannholm

    Fredrik Sannholm

    Staff Engineer

    Driving DataHub adoption at Wolt and featured in Humans of DataHub

    Gary Stafford

    Gary Stafford

    Principal Solutions Architect/Analytics TFC, AWS

    Wrote an in-depth overview of DataHub's features and implementation strategies on Medium

    Harvey Li

    Harvey Li

    Lead Data Engineer, Grab

    Shared Grab's experience adopting and implementing DataHub during October 2022 Town Hall

    Hyejin Yoon

    Hyejin Yoon

    Data Engineer, SOCAR

    Driving DataHub adoption at SOCAR, is a strong advocate for the DataHub Community, and featured in Humans of DataHub

    Imane Lafnoune

    Imane Lafnoune

    Data Engineer, Sicara

    Published a 5-min guide to integrate DataHub and Databricks

    Kartik Darapuneni

    Kartik Darapuneni

    Software Engineer, Included Health

    Demoed custom work to embed Looker resources in DataHub during April 2022 Town Hall

    Liangjun Jiang

    Liangjun Jiang

    Software Engineering Manager

    Published a DataHub deployment guide on Medium

    Mike Linthe

    Mike Linthe

    COO, Contiamo

    Strong advocate for adopting DataHub and featured in Humans of DataHub

    Nidhin Nandhakumar

    Nidhin Nandhakumar

    Senior Data Engineer, Coursera

    Driving DataHub adoption at Coursera

    Sergio Gómez

    Sergio Gómez

    Technical Lead, Adevinta

    Driving DataHub adoption at Adevinta and featured in Humans of DataHub

    Steven Po

    Steven Po

    Senior Data Engineer, Coursera

    Driving DataHub adoption at Coursera and featured in Humans of DataHub

    Vishal Shah

    Vishal Shah

    Senior Software Engineer, Etsy

    Shared Etsy's experience adopting and deploying DataHub at August 2022 Town Hall

    Zhong Xu

    Zhong Xu

    Software Engineer, Pinterest

    Shared Pinterest's journey adopting and deploying DataHub at December 2022 Town Hall

    + \ No newline at end of file diff --git a/index.html b/index.html index e54242049fa62..96d375f4d136d 100644 --- a/index.html +++ b/index.html @@ -8,13 +8,13 @@ - +

    The #1 Open Source Metadata Platform

    DataHub is an extensible metadata platform that enables data discovery, data observability and federated governance to help tame the complexity of your data ecosystem.

    Built with ❤️ by Acryl Data and LinkedIn.

    Get Started →Join our Slack

    Get Started Now

    Run the following command to get started with DataHub.

    python3 -m pip install --upgrade pip wheel setuptools 
    python3 -m pip install --upgrade acryl-datahub
    datahub docker quickstart
    DataHub Quickstart GuideDeploying With Kubernetes

    Metadata 360

    Combine technical, operational and business metadata to provide a 360 degree view of your data entities.

    Shift-left

    Apply “shift-left” practices to pre-enrich important metadata using ingestion transformers, support for dbt meta-mapping and other features.

    Active Metadata

    Act on changes in metadata in real time by notifying key stakeholders, circuit-breaking business-critical pipelines, propagating metadata across entities, and more.

    Open Source

    DataHub was originally built at LinkedIn and subsequently open-sourced under the Apache 2.0 License. It now has a thriving community with over a hundred contributors, and is widely used at many companies.

    Forward Looking Architecture

    DataHub follows a push-based architecture, which means it's built for continuously changing metadata. The modular design lets it scale with data growth at any organization, from a single database under your desk to multiple data centers spanning the globe.

    Massive Ecosystem

    DataHub has pre-built integrations with your favorite systems: Kafka, Airflow, MySQL, SQL Server, Postgres, LDAP, Snowflake, Hive, BigQuery, and many others. The community is continuously adding more integrations, so this list keeps getting longer and longer.

    ADLSAirflowAthenaAzure ADBigQueryClickhouseCouchBaseDatabricksDBTDeltalakeDruidElasticsearchFeastGlueGreat ExpectationsHadoopHiveIcebergKafkaKustoLookerMariaDBMetabaseModeMongoDBMSSQLMySQLNiFiOktaOraclePinotPostgreSQLPowerBIPrestoProtobufPulsarRedashRedshiftS3SalesforceSageMakerSnowflakeSparkSQLAlchemySupersetTableauTeradataTrinoADLSAirflowAthenaAzure ADBigQueryClickhouseCouchBaseDatabricksDBTDeltalakeDruidElasticsearchFeastGlueGreat ExpectationsHadoopHiveIcebergKafkaKustoLookerMariaDBMetabaseModeMongoDBMSSQLMySQLNiFiOktaOraclePinotPostgreSQLPowerBIPrestoProtobufPulsarRedashRedshiftS3SalesforceSageMakerSnowflakeSparkSQLAlchemySupersetTableauTeradataTrino

    A Modern Approach to Metadata Management

    Automated Metadata Ingestion

    Push-based ingestion can use a prebuilt emitter or can emit custom events using our framework.

    Pull-based ingestion crawls a metadata source. We have prebuilt integrations with Kafka, MySQL, MS SQL, Postgres, LDAP, Snowflake, Hive, BigQuery, and more. Ingestion can be automated using our Airflow integration or another scheduler of choice.

    Learn more about metadata ingestion with DataHub in the docs.

    recipe.yml
    source:
    type: "mysql"
    config:
    username: "datahub"
    password: "datahub"
    host_port: "localhost:3306"
    sink:
    type: "datahub-rest"
    config:
    server: 'http://localhost:8080'
    datahub ingest -c recipe.yml

    Discover Trusted Data

    Browse and search over a continuously updated catalog of datasets, dashboards, charts, ML models, and more.

    Understand Data in Context

    DataHub is the one-stop shop for documentation, schemas, ownership, lineage, pipelines, data quality, usage information, and more.

    Trusted Across the Industry

    LinkedInUdemyAirtelCourseraGeotabThoughtWorksExpedia GroupTypeformPelotonZyngaHurbRazerClassDojo
    “[DataHub] has made our legal team very happy with being able to keep track of our sensitive data [to answer questions like] Where’s it going? How’s it being processed? Where’s it ending up? Which third party tool or API’s are we sending it to and why? Who is responsible for this integration?”
    Wolt
    “DataHub aligns with our needs [for] data documentation, a unified search experience, lineage information, and additional metadata. We are also very impressed with the vibrant and supportive community.”
    Coursera
    “DataHub allows us to solve the data discovery problem, which was a big challenge in our organization, and now we are solving it.”
    Adevinta
    - + \ No newline at end of file diff --git a/integrations/index.html b/integrations/index.html index 8c337463525bb..956089d2b8a3d 100644 --- a/integrations/index.html +++ b/integrations/index.html @@ -8,13 +8,13 @@ - +

    DataHub Integrations

    Services that integrate with DataHub



    - + \ No newline at end of file diff --git a/schemas/datahub_ingestion_schema.json b/schemas/datahub_ingestion_schema.json index 321864da9eb6e..722c43aaceb20 100644 --- a/schemas/datahub_ingestion_schema.json +++ b/schemas/datahub_ingestion_schema.json @@ -6319,7 +6319,7 @@ "description": "The usage config to use when generating usage statistics", "default": { "bucket_duration": "DAY", - "end_time": "2023-08-24T21:05:51.278053+00:00", + "end_time": "2023-08-24T22:32:38.872355+00:00", "start_time": "2023-08-23T00:00:00+00:00", "top_n_queries": 10, "user_email_pattern": { @@ -11119,7 +11119,7 @@ "description": "Usage related configs", "default": { "bucket_duration": "DAY", - "end_time": "2023-08-24T21:05:43.557107+00:00", + "end_time": "2023-08-24T22:32:27.545907+00:00", "start_time": "2023-08-23T00:00:00+00:00", "top_n_queries": 10, "user_email_pattern": { diff --git a/search/index.html b/search/index.html index 23bf7996e1854..d1e9834c305f5 100644 --- a/search/index.html +++ b/search/index.html @@ -8,13 +8,13 @@ - +

    Search the documentation

    - + \ No newline at end of file