diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha
index 4ceeab3d38..303c785539 100644
--- a/.codegen/_openapi_sha
+++ b/.codegen/_openapi_sha
@@ -1 +1 @@
-d05898328669a3f8ab0c2ecee37db2673d3ea3f7
\ No newline at end of file
+0c86ea6dbd9a730c24ff0d4e509603e476955ac5
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index aecd839d63..769b48fa35 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,39 @@
 # Version changelog
 
+## [Release] Release v1.53.0
+
+### New Features and Improvements
+
+ * Add `databricks_budget` resource ([#3955](https://github.com/databricks/terraform-provider-databricks/pull/3955)).
+ * Add `databricks_mlflow_models` data source ([#3874](https://github.com/databricks/terraform-provider-databricks/pull/3874)).
+ * Add computed attribute `table_serving_url` to `databricks_online_table` ([#4048](https://github.com/databricks/terraform-provider-databricks/pull/4048)).
+ * Add support for Identity Column in `databricks_sql_table` ([#4035](https://github.com/databricks/terraform-provider-databricks/pull/4035)).
+
+
+### Bug Fixes
+
+ * Add sufficient network privileges to the Databricks default cross-account policy ([#4027](https://github.com/databricks/terraform-provider-databricks/pull/4027)).
+ * Ignore presence or absence of `/Workspace` prefix for dashboard resource ([#4061](https://github.com/databricks/terraform-provider-databricks/pull/4061)).
+ * Refactor `databricks_permissions` and allow the current user to set their own permissions ([#3956](https://github.com/databricks/terraform-provider-databricks/pull/3956)).
+ * Set ID for online table resource if creation succeeds but it isn't available yet ([#4072](https://github.com/databricks/terraform-provider-databricks/pull/4072)).
+
+
+### Documentation
+
+ * Update CONTRIBUTING guide for plugin framework resources ([#4078](https://github.com/databricks/terraform-provider-databricks/pull/4078)).
+ * Add guide for OIDC authentication ([#4016](https://github.com/databricks/terraform-provider-databricks/pull/4016)).
+ * Correctly use native markdown callouts supported by TF Registry ([#4073](https://github.com/databricks/terraform-provider-databricks/pull/4073)).
+ * Fix links to `databricks_service_principal` in TF guides ([#4020](https://github.com/databricks/terraform-provider-databricks/pull/4020)).
+
+
+### Internal Changes
+
+ * Fix Permissions Dashboard Test ([#4071](https://github.com/databricks/terraform-provider-databricks/pull/4071)).
+ * Bump Go SDK to latest and generate TF structs ([#4062](https://github.com/databricks/terraform-provider-databricks/pull/4062)).
+ * Skip Budget tests on GCP ([#4070](https://github.com/databricks/terraform-provider-databricks/pull/4070)).
+ * Update to latest OpenAPI spec and bump Go SDK ([#4069](https://github.com/databricks/terraform-provider-databricks/pull/4069)).
+
 ## [Release] Release v1.52.0
 
 ### New Features and Improvements
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4062159c53..ab38782660 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -118,6 +118,22 @@ We are migrating the resource from SDKv2 to Plugin Framework provider and hence
 - `pluginfw`: Contains the changes specific to Plugin Framework. This package shouldn't depend on sdkv2 or common.
 - `sdkv2`: Contains the changes specific to SDKv2. This package shouldn't depend on pluginfw or common.
 
+### Adding a new resource
+1. Check if the directory for this particular resource exists under `internal/providers/pluginfw/resources`; if not, create it, e.g. `cluster`, `volume`, etc. Please note: resources and data sources are organized under the same package for that service.
+2. Create a file named `resource_resource-name.go` and write the CRUD methods and schema for that resource. For reference, take a look at existing resources, e.g. `resource_quality_monitor.go`. A minimal, hypothetical skeleton is sketched below, after the migration notes.
+3. Create a file named `resource_resource-name_acc_test.go` and add integration tests there.
+4. Create a file named `resource_resource-name_test.go` and add unit tests there. Note: please abstract the specific methods of the resource so that they are unit-test friendly and do not test internal parts of the Terraform Plugin Framework library. You can compare the diagnostics; for an example, see `data_cluster_test.go`.
+5. Add the resource to the `Resources()` method in `internal/providers/pluginfw/pluginfw.go`, keeping the list alphabetically sorted.
+6. Create a PR and send it for review.
+
+### Adding a new data source
+1. Check if the directory for this particular data source exists under `internal/providers/pluginfw/resources`; if not, create it, e.g. `cluster`, `volume`, etc. Please note: resources and data sources are organized under the same package for that service.
+2. Create a file named `data_resource-name.go` and write the read method and schema for that data source. For reference, take a look at existing data sources, e.g. `data_cluster.go`.
+3. Create a file named `data_resource-name_acc_test.go` and add integration tests there.
+4. Create a file named `data_resource-name_test.go` and add unit tests there. Note: please abstract the specific methods of the data source so that they are unit-test friendly and do not test internal parts of the Terraform Plugin Framework library. You can compare the diagnostics; for an example, see `data_cluster_test.go`.
+5. Add the data source to the `DataSources()` method in `internal/providers/pluginfw/pluginfw.go`, keeping the list alphabetically sorted.
+6. Create a PR and send it for review.
+
 ### Migrating resource to plugin framework
 Ideally there shouldn't be any behaviour change when migrating a resource or data source to either the Go SDK or the Plugin Framework.
 - Please make sure there are no breaking differences due to changes in schema by running: `make diff-schema`.
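For orientation, the sketch below shows roughly what a new plugin framework resource file can look like. This is a hedged sketch, not code from this repository: the `volume` package name, the `volumeResource` type, and the empty CRUD bodies are illustrative assumptions; real resources here wire in the Databricks Go SDK client and much richer schemas.

```go
package volume

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/resource"
	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
)

// volumeResource is a hypothetical resource_resource-name.go skeleton.
type volumeResource struct{}

// ResourceVolume is the constructor that would be registered in Resources().
func ResourceVolume() resource.Resource {
	return &volumeResource{}
}

// Metadata sets the type name that Terraform configurations refer to.
func (r *volumeResource) Metadata(ctx context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
	resp.TypeName = "databricks_volume"
}

// Schema declares the resource's attributes.
func (r *volumeResource) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) {
	resp.Schema = schema.Schema{
		Attributes: map[string]schema.Attribute{
			"name": schema.StringAttribute{Required: true},
		},
	}
}

// The CRUD methods would call the Databricks APIs; they are left empty in this sketch.
func (r *volumeResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {}

func (r *volumeResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {}

func (r *volumeResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {}

func (r *volumeResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {}
```

With a shape like this, step 5 amounts to appending the constructor (here `ResourceVolume`) to the list returned by `Resources()`.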
diff --git a/aws/data_aws_crossaccount_policy.go b/aws/data_aws_crossaccount_policy.go index 6737e9376a..a5da5d9365 100644 --- a/aws/data_aws_crossaccount_policy.go +++ b/aws/data_aws_crossaccount_policy.go @@ -103,6 +103,10 @@ func DataAwsCrossaccountPolicy() common.Resource { // additional permissions for Databricks-managed VPC policy if data.PolicyType == "managed" { actions = append(actions, []string{ + "ec2:AttachInternetGateway", + "ec2:AllocateAddress", + "ec2:AssociateDhcpOptions", + "ec2:AssociateRouteTable", "ec2:CreateDhcpOptions", "ec2:CreateInternetGateway", "ec2:CreateNatGateway", diff --git a/aws/data_aws_crossaccount_policy_test.go b/aws/data_aws_crossaccount_policy_test.go index 2bdf183993..177cb166e9 100644 --- a/aws/data_aws_crossaccount_policy_test.go +++ b/aws/data_aws_crossaccount_policy_test.go @@ -16,7 +16,7 @@ func TestDataAwsCrossAccountDatabricksManagedPolicy(t *testing.T) { }.Apply(t) assert.NoError(t, err) j := d.Get("json") - assert.Lenf(t, j, 3032, "Strange length for policy: %s", j) + assert.Lenf(t, j, 3171, "Strange length for policy: %s", j) } func TestDataAwsCrossAccountCustomerManagedPolicy(t *testing.T) { @@ -42,7 +42,474 @@ func TestDataAwsCrossAccountPolicy_WithPassRoles(t *testing.T) { }.Apply(t) assert.NoError(t, err) j := d.Get("json") - assert.Lenf(t, j, 3168, "Strange length for policy: %s", j) + assert.Lenf(t, j, 3307, "Strange length for policy: %s", j) +} + +func TestDataAwsCrossAccountManagedPolicyRoles(t *testing.T) { + expectedJSON := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AssignPrivateIpAddresses", + "ec2:CancelSpotInstanceRequests", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeIamInstanceProfileAssociations", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstances", + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribePrefixLists", + "ec2:DescribeReservedInstancesOfferings", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:GetSpotPlacementScores", + "ec2:RequestSpotInstances", + "ec2:DescribeFleetHistory", + "ec2:ModifyFleet", + "ec2:DeleteFleets", + "ec2:DescribeFleetInstances", + "ec2:DescribeFleets", + "ec2:CreateFleet", + "ec2:DeleteLaunchTemplate", + "ec2:GetLaunchTemplateData", + "ec2:CreateLaunchTemplate", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", + "ec2:ModifyLaunchTemplate", + "ec2:DeleteLaunchTemplateVersions", + "ec2:CreateLaunchTemplateVersion", + "ec2:AssociateIamInstanceProfile", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateVolume", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:DisassociateIamInstanceProfile", + "ec2:ReplaceIamInstanceProfileAssociation", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:RunInstances", + "ec2:TerminateInstances", + "ec2:AttachInternetGateway", + "ec2:AllocateAddress", + "ec2:AssociateDhcpOptions", + "ec2:AssociateRouteTable", + "ec2:CreateDhcpOptions", + "ec2:CreateInternetGateway", + "ec2:CreateNatGateway", + "ec2:CreateRoute", + "ec2:CreateRouteTable", + "ec2:CreateSecurityGroup", + "ec2:CreateSubnet", + "ec2:CreateVpc", + "ec2:CreateVpcEndpoint", + "ec2:DeleteDhcpOptions", + "ec2:DeleteInternetGateway", + 
"ec2:DeleteNatGateway", + "ec2:DeleteRoute", + "ec2:DeleteRouteTable", + "ec2:DeleteSecurityGroup", + "ec2:DeleteSubnet", + "ec2:DeleteVpc", + "ec2:DeleteVpcEndpoints", + "ec2:DetachInternetGateway", + "ec2:DisassociateRouteTable", + "ec2:ModifyVpcAttribute", + "ec2:ReleaseAddress" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "iam:CreateServiceLinkedRole", + "iam:PutRolePolicy" + ], + "Resource": "arn:aws:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "spot.amazonaws.com" + } + } + } + ] +}` + + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `policy_type = "managed"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + actualJSON := d.Get("json").(string) + assert.Equal(t, expectedJSON, actualJSON) + + // Negative test: ensure that customer policy is not equal to customer policy + managedD, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `policy_type = "customer"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + managedJSON := managedD.Get("json").(string) + assert.NotEqual(t, actualJSON, managedJSON) +} + +func TestDataAwsCrossAccountCustomerManagedPolicyRoles(t *testing.T) { + expectedJSON := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AssignPrivateIpAddresses", + "ec2:CancelSpotInstanceRequests", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeIamInstanceProfileAssociations", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstances", + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribePrefixLists", + "ec2:DescribeReservedInstancesOfferings", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:GetSpotPlacementScores", + "ec2:RequestSpotInstances", + "ec2:DescribeFleetHistory", + "ec2:ModifyFleet", + "ec2:DeleteFleets", + "ec2:DescribeFleetInstances", + "ec2:DescribeFleets", + "ec2:CreateFleet", + "ec2:DeleteLaunchTemplate", + "ec2:GetLaunchTemplateData", + "ec2:CreateLaunchTemplate", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", + "ec2:ModifyLaunchTemplate", + "ec2:DeleteLaunchTemplateVersions", + "ec2:CreateLaunchTemplateVersion", + "ec2:AssociateIamInstanceProfile", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateVolume", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:DisassociateIamInstanceProfile", + "ec2:ReplaceIamInstanceProfileAssociation", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress", + "ec2:RunInstances", + "ec2:TerminateInstances" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "iam:CreateServiceLinkedRole", + "iam:PutRolePolicy" + ], + "Resource": "arn:aws:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "spot.amazonaws.com" + } + } + } + ] +}` + + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `policy_type = "customer"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + actualJSON := d.Get("json").(string) + 
assert.Equal(t, expectedJSON, actualJSON) + + // Negative test: ensure that customer policy is not equal to managed policy + managedD, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `policy_type = "managed"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + managedJSON := managedD.Get("json").(string) + assert.NotEqual(t, actualJSON, managedJSON) +} + +func TestDataAwsCrossAccountRestrictedPolicyRoles(t *testing.T) { + expectedJSON := `{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ec2:AssignPrivateIpAddresses", + "ec2:CancelSpotInstanceRequests", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeIamInstanceProfileAssociations", + "ec2:DescribeInstanceStatus", + "ec2:DescribeInstances", + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeNetworkAcls", + "ec2:DescribePrefixLists", + "ec2:DescribeReservedInstancesOfferings", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:DescribeVpcAttribute", + "ec2:DescribeVpcs", + "ec2:CreateTags", + "ec2:DeleteTags", + "ec2:GetSpotPlacementScores", + "ec2:RequestSpotInstances", + "ec2:DescribeFleetHistory", + "ec2:ModifyFleet", + "ec2:DeleteFleets", + "ec2:DescribeFleetInstances", + "ec2:DescribeFleets", + "ec2:CreateFleet", + "ec2:DeleteLaunchTemplate", + "ec2:GetLaunchTemplateData", + "ec2:CreateLaunchTemplate", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", + "ec2:ModifyLaunchTemplate", + "ec2:DeleteLaunchTemplateVersions", + "ec2:CreateLaunchTemplateVersion" + ], + "Resource": "*" + }, + { + "Effect": "Allow", + "Action": [ + "iam:CreateServiceLinkedRole", + "iam:PutRolePolicy" + ], + "Resource": "arn:aws:iam::*:role/aws-service-role/spot.amazonaws.com/AWSServiceRoleForEC2Spot", + "Condition": { + "StringLike": { + "iam:AWSServiceName": "spot.amazonaws.com" + } + } + }, + { + "Sid": "InstancePoolsSupport", + "Effect": "Allow", + "Action": [ + "ec2:AssociateIamInstanceProfile", + "ec2:DisassociateIamInstanceProfile", + "ec2:ReplaceIamInstanceProfileAssociation" + ], + "Resource": "arn:aws:ec2:us-west-2:123456789012:instance/*", + "Condition": { + "StringEquals": { + "ec2:ResourceTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "AllowEc2RunInstancePerTag", + "Effect": "Allow", + "Action": "ec2:RunInstances", + "Resource": [ + "arn:aws:ec2:us-west-2:123456789012:volume/*", + "arn:aws:ec2:us-west-2:123456789012:instance/*" + ], + "Condition": { + "StringEquals": { + "aws:RequestTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "AllowEc2RunInstanceImagePerTag", + "Effect": "Allow", + "Action": "ec2:RunInstances", + "Resource": "arn:aws:ec2:us-west-2:123456789012:image/*", + "Condition": { + "StringEquals": { + "aws:ResourceTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "AllowEc2RunInstancePerVPCid", + "Effect": "Allow", + "Action": "ec2:RunInstances", + "Resource": [ + "arn:aws:ec2:us-west-2:123456789012:network-interface/*", + "arn:aws:ec2:us-west-2:123456789012:subnet/*", + "arn:aws:ec2:us-west-2:123456789012:security-group/*" + ], + "Condition": { + "StringEquals": { + "ec2:vpc": "arn:aws:ec2:us-west-2:123456789012:vpc/vpc-abcdefg12345" + } + } + }, + { + "Sid": "AllowEc2RunInstanceOtherResources", + "Effect": "Allow", + "Action": "ec2:RunInstances", + "NotResource": [ + "arn:aws:ec2:us-west-2:123456789012:image/*", + 
"arn:aws:ec2:us-west-2:123456789012:network-interface/*", + "arn:aws:ec2:us-west-2:123456789012:subnet/*", + "arn:aws:ec2:us-west-2:123456789012:security-group/*", + "arn:aws:ec2:us-west-2:123456789012:volume/*", + "arn:aws:ec2:us-west-2:123456789012:instance/*" + ] + }, + { + "Sid": "EC2TerminateInstancesTag", + "Effect": "Allow", + "Action": "ec2:TerminateInstances", + "Resource": "arn:aws:ec2:us-west-2:123456789012:instance/*", + "Condition": { + "StringEquals": { + "ec2:ResourceTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "EC2AttachDetachVolumeTag", + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:DetachVolume" + ], + "Resource": [ + "arn:aws:ec2:us-west-2:123456789012:instance/*", + "arn:aws:ec2:us-west-2:123456789012:volume/*" + ], + "Condition": { + "StringEquals": { + "ec2:ResourceTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "EC2CreateVolumeByTag", + "Effect": "Allow", + "Action": "ec2:CreateVolume", + "Resource": "arn:aws:ec2:us-west-2:123456789012:volume/*", + "Condition": { + "StringEquals": { + "aws:RequestTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "EC2DeleteVolumeByTag", + "Effect": "Allow", + "Action": "ec2:DeleteVolume", + "Resource": [ + "arn:aws:ec2:us-west-2:123456789012:volume/*" + ], + "Condition": { + "StringEquals": { + "ec2:ResourceTag/Vendor": "Databricks" + } + } + }, + { + "Sid": "VpcNonresourceSpecificActions", + "Effect": "Allow", + "Action": [ + "ec2:AuthorizeSecurityGroupEgress", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:RevokeSecurityGroupEgress", + "ec2:RevokeSecurityGroupIngress" + ], + "Resource": "arn:aws:ec2:us-west-2:123456789012:security-group/sg-12345678", + "Condition": { + "StringEquals": { + "ec2:vpc": "arn:aws:ec2:us-west-2:123456789012:vpc/vpc-abcdefg12345" + } + } + } + ] +}` + + d, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: ` +policy_type = "restricted" +aws_account_id = "123456789012" +vpc_id = "vpc-abcdefg12345" +region = "us-west-2" +security_group_id = "sg-12345678" +`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + actualJSON := d.Get("json").(string) + assert.Equal(t, expectedJSON, actualJSON) + + // Negative test: ensure that restricted policy is not equal to managed policy + managedD, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `policy_type = "managed"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + managedJSON := managedD.Get("json").(string) + assert.NotEqual(t, actualJSON, managedJSON) + + // Negative test: ensure that restricted policy is not equal to customer policy + customerD, err := qa.ResourceFixture{ + Read: true, + Resource: DataAwsCrossaccountPolicy(), + NonWritable: true, + HCL: `policy_type = "customer"`, + ID: ".", + }.Apply(t) + assert.NoError(t, err) + customerJSON := customerD.Get("json").(string) + assert.NotEqual(t, actualJSON, customerJSON) } func TestDataAwsCrossAccountRestrictedPolicy(t *testing.T) { diff --git a/catalog/resource_external_location.go b/catalog/resource_external_location.go index f21549cb98..04985370ee 100644 --- a/catalog/resource_external_location.go +++ b/catalog/resource_external_location.go @@ -116,6 +116,9 @@ func ResourceExternalLocation() common.Resource { if !d.HasChangeExcept("owner") { return nil } + if d.HasChange("read_only") { + updateExternalLocationRequest.ForceSendFields = append(updateExternalLocationRequest.ForceSendFields, "ReadOnly") + } updateExternalLocationRequest.Owner = "" _, err = 
w.ExternalLocations.Update(ctx, updateExternalLocationRequest) diff --git a/catalog/resource_external_location_test.go b/catalog/resource_external_location_test.go index 26493c1005..c424bbc2fc 100644 --- a/catalog/resource_external_location_test.go +++ b/catalog/resource_external_location_test.go @@ -293,6 +293,7 @@ func TestUpdateExternalLocation(t *testing.T) { Url: "s3://foo/bar", CredentialName: "bcd", Comment: "def", + ReadOnly: false, }, }, { @@ -324,6 +325,52 @@ func TestUpdateExternalLocation(t *testing.T) { }.ApplyNoError(t) } +func TestUpdateExternalLocation_FromReadOnly(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "PATCH", + Resource: "/api/2.1/unity-catalog/external-locations/abc", + ExpectedRequest: catalog.UpdateExternalLocation{ + Url: "s3://foo/bar", + CredentialName: "bcd", + Comment: "def", + ReadOnly: false, + ForceSendFields: []string{"ReadOnly"}, + }, + }, + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/external-locations/abc?", + Response: catalog.ExternalLocationInfo{ + Name: "abc", + Url: "s3://foo/bar", + CredentialName: "bcd", + Comment: "def", + ReadOnly: false, + }, + }, + }, + Resource: ResourceExternalLocation(), + Update: true, + ID: "abc", + InstanceState: map[string]string{ + "name": "abc", + "url": "s3://foo/bar", + "credential_name": "abc", + "comment": "def", + "read_only": "true", + }, + HCL: ` + name = "abc" + url = "s3://foo/bar" + credential_name = "bcd" + comment = "def" + read_only = false + `, + }.ApplyNoError(t) +} + func TestUpdateExternalLocationOnlyOwner(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ diff --git a/catalog/resource_online_table.go b/catalog/resource_online_table.go index 0dd75ffe90..7c317a9742 100644 --- a/catalog/resource_online_table.go +++ b/catalog/resource_online_table.go @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -const onlineTableDefaultProvisionTimeout = 45 * time.Minute +const onlineTableDefaultProvisionTimeout = 90 * time.Minute func waitForOnlineTableCreation(w *databricks.WorkspaceClient, ctx context.Context, onlineTableName string) error { return retry.RetryContext(ctx, onlineTableDefaultProvisionTimeout, func() *retry.RetryError { @@ -59,6 +59,7 @@ func ResourceOnlineTable() common.Resource { common.CustomizeSchemaPath(m, "spec", "source_table_full_name").SetCustomSuppressDiff(common.EqualFoldDiffSuppress) common.CustomizeSchemaPath(m, "name").SetRequired().SetForceNew() common.CustomizeSchemaPath(m, "status").SetReadOnly() + common.CustomizeSchemaPath(m, "table_serving_url").SetReadOnly() common.CustomizeSchemaPath(m, "spec", "pipeline_id").SetReadOnly() runTypes := []string{"spec.0.run_triggered", "spec.0.run_continuously"} @@ -79,13 +80,14 @@ func ResourceOnlineTable() common.Resource { if err != nil { return err } + // Note: We should set the id right after creation and before waiting for online table to be available. + // If the resource creation timeout is exceeded while waiting for the online table to be ready, this ensures the online table is persisted in the state. 
+ d.SetId(res.Name)
  // this should be specified in the API Spec - filed a ticket to add it
  err = waitForOnlineTableCreation(w, ctx, res.Name)
  if err != nil {
   return err
  }
- d.SetId(res.Name)
  return nil
 },
 Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error {
diff --git a/catalog/resource_online_table_test.go b/catalog/resource_online_table_test.go
index 104d6a21c6..1deddd02a3 100644
--- a/catalog/resource_online_table_test.go
+++ b/catalog/resource_online_table_test.go
@@ -9,6 +9,7 @@ import (
 "github.com/databricks/databricks-sdk-go/service/catalog"
 "github.com/databricks/terraform-provider-databricks/qa"
+ "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/mock"
 )
@@ -108,7 +109,7 @@ func TestOnlineTableCreate_ErrorInWait(t *testing.T) {
 },
 Status: &catalog.OnlineTableStatus{DetailedState: catalog.OnlineTableStateOfflineFailed},
 }
- qa.ResourceFixture{
+ d, err := qa.ResourceFixture{
 MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) {
 e := w.GetMockOnlineTablesAPI().EXPECT()
 e.Create(mock.Anything, catalog.CreateOnlineTableRequest{
@@ -124,7 +125,9 @@ func TestOnlineTableCreate_ErrorInWait(t *testing.T) {
 Resource: ResourceOnlineTable(),
 HCL: onlineTableHcl,
 Create: true,
- }.ExpectError(t, "online table status returned OFFLINE_FAILED for online table: main.default.online_table")
+ }.Apply(t)
+ qa.AssertErrorStartsWith(t, err, "online table status returned OFFLINE_FAILED for online table: main.default.online_table")
+ assert.Equal(t, "main.default.online_table", d.Id())
 }
 
 func TestOnlineTableRead(t *testing.T) {
diff --git a/catalog/resource_sql_table.go b/catalog/resource_sql_table.go
index 922d2d0cd2..ce9d4dbd7a 100644
--- a/catalog/resource_sql_table.go
+++ b/catalog/resource_sql_table.go
@@ -2,6 +2,7 @@ package catalog
 
 import (
 "context"
+ "encoding/json"
 "fmt"
 "log"
 "reflect"
@@ -22,12 +23,24 @@ import (
 var MaxSqlExecWaitTimeout = 50
 
 type SqlColumnInfo struct {
- Name string `json:"name"`
- Type string `json:"type_text,omitempty" tf:"alias:type,computed"`
- Comment string `json:"comment,omitempty"`
- Nullable bool `json:"nullable,omitempty" tf:"default:true"`
+ Name string `json:"name"`
+ Type string `json:"type_text,omitempty" tf:"alias:type,computed"`
+ Identity IdentityColumn `json:"identity,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Nullable bool `json:"nullable,omitempty" tf:"default:true"`
+ TypeJson string `json:"type_json,omitempty" tf:"computed"`
 }
 
+type TypeJson struct {
+ Metadata map[string]any `json:"metadata,omitempty"`
+}
+
+type IdentityColumn string
+
+const IdentityColumnNone IdentityColumn = ""
+const IdentityColumnAlways IdentityColumn = "always"
+const IdentityColumnDefault IdentityColumn = "default"
+
 type SqlTableInfo struct {
 Name string `json:"name"`
 CatalogName string `json:"catalog_name" tf:"force_new"`
@@ -108,6 +121,28 @@ func parseComment(s string) string {
 return strings.ReplaceAll(strings.ReplaceAll(s, `\'`, `'`), `'`, `\'`)
 }
 
+func reconstructIdentity(c *SqlColumnInfo) (IdentityColumn, error) {
+ if c.TypeJson == "" {
+ return IdentityColumnNone, nil
+ }
+ var typeJson TypeJson
+ err := json.Unmarshal([]byte(c.TypeJson), &typeJson)
+ if err != nil {
+ return IdentityColumnNone, err
+ }
+ if _, ok := typeJson.Metadata["delta.identity.start"]; !ok {
+ return IdentityColumnNone, nil
+ }
+ explicit, ok := typeJson.Metadata["delta.identity.allowExplicitInsert"]
+ if !ok {
+ return IdentityColumnNone, nil
+ }
+ if explicit.(bool) {
+ return IdentityColumnDefault, nil
+ }
+
return IdentityColumnAlways, nil +} + func (ti *SqlTableInfo) initCluster(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) (err error) { defaultClusterName := "terraform-sql-table" clustersAPI := clusters.NewClustersAPI(ctx, c) @@ -171,7 +206,22 @@ func (ti *SqlTableInfo) getOrCreateCluster(clusterName string, clustersAPI clust return aclCluster.ClusterID, nil } +func (ci *SqlColumnInfo) getColumnType() string { + var colType string + switch ci.Identity { + case IdentityColumnAlways: + colType = fmt.Sprintf("%s GENERATED ALWAYS AS IDENTITY", ci.Type) + case IdentityColumnDefault: + colType = fmt.Sprintf("%s GENERATED BY DEFAULT AS IDENTITY", ci.Type) + default: + colType = ci.Type + } + return colType +} + func (ti *SqlTableInfo) serializeColumnInfo(col SqlColumnInfo) string { + var colType = col.getColumnType() + notNull := "" if !col.Nullable { notNull = " NOT NULL" @@ -181,7 +231,7 @@ func (ti *SqlTableInfo) serializeColumnInfo(col SqlColumnInfo) string { if col.Comment != "" { comment = fmt.Sprintf(" COMMENT '%s'", parseComment(col.Comment)) } - return fmt.Sprintf("%s %s%s%s", col.getWrappedColumnName(), col.Type, notNull, comment) // id INT NOT NULL COMMENT 'something' + return fmt.Sprintf("%s %s%s%s", col.getWrappedColumnName(), colType, notNull, comment) // id INT NOT NULL COMMENT 'something' } func (ti *SqlTableInfo) serializeColumnInfos() string { @@ -502,6 +552,9 @@ func assertNoColumnTypeDiff(oldCols []interface{}, newColumnInfos []SqlColumnInf if getColumnType(oldColMap["type"].(string)) != getColumnType(newColumnInfos[i].Type) { return fmt.Errorf("changing the 'type' of an existing column is not supported") } + if oldColMap["identity"].(string) != string(newColumnInfos[i].Identity) { + return fmt.Errorf("changing the 'identity' type of an existing column is not supported") + } } return nil } @@ -602,6 +655,13 @@ func ResourceSqlTable() common.Resource { if err != nil { return err } + for i := range ti.ColumnInfos { + c := &ti.ColumnInfos[i] + c.Identity, err = reconstructIdentity(c) + if err != nil { + return err + } + } return common.StructToData(ti, tableSchema, d) }, Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { diff --git a/catalog/resource_sql_table_test.go b/catalog/resource_sql_table_test.go index 795a1d3f31..f2f0a6c5e2 100644 --- a/catalog/resource_sql_table_test.go +++ b/catalog/resource_sql_table_test.go @@ -35,6 +35,36 @@ func TestResourceSqlTableCreateStatement_External(t *testing.T) { assert.Contains(t, stmt, "COMMENT 'terraform managed'") } +func TestResourceSqlTableCreateStatement_IdentityColumn(t *testing.T) { + ti := &SqlTableInfo{ + Name: "bar", + CatalogName: "main", + SchemaName: "foo", + TableType: "EXTERNAL", + DataSourceFormat: "DELTA", + StorageLocation: "s3://ext-main/foo/bar1", + StorageCredentialName: "somecred", + Comment: "terraform managed", + ColumnInfos: []SqlColumnInfo{ + { + Name: "id", + Type: "bigint", + Identity: "default", + }, + { + Name: "name", + Comment: "a comment", + }, + }, + } + stmt := ti.buildTableCreateStatement() + assert.Contains(t, stmt, "CREATE EXTERNAL TABLE `main`.`foo`.`bar`") + assert.Contains(t, stmt, "USING DELTA") + assert.Contains(t, stmt, "(`id` bigint GENERATED BY DEFAULT AS IDENTITY NOT NULL, `name` NOT NULL COMMENT 'a comment')") + assert.Contains(t, stmt, "LOCATION 's3://ext-main/foo/bar1' WITH (CREDENTIAL `somecred`)") + assert.Contains(t, stmt, "COMMENT 'terraform managed'") +} + func TestResourceSqlTableCreateStatement_View(t *testing.T) 
{ ti := &SqlTableInfo{ Name: "bar", @@ -1334,6 +1364,169 @@ func TestResourceSqlTableCreateTable_ExistingSQLWarehouse(t *testing.T) { assert.NoError(t, err) } +func TestResourceSqlTableCreateTableWithIdentityColumn_ExistingSQLWarehouse(t *testing.T) { + qa.ResourceFixture{ + CommandMock: func(commandStr string) common.CommandResults { + return common.CommandResults{ + ResultType: "", + Data: nil, + } + }, + HCL: ` + name = "bar" + catalog_name = "main" + schema_name = "foo" + table_type = "MANAGED" + data_source_format = "DELTA" + storage_location = "abfss://container@account/somepath" + warehouse_id = "existingwarehouse" + + column { + name = "id" + type = "bigint" + identity = "default" + } + column { + name = "name" + type = "string" + comment = "name of thing" + } + column { + name = "number" + type = "bigint" + identity = "always" + } + comment = "this table is managed by terraform" + `, + Fixtures: []qa.HTTPFixture{ + { + Method: "POST", + Resource: "/api/2.0/sql/statements/", + ExpectedRequest: sql.ExecuteStatementRequest{ + Statement: "CREATE TABLE `main`.`foo`.`bar` (`id` bigint GENERATED BY DEFAULT AS IDENTITY, `name` string COMMENT 'name of thing', `number` bigint GENERATED ALWAYS AS IDENTITY)\nUSING DELTA\nCOMMENT 'this table is managed by terraform'\nLOCATION 'abfss://container@account/somepath';", + WaitTimeout: "50s", + WarehouseId: "existingwarehouse", + OnWaitTimeout: sql.ExecuteStatementRequestOnWaitTimeoutCancel, + }, + Response: sql.StatementResponse{ + StatementId: "statement1", + Status: &sql.StatementStatus{ + State: "SUCCEEDED", + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/tables/main.foo.bar", + Response: SqlTableInfo{ + Name: "bar", + CatalogName: "main", + SchemaName: "foo", + TableType: "EXTERNAL", + DataSourceFormat: "DELTA", + StorageLocation: "s3://ext-main/foo/bar1", + StorageCredentialName: "somecred", + Comment: "terraform managed", + Properties: map[string]string{ + "one": "two", + "three": "four", + }, + ColumnInfos: []SqlColumnInfo{ + { + Name: "id", + Type: "bigint", + TypeJson: "{\"type\":\"bigint\",\"nullable\":true, \"metadata\":{\"delta.identity.start\":1,\"delta.identity.allowExplicitInsert\":true}}", + }, + { + Name: "name", + Type: "string", + Comment: "name of thing", + }, + { + Name: "number", + Type: "bigint", + TypeJson: "{\"type\":\"bigint\",\"nullable\":true, \"metadata\":{\"delta.identity.start\":1,\"delta.identity.allowExplicitInsert\":false}}", + }, + }, + }, + }, + }, + Create: true, + Resource: ResourceSqlTable(), + }.ApplyAndExpectData(t, map[string]any{ + "column.0.identity": "default", + "column.1.identity": "", + "column.2.identity": "always", + }) +} + +func TestResourceSqlTableReadTableWithIdentityColumn_ExistingSQLWarehouse(t *testing.T) { + qa.ResourceFixture{ + CommandMock: func(commandStr string) common.CommandResults { + return common.CommandResults{ + ResultType: "", + Data: nil, + } + }, + HCL: ` + name = "bar" + catalog_name = "main" + schema_name = "foo" + table_type = "MANAGED" + data_source_format = "DELTA" + storage_location = "abfss://container@account/somepath" + warehouse_id = "existingwarehouse" + + + comment = "this table is managed by terraform" + `, + Fixtures: []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/tables/main.foo.bar", + Response: SqlTableInfo{ + Name: "bar", + CatalogName: "main", + SchemaName: "foo", + TableType: "EXTERNAL", + DataSourceFormat: "DELTA", + StorageLocation: "s3://ext-main/foo/bar1", + StorageCredentialName: "somecred", + Comment: 
"terraform managed", + Properties: map[string]string{ + "one": "two", + "three": "four", + }, + ColumnInfos: []SqlColumnInfo{ + { + Name: "id", + Type: "bigint", + TypeJson: "{\"type\":\"bigint\",\"nullable\":true, \"metadata\":{\"delta.identity.start\":1,\"delta.identity.allowExplicitInsert\":false}}", + }, + { + Name: "name", + Type: "string", + Comment: "name of thing", + }, + { + Name: "number", + Type: "bigint", + TypeJson: "{\"type\":\"bigint\",\"nullable\":true, \"metadata\":{\"delta.identity.start\":1,\"delta.identity.allowExplicitInsert\":true}}", + }, + }, + }, + }, + }, + ID: "main.foo.bar", + Read: true, + Resource: ResourceSqlTable(), + }.ApplyAndExpectData(t, map[string]any{ + "column.0.identity": "always", + "column.1.identity": "", + "column.2.identity": "default", + }) +} + func TestResourceSqlTableCreateTable_OnlyManagedProperties(t *testing.T) { qa.ResourceFixture{ CommandMock: func(commandStr string) common.CommandResults { diff --git a/catalog/resource_storage_credential.go b/catalog/resource_storage_credential.go index 38c6a92109..17e7896ae3 100644 --- a/catalog/resource_storage_credential.go +++ b/catalog/resource_storage_credential.go @@ -196,6 +196,9 @@ func ResourceStorageCredential() common.Resource { return nil } + if d.HasChange("read_only") { + update.ForceSendFields = append(update.ForceSendFields, "ReadOnly") + } update.Owner = "" _, err := acc.StorageCredentials.Update(ctx, catalog.AccountsUpdateStorageCredential{ CredentialInfo: &update, @@ -240,6 +243,9 @@ func ResourceStorageCredential() common.Resource { return nil } + if d.HasChange("read_only") { + update.ForceSendFields = append(update.ForceSendFields, "ReadOnly") + } update.Owner = "" _, err = w.StorageCredentials.Update(ctx, update) if err != nil { diff --git a/catalog/resource_storage_credential_test.go b/catalog/resource_storage_credential_test.go index e09f8af8e8..7e3c31b35a 100644 --- a/catalog/resource_storage_credential_test.go +++ b/catalog/resource_storage_credential_test.go @@ -432,6 +432,54 @@ func TestUpdateStorageCredentials(t *testing.T) { }.ApplyNoError(t) } +func TestUpdateStorageCredentialsFromReadOnly(t *testing.T) { + qa.ResourceFixture{ + Fixtures: []qa.HTTPFixture{ + { + Method: "PATCH", + Resource: "/api/2.1/unity-catalog/storage-credentials/a", + ExpectedRequest: catalog.UpdateStorageCredential{ + AwsIamRole: &catalog.AwsIamRoleRequest{ + RoleArn: "CHANGED", + }, + Comment: "c", + ReadOnly: false, + ForceSendFields: []string{"ReadOnly"}, + }, + }, + { + Method: "GET", + Resource: "/api/2.1/unity-catalog/storage-credentials/a?", + Response: catalog.StorageCredentialInfo{ + Name: "a", + AwsIamRole: &catalog.AwsIamRoleResponse{ + RoleArn: "CHANGED", + }, + MetastoreId: "d", + Comment: "c", + ReadOnly: false, + }, + }, + }, + Resource: ResourceStorageCredential(), + Update: true, + ID: "a", + InstanceState: map[string]string{ + "name": "a", + "comment": "c", + "read_only": "true", + }, + HCL: ` + name = "a" + aws_iam_role { + role_arn = "CHANGED" + } + comment = "c" + read_only = false + `, + }.ApplyNoError(t) +} + func TestUpdateStorageCredentialsWithOwnerOnly(t *testing.T) { qa.ResourceFixture{ Fixtures: []qa.HTTPFixture{ diff --git a/common/resource.go b/common/resource.go index 77ba894853..9e639eb962 100644 --- a/common/resource.go +++ b/common/resource.go @@ -440,6 +440,16 @@ func genericDatabricksData[T, P, C any]( } } +// WorkspacePathPrefixDiffSuppress suppresses diffs for workspace paths where both sides +// may or may not include the `/Workspace` prefix. 
+//
+// This is the case for dashboards where at create time, the user may include the `/Workspace`
+// prefix for the `parent_path` field, but the read response will not include the prefix.
+func WorkspacePathPrefixDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
+ const prefix = "/Workspace"
+ return strings.TrimPrefix(old, prefix) == strings.TrimPrefix(new, prefix)
+}
+
 func EqualFoldDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
 if strings.EqualFold(old, new) {
 log.Printf("[INFO] Suppressing diff on %s", k)
diff --git a/common/resource_test.go b/common/resource_test.go
index 360c10b476..e93885a02c 100644
--- a/common/resource_test.go
+++ b/common/resource_test.go
@@ -179,6 +179,14 @@ func TestCustomizeDiffRobustness(t *testing.T) {
 assert.EqualError(t, err, "cannot customize diff for sample: panic: oops")
 }
 
+func TestWorkspacePathPrefixDiffSuppress(t *testing.T) {
+ assert.True(t, WorkspacePathPrefixDiffSuppress("k", "/Workspace/foo/bar", "/Workspace/foo/bar", nil))
+ assert.True(t, WorkspacePathPrefixDiffSuppress("k", "/Workspace/foo/bar", "/foo/bar", nil))
+ assert.True(t, WorkspacePathPrefixDiffSuppress("k", "/foo/bar", "/Workspace/foo/bar", nil))
+ assert.True(t, WorkspacePathPrefixDiffSuppress("k", "/foo/bar", "/foo/bar", nil))
+ assert.False(t, WorkspacePathPrefixDiffSuppress("k", "/Workspace/1", "/Workspace/2", nil))
+}
+
 func TestEqualFoldDiffSuppress(t *testing.T) {
 assert.True(t, EqualFoldDiffSuppress("k", "A", "a", nil))
 assert.False(t, EqualFoldDiffSuppress("k", "A", "A2", nil))
diff --git a/common/version.go b/common/version.go
index 417761fcf5..86d57fba64 100644
--- a/common/version.go
+++ b/common/version.go
@@ -3,7 +3,7 @@ package common
 import "context"
 
 var (
- version = "1.52.0"
+ version = "1.53.0"
 // ResourceName is resource name without databricks_ prefix
 ResourceName contextKey = 1
 // Provider is the current instance of provider
diff --git a/dashboards/resource_dashboard.go b/dashboards/resource_dashboard.go
index 531a6e5de4..d872b33f49 100644
--- a/dashboards/resource_dashboard.go
+++ b/dashboards/resource_dashboard.go
@@ -42,7 +42,7 @@ func (Dashboard) CustomizeSchema(s *common.CustomizableSchema) *common.Customiza
 s.SchemaPath("md5").SetComputed()
 
 // ForceNew fields
- s.SchemaPath("parent_path").SetForceNew()
+ s.SchemaPath("parent_path").SetCustomSuppressDiff(common.WorkspacePathPrefixDiffSuppress).SetForceNew()
 
 // ConflictsWith fields
 s.SchemaPath("serialized_dashboard").SetConflictsWith([]string{"file_path"})
diff --git a/docs/data-sources/mlflow_models.md b/docs/data-sources/mlflow_models.md
new file mode 100644
index 0000000000..b8b67c9096
--- /dev/null
+++ b/docs/data-sources/mlflow_models.md
@@ -0,0 +1,42 @@
+---
+subcategory: "MLflow"
+---
+# databricks_mlflow_models Data Source
+
+-> This data source can only be used with a workspace-level provider!
+
+Retrieves a list of [databricks_mlflow_model](../resources/mlflow_model.md) objects that were created by Terraform or manually, so that special handling can be applied.
+
+## Example Usage
+
+```hcl
+data "databricks_mlflow_models" "this" {}
+
+output "model" {
+  value = data.databricks_mlflow_models.this
+}
+```
+
+```hcl
+data "databricks_mlflow_models" "this" {}
+
+check "model_list_not_empty" {
+  assert {
+    condition     = length(data.databricks_mlflow_models.this.names) != 0
+    error_message = "Model list is empty."
+  }
+}
+
+check "model_list_contains_model" {
+  assert {
+    condition     = contains(data.databricks_mlflow_models.this.names, "model_1")
+    error_message = "model_1 is missing in model list."
+  }
+}
+```
+
+## Attribute Reference
+
+This data source exports the following attributes:
+
+* `names` - List of names of [databricks_mlflow_model](../resources/mlflow_model.md).
\ No newline at end of file
diff --git a/docs/guides/aws-e2-firewall-hub-and-spoke.md b/docs/guides/aws-e2-firewall-hub-and-spoke.md
index 78c0ff0cb2..9b5f785f67 100644
--- a/docs/guides/aws-e2-firewall-hub-and-spoke.md
+++ b/docs/guides/aws-e2-firewall-hub-and-spoke.md
@@ -12,7 +12,7 @@ You can provision multiple Databricks workspaces with Terraform, and where many
 
 ## Provider initialization for AWS workspaces
 
-This guide assumes you have the `client_id`, which is the `application_id` of the [Service Principal](resources/service_principal.md), `client_secret`, which is its secret, and `databricks_account_id`, which can be found in the top right corner of the [Account Console](https://accounts.cloud.databricks.com). (see [instruction](https://docs.databricks.com/dev-tools/authentication-oauth.html#step-2-create-an-oauth-secret-for-a-service-principal)). This guide is provided as is and assumes you will use it as the basis for your setup. If you use AWS Firewall to block most traffic but allow the URLs to which Databricks needs to connect, please update the configuration based on your region. You can get the configuration details for your region from [Firewall Appliance](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html#firewall-appliance-infrastructure) document.
+This guide assumes you have the `client_id`, which is the `application_id` of the [Service Principal](../resources/service_principal.md), `client_secret`, which is its secret, and `databricks_account_id`, which can be found in the top right corner of the [Account Console](https://accounts.cloud.databricks.com). (see [instruction](https://docs.databricks.com/dev-tools/authentication-oauth.html#step-2-create-an-oauth-secret-for-a-service-principal)). This guide is provided as is and assumes you will use it as the basis for your setup. If you use AWS Firewall to block most traffic but allow the URLs to which Databricks needs to connect, please update the configuration based on your region. You can get the configuration details for your region from [Firewall Appliance](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html#firewall-appliance-infrastructure) document.
 
 ```hcl
 variable "client_id" {}
diff --git a/docs/guides/aws-e2-firewall-workspace.md b/docs/guides/aws-e2-firewall-workspace.md
index d7b00334f9..5e7af4c57c 100644
--- a/docs/guides/aws-e2-firewall-workspace.md
+++ b/docs/guides/aws-e2-firewall-workspace.md
@@ -14,7 +14,7 @@ For more information, please visit [Data Exfiltration Protection With Databricks
 
 ## Provider initialization for AWS workspaces
 
-This guide assumes you have the `client_id`, which is the `application_id` of the [Service Principal](resources/service_principal.md), `client_secret`, which is its secret, and `databricks_account_id`, which can be found in the top right corner of the [Account Console](https://accounts.cloud.databricks.com). (see [instruction](https://docs.databricks.com/dev-tools/authentication-oauth.html#step-2-create-an-oauth-secret-for-a-service-principal)). This guide is provided as is and assumes you will use it as the basis for your setup.
If you are using AWS Firewall to block most traffic but allow the URLs that Databricks needs to connect to, please update the configuration based on your region. You can get the configuration details for your region from [Firewall Appliance](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html#firewall-appliance-infrastructure) document. +This guide assumes you have the `client_id`, which is the `application_id` of the [Service Principal](../resources/service_principal.md), `client_secret`, which is its secret, and `databricks_account_id`, which can be found in the top right corner of the [Account Console](https://accounts.cloud.databricks.com). (see [instruction](https://docs.databricks.com/dev-tools/authentication-oauth.html#step-2-create-an-oauth-secret-for-a-service-principal)). This guide is provided as is and assumes you will use it as the basis for your setup. If you are using AWS Firewall to block most traffic but allow the URLs that Databricks needs to connect to, please update the configuration based on your region. You can get the configuration details for your region from [Firewall Appliance](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html#firewall-appliance-infrastructure) document. ```hcl variable "client_id" {} diff --git a/docs/guides/aws-workspace.md b/docs/guides/aws-workspace.md index 596076f66c..47b9e15e24 100644 --- a/docs/guides/aws-workspace.md +++ b/docs/guides/aws-workspace.md @@ -12,7 +12,7 @@ You can provision multiple Databricks workspaces with Terraform. ## Provider initialization for AWS workspaces -This guide assumes you have the `client_id`, which is the `application_id` of the [Service Principal](resources/service_principal.md), `client_secret`, which is its secret, and `databricks_account_id`, which can be found in the top right corner of the [Account Console](https://accounts.cloud.databricks.com). (see [instruction](https://docs.databricks.com/dev-tools/authentication-oauth.html#step-2-create-an-oauth-secret-for-a-service-principal)). This guide is provided as is and assumes you will use it as the basis for your setup. +This guide assumes you have the `client_id`, which is the `application_id` of the [Service Principal](../resources/service_principal.md), `client_secret`, which is its secret, and `databricks_account_id`, which can be found in the top right corner of the [Account Console](https://accounts.cloud.databricks.com). (see [instruction](https://docs.databricks.com/dev-tools/authentication-oauth.html#step-2-create-an-oauth-secret-for-a-service-principal)). This guide is provided as is and assumes you will use it as the basis for your setup. ```hcl variable "client_id" {} diff --git a/docs/resources/access_control_rule_set.md b/docs/resources/access_control_rule_set.md index 3a7767c570..1bd2ee1e50 100644 --- a/docs/resources/access_control_rule_set.md +++ b/docs/resources/access_control_rule_set.md @@ -4,13 +4,13 @@ subcategory: "Security" # databricks_access_control_rule_set Resource --> **Note** This resource can be used with an account or workspace-level provider. +-> This resource can be used with an account or workspace-level provider. This resource allows you to manage access rules on Databricks account level resources. For convenience we allow accessing this resource through the Databricks account and workspace. 
--> **Note** Currently, we only support managing access rules on service principal, group and account resources through `databricks_access_control_rule_set`.
+-> Currently, we only support managing access rules on service principal, group and account resources through `databricks_access_control_rule_set`.
 
--> **Warning** `databricks_access_control_rule_set` cannot be used to manage access rules for resources supported by [databricks_permissions](permissions.md). Refer to its documentation for more information.
+!> `databricks_access_control_rule_set` cannot be used to manage access rules for resources supported by [databricks_permissions](permissions.md). Refer to its documentation for more information.
 
 ## Service principal rule set usage
 
diff --git a/docs/resources/artifact_allowlist.md b/docs/resources/artifact_allowlist.md
index a65fe6dd57..d6272b9322 100644
--- a/docs/resources/artifact_allowlist.md
+++ b/docs/resources/artifact_allowlist.md
@@ -3,10 +3,9 @@ subcategory: "Unity Catalog"
 ---
 # databricks_artifact_allowlist Resource
 
--> **Note**
-  It is required to define all allowlist for an artifact type in a single resource, otherwise Terraform cannot guarantee config drift prevention.
+~> It is required to define the entire allowlist for an artifact type in a single resource, otherwise Terraform cannot guarantee config drift prevention.
 
--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!
 
 In Databricks Runtime 13.3 and above, you can add libraries and init scripts to the allowlist in UC so that users can leverage these artifacts on compute configured with shared access mode.
diff --git a/docs/resources/automatic_cluster_update_setting.md b/docs/resources/automatic_cluster_update_setting.md
index 152a95b9ea..b8f4e719aa 100644
--- a/docs/resources/automatic_cluster_update_setting.md
+++ b/docs/resources/automatic_cluster_update_setting.md
@@ -4,7 +4,7 @@ subcategory: "Settings"
 
 # databricks_automatic_cluster_update_workspace_setting Resource
 
--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!
 
 The `databricks_automatic_cluster_update_workspace_setting` resource allows you to control whether automatic cluster update is enabled for the current workspace. By default, it is turned off. Enabling this feature on a workspace requires that you add the Enhanced Security and Compliance add-on.
diff --git a/docs/resources/budget.md b/docs/resources/budget.md
new file mode 100644
index 0000000000..31378d4254
--- /dev/null
+++ b/docs/resources/budget.md
@@ -0,0 +1,103 @@
+---
+subcategory: "FinOps"
+---
+# databricks_budget Resource
+
+-> Initialize the provider with `alias = "account"` and `host` pointing to the account URL, e.g. `host = "https://accounts.cloud.databricks.com"`. Use `provider = databricks.account` for all account-level resources.
+
+-> This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html).
+
+This resource allows you to manage [Databricks Budgets](https://docs.databricks.com/en/admin/account-settings/budgets.html).
+
+## Example Usage
+
+```hcl
+resource "databricks_budget" "this" {
+  display_name = "databricks-workspace-budget"
+
+  alert_configurations {
+    time_period        = "MONTH"
+    trigger_type       = "CUMULATIVE_SPENDING_EXCEEDED"
+    quantity_type      = "LIST_PRICE_DOLLARS_USD"
+    quantity_threshold = "840"
+
+    action_configurations {
+      action_type = "EMAIL_NOTIFICATION"
+      target      = "abc@gmail.com"
+    }
+  }
+
+  filter {
+    workspace_id {
+      operator = "IN"
+      values = [
+        1234567890098765
+      ]
+    }
+
+    tags {
+      key = "Team"
+      value {
+        operator = "IN"
+        values   = ["Data Science"]
+      }
+    }
+
+    tags {
+      key = "Environment"
+      value {
+        operator = "IN"
+        values   = ["Development"]
+      }
+    }
+  }
+}
+```
+
+## Argument Reference
+
+The following arguments are available:
+
+* `display_name` - (Required) Name of the budget in Databricks Account.
+
+### alert_configurations Configuration Block (Required)
+
+* `time_period` - (Required, String Enum) The time window of usage data for the budget. (Enum: `MONTH`)
+* `trigger_type` - (Required, String Enum) The evaluation method to determine when this budget alert is in a triggered state. (Enum: `CUMULATIVE_SPENDING_EXCEEDED`)
+* `quantity_type` - (Required, String Enum) The way to calculate cost for this budget alert. This is what `quantity_threshold` is measured in. (Enum: `LIST_PRICE_DOLLARS_USD`)
+* `quantity_threshold` - (Required, String) The threshold for the budget alert to determine if it is in a triggered state. The number is evaluated based on `quantity_type`.
+* `action_configurations` - (Required) List of action configurations to take when the budget alert is triggered. Consists of the following fields:
+  * `action_type` - (Required, String Enum) The type of action to take when the budget alert is triggered. (Enum: `EMAIL_NOTIFICATION`)
+  * `target` - (Required, String) The target of the action. For `EMAIL_NOTIFICATION`, this is the email address to send the notification to.
+
+### filter Configuration Block (Optional)
+
+* `workspace_id` - (Optional) Filter by workspace ID (if empty, include all usage for this account). Consists of the following fields:
+  * `operator` - (Required, String Enum) The operator to use for the filter. (Enum: `IN`)
+  * `values` - (Required, List of numbers) The values to filter by.
+* `tags` - (Optional) List of tags to filter by. Consists of the following fields:
+  * `key` - (Required, String) The key of the tag.
+  * `value` - (Required) Consists of the following fields:
+    * `operator` - (Required, String Enum) The operator to use for the filter. (Enum: `IN`)
+    * `values` - (Required, List of strings) The values to filter by.
+
+## Attribute Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `budget_configuration_id` - The ID of the budget configuration.
+* `account_id` - The ID of the Databricks Account.
+
+## Import
+
+This resource can be imported using the Databricks account ID and the budget configuration ID:
+
+```sh
+terraform import databricks_budget.this '<account_id>|<budget_configuration_id>'
+```
+
+## Related Resources
+
+The following resources are often used in the same context:
+
+* [databricks_mws_workspaces](mws_workspaces.md) to set up Databricks workspaces.
diff --git a/docs/resources/catalog.md b/docs/resources/catalog.md
index 980c6c837d..fb854f4786 100644
--- a/docs/resources/catalog.md
+++ b/docs/resources/catalog.md
@@ -3,7 +3,7 @@ subcategory: "Unity Catalog"
 ---
 # databricks_catalog Resource
 
--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!
Within a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views.
diff --git a/docs/resources/catalog_workspace_binding.md b/docs/resources/catalog_workspace_binding.md
index 5520192fb5..0eafd00a2c 100644
--- a/docs/resources/catalog_workspace_binding.md
+++ b/docs/resources/catalog_workspace_binding.md
@@ -3,17 +3,15 @@ subcategory: "Unity Catalog"
 ---
 # databricks_catalog_workspace_binding Resource
 
--> **NOTE**This resource has been deprecated and will be removed soon. Please use the [databricks_workspace_binding resource](./workspace_binding.md) instead.
+~> This resource has been deprecated and will be removed soon. Please use the [databricks_workspace_binding resource](./workspace_binding.md) instead.
 
 If you use workspaces to isolate user data access, you may want to limit catalog access to specific workspaces in your account, also known as workspace-catalog binding.
 
 By default, Databricks assigns the catalog to all workspaces attached to the current metastore. By using `databricks_catalog_workspace_binding`, the catalog will be unassigned from all workspaces and only assigned explicitly using this resource.
 
--> **Note**
-  To use this resource the catalog must have its isolation mode set to `ISOLATED` in the [`databricks_catalog`](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/catalog#isolation_mode) resource. Alternatively, the isolation mode can be set using the UI or API by following [this guide](https://docs.databricks.com/data-governance/unity-catalog/create-catalogs.html#configuration).
+-> To use this resource the catalog must have its isolation mode set to `ISOLATED` in the [`databricks_catalog`](https://registry.terraform.io/providers/databricks/databricks/latest/docs/resources/catalog#isolation_mode) resource. Alternatively, the isolation mode can be set using the UI or API by following [this guide](https://docs.databricks.com/data-governance/unity-catalog/create-catalogs.html#configuration).
 
--> **Note**
-  If the catalog's isolation mode was set to `ISOLATED` using Terraform then the catalog will have been automatically bound to the workspace it was created from.
+-> If the catalog's isolation mode was set to `ISOLATED` using Terraform then the catalog will have been automatically bound to the workspace it was created from.
 
 ## Example Usage
 
diff --git a/docs/resources/cluster.md b/docs/resources/cluster.md
index c68cc8aa81..a856812192 100644
--- a/docs/resources/cluster.md
+++ b/docs/resources/cluster.md
@@ -5,7 +5,7 @@ subcategory: "Compute"
 
 This resource allows you to manage [Databricks Clusters](https://docs.databricks.com/clusters/index.html).
 
--> **Note** In case of [`Cannot access cluster ####-######-####### that was terminated or unpinned more than 30 days ago`](https://github.com/databricks/terraform-provider-databricks/issues/1197#issuecomment-1069386670) errors, please upgrade to v0.5.5 or later. If for some reason you cannot upgrade the version of provider, then the other viable option to unblock the apply pipeline is [`terraform state rm path.to.databricks_cluster.resource`](https://www.terraform.io/cli/commands/state/rm) command.
+-> In case of [`Cannot access cluster ####-######-####### that was terminated or unpinned more than 30 days ago`](https://github.com/databricks/terraform-provider-databricks/issues/1197#issuecomment-1069386670) errors, please upgrade to v0.5.5 or later.
If for some reason you cannot upgrade the provider version, then the other viable option to unblock the apply pipeline is the [`terraform state rm path.to.databricks_cluster.resource`](https://www.terraform.io/cli/commands/state/rm) command.
 
 ```hcl
 data "databricks_node_type" "smallest" {
@@ -130,7 +130,7 @@ resource "databricks_cluster" "single_node" {
 
 ### (Legacy) High-Concurrency clusters
 
--> **Note** This is a legacy cluster type, not related to the real serverless compute. See [Clusters UI changes and cluster access modes](https://docs.databricks.com/archive/compute/cluster-ui-preview.html#legacy) for information on what access mode to use when creating new clusters.
+~> This is a legacy cluster type, not related to the real serverless compute. See [Clusters UI changes and cluster access modes](https://docs.databricks.com/archive/compute/cluster-ui-preview.html#legacy) for information on what access mode to use when creating new clusters.
 
 To create a High-Concurrency cluster, the following settings should be provided:
@@ -163,7 +163,7 @@ resource "databricks_cluster" "cluster_with_table_access_control" {
 
 To install libraries, one must specify each library in a separate configuration block. Each different type of library has a slightly different syntax. It's possible to set only one type of library within one config block. Otherwise, the plan will fail with an error.
 
--> **Note** Please consider using [databricks_library](library.md) resource for a more flexible setup.
+-> Please consider using the [databricks_library](library.md) resource for a more flexible setup.
 
 Installing JAR artifacts on a cluster. The location can be anything: DBFS or a mounted object store (s3, adls, ...).
@@ -484,7 +484,7 @@ resource "databricks_cluster" "this" {
 
 ### cluster_mount_info blocks (experimental)
 
--> **Note** The underlying API is experimental and may change in the future.
+~> The underlying API is experimental and may change in the future.
 
 It's possible to mount NFS (Network File System) resources into the Spark containers inside the cluster. You can specify one or more `cluster_mount_info` blocks describing the mount. This block has the following attributes:
diff --git a/docs/resources/compliance_security_profile_setting.md b/docs/resources/compliance_security_profile_setting.md
index 6bb5afc090..acf7f6ef7e 100644
--- a/docs/resources/compliance_security_profile_setting.md
+++ b/docs/resources/compliance_security_profile_setting.md
@@ -4,9 +4,9 @@ subcategory: "Settings"
 
 # databricks_compliance_security_profile_workspace_setting Resource
 
--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!
 
--> **Note** This setting can NOT be disabled once it is enabled.
+~> This setting can NOT be disabled once it is enabled.
 
 The `databricks_compliance_security_profile_workspace_setting` resource allows you to control whether to enable the compliance security profile for the current workspace. Enabling it on a workspace is permanent. By default, it is
diff --git a/docs/resources/connection.md b/docs/resources/connection.md
index f7421bd5da..c568c72847 100644
--- a/docs/resources/connection.md
+++ b/docs/resources/connection.md
@@ -3,7 +3,7 @@ subcategory: "Unity Catalog"
 ---
 # databricks_connection (Resource)
 
--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!
 
 Lakehouse Federation is the query federation platform for Databricks.
 Databricks uses Unity Catalog to manage query federation. To make a dataset available for read-only querying using Lakehouse Federation, you create the following:
diff --git a/docs/resources/dbfs_file.md b/docs/resources/dbfs_file.md
index e3b9c73eea..78607c733f 100644
--- a/docs/resources/dbfs_file.md
+++ b/docs/resources/dbfs_file.md
@@ -49,7 +49,7 @@ resource "databricks_library" "app" {
 
 ## Argument Reference
 
--> **Note** DBFS files would only be changed, if Terraform stage did change. This means that any manual changes to managed file won't be overwritten by Terraform, if there's no local change.
+-> DBFS files would only be changed if the Terraform stage did change. This means that any manual changes to the managed file won't be overwritten by Terraform if there's no local change.
 
 The following arguments are supported:
diff --git a/docs/resources/default_namespace_setting.md b/docs/resources/default_namespace_setting.md
index cf3c5ee36a..f21698fcb6 100644
--- a/docs/resources/default_namespace_setting.md
+++ b/docs/resources/default_namespace_setting.md
@@ -4,7 +4,7 @@ subcategory: "Settings"
 
 # databricks_default_namespace_setting Resource
 
--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!
 
 The `databricks_default_namespace_setting` resource allows you to operate the setting configuration for the default namespace in the Databricks workspace. Setting the default catalog for the workspace determines the catalog that is used when queries do not reference
diff --git a/docs/resources/enhanced_security_monitoring_setting.md b/docs/resources/enhanced_security_monitoring_setting.md
index 64a18c9fcb..18e0de3e1a 100644
--- a/docs/resources/enhanced_security_monitoring_setting.md
+++ b/docs/resources/enhanced_security_monitoring_setting.md
@@ -4,7 +4,7 @@ subcategory: "Settings"
 
 # databricks_enhanced_security_monitoring_workspace_setting Resource
 
--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!
 
 The `databricks_enhanced_security_monitoring_workspace_setting` resource allows you to control whether enhanced security monitoring is enabled for the current workspace. If the compliance security profile is enabled, this is automatically enabled. By default,
diff --git a/docs/resources/entitlements.md b/docs/resources/entitlements.md
index 15d7f42f6f..10852c4bb9 100644
--- a/docs/resources/entitlements.md
+++ b/docs/resources/entitlements.md
@@ -5,7 +5,7 @@ subcategory: "Security"
 
 This resource allows you to set entitlements to existing [databricks_users](user.md), [databricks_group](group.md) or [databricks_service_principal](service_principal.md).
 
--> **Note** You must define entitlements of a principal using either `databricks_entitlements` or directly within one of [databricks_users](user.md), [databricks_group](group.md) or [databricks_service_principal](service_principal.md). Having entitlements defined in both resources will result in non-deterministic behaviour.
+-> You must define entitlements of a principal using either `databricks_entitlements` or directly within one of [databricks_users](user.md), [databricks_group](group.md) or [databricks_service_principal](service_principal.md). Having entitlements defined in both resources will result in non-deterministic behaviour.
 ## Example Usage
diff --git a/docs/resources/external_location.md b/docs/resources/external_location.md
index 59cc555685..2495510bb0 100644
--- a/docs/resources/external_location.md
+++ b/docs/resources/external_location.md
@@ -3,7 +3,7 @@ subcategory: "Unity Catalog"
 ---
 # databricks_external_location Resource
 
--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!
 
 To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage:
diff --git a/docs/resources/global_init_script.md b/docs/resources/global_init_script.md
index bb8e50d98b..90d5d42b36 100644
--- a/docs/resources/global_init_script.md
+++ b/docs/resources/global_init_script.md
@@ -31,7 +31,7 @@ resource "databricks_global_init_script" "init2" {
 
 ## Argument Reference
 
--> **Note** Global init script in the Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed global init script won't be overwritten by Terraform, if there's no local change to source.
+-> A global init script in the Databricks workspace would only be changed if the Terraform stage did change. This means that any manual changes to a managed global init script won't be overwritten by Terraform if there's no local change to the source.
 
 The size of global init script source code must not exceed 64KB. The following arguments are supported:
diff --git a/docs/resources/grant.md b/docs/resources/grant.md
index 2adfdb7d67..5f2f8b4326 100644
--- a/docs/resources/grant.md
+++ b/docs/resources/grant.md
@@ -3,11 +3,9 @@ subcategory: "Unity Catalog"
 ---
 # databricks_grant Resource
 
--> **Note**
- This article refers to the privileges and inheritance model in Privilege Model version 1.0. If you created your metastore during the public preview (before August 25, 2022), you can upgrade to Privilege Model version 1.0 following [Upgrade to privilege inheritance](https://docs.databricks.com/data-governance/unity-catalog/hive-metastore.html)
+-> This article refers to the privileges and inheritance model in Privilege Model version 1.0. If you created your metastore during the public preview (before August 25, 2022), you can upgrade to Privilege Model version 1.0 by following [Upgrade to privilege inheritance](https://docs.databricks.com/data-governance/unity-catalog/hive-metastore.html).
 
--> **Note**
- Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. Account-level principal grants can be assigned with any valid workspace as the Unity Catalog is decoupled from specific workspaces. More information in [the official documentation](https://docs.databricks.com/data-governance/unity-catalog/index.html).
+-> Most Unity Catalog APIs are only accessible via **workspace-level APIs**. This design may change in the future. Account-level principal grants can be assigned with any valid workspace, as Unity Catalog is decoupled from specific workspaces. More information is available in [the official documentation](https://docs.databricks.com/data-governance/unity-catalog/index.html).
 
 In Unity Catalog all users initially have no access to data. Only Metastore Admins can create objects and can grant/revoke access on individual objects to users and groups. Every securable object in Unity Catalog has an owner. The owner can be any account-level user or group, called principals in general. The principal that creates an object becomes its owner.
 Owners receive `ALL_PRIVILEGES` on the securable object (e.g., `SELECT` and `MODIFY` on a table), as well as the permission to grant privileges to other principals.
diff --git a/docs/resources/grants.md b/docs/resources/grants.md
index 25f22c91af..64a52d83df 100644
--- a/docs/resources/grants.md
+++ b/docs/resources/grants.md
@@ -3,11 +3,9 @@ subcategory: "Unity Catalog"
 ---
 # databricks_grants Resource
 
--> **Note**
- This article refers to the privileges and inheritance model in Privilege Model version 1.0. If you created your metastore during the public preview (before August 25, 2022), you can upgrade to Privilege Model version 1.0 following [Upgrade to privilege inheritance](https://docs.databricks.com/data-governance/unity-catalog/hive-metastore.html)
+-> This article refers to the privileges and inheritance model in Privilege Model version 1.0. If you created your metastore during the public preview (before August 25, 2022), you can upgrade to Privilege Model version 1.0 by following [Upgrade to privilege inheritance](https://docs.databricks.com/data-governance/unity-catalog/hive-metastore.html).
 
--> **Note**
- Unity Catalog APIs are accessible via **workspace-level APIs**. This design may change in the future. Account-level principal grants can be assigned with any valid workspace as the Unity Catalog is decoupled from specific workspaces. More information in [the official documentation](https://docs.databricks.com/data-governance/unity-catalog/index.html).
+-> Most Unity Catalog APIs are only accessible via **workspace-level APIs**. This design may change in the future. Account-level principal grants can be assigned with any valid workspace, as Unity Catalog is decoupled from specific workspaces. More information is available in [the official documentation](https://docs.databricks.com/data-governance/unity-catalog/index.html).
 
 Two different resources help you manage your Unity Catalog grants for a securable. Each of these resources serves a different use case:
diff --git a/docs/resources/group.md b/docs/resources/group.md
index 5b23e83c66..aa6321e50f 100644
--- a/docs/resources/group.md
+++ b/docs/resources/group.md
@@ -5,9 +5,9 @@ subcategory: "Security"
 
 This resource allows you to manage both [account groups and workspace-local groups](https://docs.databricks.com/administration-guide/users-groups/groups.html). You can use the [databricks_group_member resource](group_member.md) to assign Databricks users, [service principals](service_principal.md) as well as other groups as members of the group. This is useful if you are using an application to sync users & groups with the SCIM API.
 
--> **Note** To assign an account level group to a workspace use [databricks_mws_permission_assignment](mws_permission_assignment.md).
+-> To assign an account-level group to a workspace, use [databricks_mws_permission_assignment](mws_permission_assignment.md).
 
--> **Note** Entitlements, like, `allow_cluster_create`, `allow_instance_pool_create`, `databricks_sql_access`, `workspace_access` applicable only for workspace-level groups. Use [databricks_entitlements](entitlements.md) resource to assign entitlements inside a workspace to account-level groups.
+-> Entitlements, like `allow_cluster_create`, `allow_instance_pool_create`, `databricks_sql_access`, and `workspace_access`, are applicable only to workspace-level groups. Use the [databricks_entitlements](entitlements.md) resource to assign entitlements inside a workspace to account-level groups.
 To create account groups in the Databricks account, the provider must be configured accordingly: on AWS deployments, with `host = "https://accounts.cloud.databricks.com"` and `account_id = "00000000-0000-0000-0000-000000000000"`; on Azure deployments, with `host = "https://accounts.azuredatabricks.net"`, `account_id = "00000000-0000-0000-0000-000000000000"`, and [AAD tokens](https://registry.terraform.io/providers/databricks/databricks/latest/docs#special-configurations-for-azure) as authentication.
diff --git a/docs/resources/group_instance_profile.md b/docs/resources/group_instance_profile.md
index 9da28aeda1..01f9bfae97 100644
--- a/docs/resources/group_instance_profile.md
+++ b/docs/resources/group_instance_profile.md
@@ -39,7 +39,7 @@ In addition to all arguments above, the following attributes are exported:
 
 ## Import
 
--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.
 
 ## Related Resources
diff --git a/docs/resources/group_role.md b/docs/resources/group_role.md
index 395df9f264..147d214ffa 100644
--- a/docs/resources/group_role.md
+++ b/docs/resources/group_role.md
@@ -59,7 +59,7 @@ In addition to all arguments above, the following attributes are exported:
 
 ## Import
 
--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.
 
 ## Related Resources
diff --git a/docs/resources/instance_pool.md b/docs/resources/instance_pool.md
index 9663576c63..2c7e052cf5 100644
--- a/docs/resources/instance_pool.md
+++ b/docs/resources/instance_pool.md
@@ -5,7 +5,7 @@ subcategory: "Compute"
 
 This resource allows you to manage [instance pools](https://docs.databricks.com/clusters/instance-pools/index.html), which reduce [cluster](cluster.md) start and auto-scaling times by maintaining a set of idle, ready-to-use cloud instances. When a [cluster](cluster.md) attached to a pool needs an instance, it first attempts to allocate one of the pool’s idle instances. If the pool has no idle instances, it expands by allocating a new instance from the instance provider in order to accommodate the cluster’s request. When a [cluster](cluster.md) releases an instance, it returns to the pool and is free for another [cluster](cluster.md) to use. Only clusters attached to a pool can use that pool’s idle instances.
 
--> **Note** It is important to know that different cloud service providers have different `node_type_id`, `disk_specs` and potentially other configurations.
+-> It is important to know that different cloud service providers have different `node_type_id`, `disk_specs` and potentially other configurations.
 
 ## Example Usage
diff --git a/docs/resources/instance_profile.md b/docs/resources/instance_profile.md
index 23ef84f205..b8461dd941 100644
--- a/docs/resources/instance_profile.md
+++ b/docs/resources/instance_profile.md
@@ -5,7 +5,7 @@ subcategory: "Deployment"
 
 This resource allows you to manage AWS EC2 instance profiles with which users can launch [databricks_cluster](cluster.md) and access data, like [databricks_mount](mount.md). The following example demonstrates how to create an instance profile and create a cluster with it. When creating a new `databricks_instance_profile`, Databricks validates that it has sufficient permissions to launch instances with the instance profile.
This validation uses AWS dry-run mode for the [AWS EC2 RunInstances API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_RunInstances.html).
 
--> **Note** Please switch to [databricks_storage_credential](storage_credential.md) with Unity Catalog to manage storage credentials, which provides a better and faster way for managing credential security.
+-> Please switch to [databricks_storage_credential](storage_credential.md) with Unity Catalog to manage storage credentials, which provides a better and faster way of managing credential security.
 
 ```hcl
 variable "crossaccount_role_name" {
diff --git a/docs/resources/ip_access_list.md b/docs/resources/ip_access_list.md
index 44b8c20f46..107ea35144 100644
--- a/docs/resources/ip_access_list.md
+++ b/docs/resources/ip_access_list.md
@@ -5,7 +5,7 @@ subcategory: "Security"
 
 Security-conscious enterprises that use cloud SaaS applications need to restrict access to their own employees. Authentication helps to prove user identity, but that does not enforce network location of the users. Accessing a cloud service from an unsecured network can pose security risks to an enterprise, especially when the user may have authorized access to sensitive or personal data. Enterprise network perimeters apply security policies and limit access to external services (for example, firewalls, proxies, DLP, and logging), so access beyond these controls is assumed to be untrusted. Please see [IP Access List](https://docs.databricks.com/security/network/ip-access-list.html) for full feature documentation.
 
--> **Note** The total number of IP addresses and CIDR scopes provided across all ACL Lists in a workspace can not exceed 1000. Refer to the docs above for specifics.
+-> The total number of IP addresses and CIDR scopes provided across all ACL lists in a workspace cannot exceed 1000. Refer to the docs above for specifics.
 
 ## Example Usage
diff --git a/docs/resources/job.md b/docs/resources/job.md
index e8e3c9cdc2..dc8eebc587 100644
--- a/docs/resources/job.md
+++ b/docs/resources/job.md
@@ -8,7 +8,7 @@ The `databricks_job` resource allows you to manage [Databricks Jobs](https://doc
 
 ## Example Usage
 
--> **Note** In Terraform configuration, it is recommended to define tasks in alphabetical order of their `task_key` arguments, so that you get consistent and readable diff. Whenever tasks are added or removed, or `task_key` is renamed, you'll observe a change in the majority of tasks. It's related to the fact that the current version of the provider treats `task` blocks as an ordered list. Alternatively, `task` block could have been an unordered set, though end-users would see the entire block replaced upon a change in single property of the task.
+-> In Terraform configuration, it is recommended to define tasks in alphabetical order of their `task_key` arguments, so that you get a consistent and readable diff. Whenever tasks are added or removed, or `task_key` is renamed, you'll observe a change in the majority of tasks. This is because the current version of the provider treats `task` blocks as an ordered list. Alternatively, the `task` block could have been an unordered set, though end-users would then see the entire block replaced upon a change in a single property of the task.
 
 It is possible to create [a Databricks job](https://docs.databricks.com/data-engineering/jobs/jobs-user-guide.html) using `task` blocks. A single task is defined with the `task` block containing one of the `*_task` blocks, `task_key`, and additional arguments described below.
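
A minimal, hedged sketch of such a configuration, keeping `task_key` values in alphabetical order per the note above. The job name, task keys, and notebook paths are hypothetical; no cluster fields are set, so per the note further below the tasks would run on serverless compute:

```hcl
resource "databricks_job" "ordered_tasks" {
  name = "example-multitask-job" # hypothetical name

  task {
    task_key = "extract" # task_key values kept in alphabetical order

    notebook_task {
      notebook_path = "/Shared/extract" # hypothetical path
    }
  }

  task {
    task_key = "transform" # sorts after "extract", keeping diffs stable

    depends_on {
      task_key = "extract"
    }

    notebook_task {
      notebook_path = "/Shared/transform" # hypothetical path
    }
  }
}
```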
@@ -142,7 +142,7 @@ This block describes individual tasks:
 
 * `timeout_seconds` - (Optional) (Integer) An optional timeout applied to each run of this job. The default behavior is to have no timeout.
 * `webhook_notifications` - (Optional) (List) An optional set of system destinations (for example, webhook destinations or Slack) to be notified when runs of this task begin, complete, or fail. The default behavior is to not send any notifications. This field is a block and is documented below.
 
--> **Note** If no `job_cluster_key`, `existing_cluster_id`, or `new_cluster` were specified in task definition, then task will executed using serverless compute.
+-> If no `job_cluster_key`, `existing_cluster_id`, or `new_cluster` is specified in the task definition, then the task will be executed using serverless compute.
 
 #### condition_task Configuration Block
@@ -186,7 +186,7 @@ You also need to include a `git_source` block to configure the repository that c
 
 * `pipeline_id` - (Required) The pipeline's unique ID.
 * `full_refresh` - (Optional) (Bool) Specifies if there should be full refresh of the pipeline.
 
--> **Note** The following configuration blocks are only supported inside a `task` block
+-> The following configuration blocks are only supported inside a `task` block.
 
 #### python_wheel_task Configuration Block
@@ -318,7 +318,7 @@ This block describes upstream dependencies of a given task. For multiple upstrea
 
 * `task_key` - (Required) The name of the task this task depends on.
 * `outcome` - (Optional, string) Can only be specified on condition task dependencies. The outcome of the dependent task that must be met for this task to run. Possible values are `"true"` or `"false"`.
 
--> **Note** Similar to the tasks themselves, each dependency inside the task need to be declared in alphabetical order with respect to task_key in order to get consistent Terraform diffs.
+-> Similar to the tasks themselves, each dependency inside the task needs to be declared in alphabetical order with respect to `task_key` in order to get consistent Terraform diffs.
 
 ### run_as Configuration Block
@@ -434,7 +434,7 @@ webhook_notifications {
 
 * `id` - ID of the system notification that is notified when an event defined in `webhook_notifications` is triggered.
 
--> **Note** The following configuration blocks can be standalone or nested inside a `task` block
+-> The following configuration blocks can be standalone or nested inside a `task` block.
 
 ### notification_settings Configuration Block
diff --git a/docs/resources/library.md b/docs/resources/library.md
index 5eab7306fb..c693bfed8d 100644
--- a/docs/resources/library.md
+++ b/docs/resources/library.md
@@ -5,7 +5,7 @@ subcategory: "Compute"
 
 Installs a [library](https://docs.databricks.com/libraries/index.html) on [databricks_cluster](cluster.md). Each different type of library has a slightly different syntax. It's possible to set only one type of library within one resource. Otherwise, the plan will fail with an error.
 
--> **Note** `databricks_library` resource would always start the associated cluster if it's not running, so make sure to have auto-termination configured. It's not possible to atomically change the version of the same library without cluster restart. Libraries are fully removed from the cluster only after restart.
+-> The `databricks_library` resource would always start the associated cluster if it's not running, so make sure to have auto-termination configured. It's not possible to atomically change the version of the same library without a cluster restart.
Libraries are fully removed from the cluster only after a restart.
 
 ## Installing library on all clusters
@@ -71,7 +71,7 @@ resource "databricks_library" "app" {
 
 Installing Python PyPI artifacts. You can optionally also specify the `repo` parameter for a custom PyPI mirror, which should be accessible without any authentication for the network that the cluster runs in.
 
--> **Note** `repo` host should be accessible from the Internet by Databricks control plane. If connectivity to custom PyPI repositories is required, please modify cluster-node `/etc/pip.conf` through [databricks_global_init_script](global_init_script.md).
+-> The `repo` host should be accessible from the Internet by the Databricks control plane. If connectivity to custom PyPI repositories is required, please modify the cluster-node `/etc/pip.conf` through [databricks_global_init_script](global_init_script.md).
 
 ```hcl
 resource "databricks_library" "fbprophet" {
@@ -126,7 +126,7 @@ resource "databricks_library" "rkeops" {
 
 ## Import
 
--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.
 
 ## Related Resources
diff --git a/docs/resources/metastore.md b/docs/resources/metastore.md
index 6cd5a8417e..8193f68600 100644
--- a/docs/resources/metastore.md
+++ b/docs/resources/metastore.md
@@ -3,7 +3,7 @@ subcategory: "Unity Catalog"
 ---
 # databricks_metastore Resource
 
--> **Note** This resource can be used with an account or workspace-level provider.
+-> This resource can be used with an account or workspace-level provider.
 
 A metastore is the top-level container of objects in Unity Catalog. It stores data assets (tables and views) and the permissions that govern access to them. Databricks account admins can create metastores and assign them to Databricks workspaces in order to control which workloads use each metastore.
diff --git a/docs/resources/metastore_assignment.md b/docs/resources/metastore_assignment.md
index 6a336a312b..11a94307cd 100644
--- a/docs/resources/metastore_assignment.md
+++ b/docs/resources/metastore_assignment.md
@@ -3,7 +3,7 @@ subcategory: "Unity Catalog"
 ---
 # databricks_metastore_assignment (Resource)
 
--> **Note** This resource can be used with an account or workspace-level provider.
+-> This resource can be used with an account or workspace-level provider.
 
 A single [databricks_metastore](metastore.md) can be shared across Databricks workspaces, and each linked workspace has a consistent view of the data and a single set of access policies. You can only create a single metastore for each region in which your organization operates.
diff --git a/docs/resources/metastore_data_access.md b/docs/resources/metastore_data_access.md
index 290eb061cb..04823fe417 100644
--- a/docs/resources/metastore_data_access.md
+++ b/docs/resources/metastore_data_access.md
@@ -3,7 +3,7 @@ subcategory: "Unity Catalog"
 ---
 # databricks_metastore_data_access (Resource)
 
--> **Note** This resource can be used with an account or workspace-level provider.
+-> This resource can be used with an account or workspace-level provider.
 
 Optionally, each [databricks_metastore](metastore.md) can have a default [databricks_storage_credential](storage_credential.md) defined as `databricks_metastore_data_access`. This will be used by Unity Catalog to access data in the root storage location if defined.
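
As a hedged illustration of the paragraph above, here is a minimal sketch of a default storage credential for a metastore on AWS. The credential name and IAM role ARN are hypothetical, and `databricks_metastore.this` is assumed to be defined elsewhere in the configuration:

```hcl
resource "databricks_metastore_data_access" "this" {
  metastore_id = databricks_metastore.this.id
  name         = "metastore-root-credential" # hypothetical name

  aws_iam_role {
    role_arn = "arn:aws:iam::123456789012:role/metastore-access" # hypothetical ARN
  }

  # Mark this credential as the default used for the root storage location.
  is_default = true
}
```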
diff --git a/docs/resources/mlflow_model.md b/docs/resources/mlflow_model.md
index 60c310d295..208866d8d7 100644
--- a/docs/resources/mlflow_model.md
+++ b/docs/resources/mlflow_model.md
@@ -5,7 +5,7 @@ subcategory: "MLflow"
 
 This resource allows you to create [MLflow models](https://docs.databricks.com/applications/mlflow/models.html) in Databricks.
 
-**Note** This documentation covers the Workspace Model Registry. Databricks recommends using [Models in Unity Catalog](registered_model.md). Models in Unity Catalog provides centralized model governance, cross-workspace access, lineage, and deployment.
+-> This documentation covers the Workspace Model Registry. Databricks recommends using [Models in Unity Catalog](registered_model.md) instead, which provides centralized model governance, cross-workspace access, lineage, and deployment.
 
 ## Example Usage
diff --git a/docs/resources/mlflow_webhook.md b/docs/resources/mlflow_webhook.md
index 96f62e20ff..fd280cf9b4 100644
--- a/docs/resources/mlflow_webhook.md
+++ b/docs/resources/mlflow_webhook.md
@@ -112,7 +112,7 @@ In addition to all arguments above, the following attributes are exported:
 
 ## Import
 
--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.
 
 ## Related Resources
diff --git a/docs/resources/model_serving.md b/docs/resources/model_serving.md
index 0cac9cb8f5..0bb116bfa9 100644
--- a/docs/resources/model_serving.md
+++ b/docs/resources/model_serving.md
@@ -5,7 +5,7 @@ subcategory: "Serving"
 
 This resource allows you to manage [Model Serving](https://docs.databricks.com/machine-learning/model-serving/index.html) endpoints in Databricks.
 
-**Note** If you replace `served_models` with `served_entities` in an existing serving endpoint, the serving endpoint will briefly go into an update state (~30 seconds) and increment the config version.
+-> If you replace `served_models` with `served_entities` in an existing serving endpoint, the serving endpoint will briefly go into an update state (~30 seconds) and increment the config version.
 
 ## Example Usage
diff --git a/docs/resources/mount.md b/docs/resources/mount.md
index a0446b03ac..ff187b2daa 100644
--- a/docs/resources/mount.md
+++ b/docs/resources/mount.md
@@ -5,9 +5,9 @@ subcategory: "Storage"
 
 This resource will [mount your cloud storage](https://docs.databricks.com/data/databricks-file-system.html#mount-object-storage-to-dbfs) on `dbfs:/mnt/name`. Right now it supports mounting AWS S3, Azure (Blob Storage, ADLS Gen1 & Gen2), and Google Cloud Storage. It is important to understand that this will start up the [cluster](cluster.md) if the cluster is terminated. The read and refresh terraform command will require a cluster and may take some time to validate the mount.
 
-**Note** When `cluster_id` is not specified, it will create the smallest possible cluster in the default availability zone with name equal to or starting with `terraform-mount` for the shortest possible amount of time. To avoid mount failure due to potentially quota or capacity issues with the default cluster, we recommend specifying a cluster to use for mounting.
+-> When `cluster_id` is not specified, it will create the smallest possible cluster in the default availability zone with a name equal to or starting with `terraform-mount` for the shortest possible amount of time. To avoid mount failures due to potential quota or capacity issues with the default cluster, we recommend specifying a cluster to use for mounting.
-**Note** CRUD operations on a databricks mount require a running cluster. Due to limitations of terraform and the databricks mounts APIs, if the cluster the mount was most recently created / updated using no longer exists AND the mount is destroyed as a part of a terraform apply, we mark it as deleted without cleaning it up from the workspace.
+-> CRUD operations on a Databricks mount require a running cluster. Due to limitations of Terraform and the Databricks mounts APIs, if the cluster the mount was most recently created / updated with no longer exists AND the mount is destroyed as part of a terraform apply, we mark it as deleted without cleaning it up from the workspace.
 
 This resource provides two ways of mounting a storage account:
@@ -62,9 +62,9 @@ resource "databricks_mount" "this" {
 
 ### Example mounting ADLS Gen2 with AAD passthrough
 
--> **Note** AAD passthrough is considered a legacy data access pattern. Use Unity Catalog for fine-grained data access control.
+-> AAD passthrough is considered a legacy data access pattern. Use Unity Catalog for fine-grained data access control.
 
--> **Note** Mounts using AAD passthrough cannot be created using a service principal.
+-> Mounts using AAD passthrough cannot be created using a service principal.
 
 To mount ADLS Gen2 with Azure Active Directory Credentials passthrough, we need to execute the mount commands using a cluster configured with AAD Credentials passthrough and provide the necessary configuration parameters (see the [documentation](https://docs.microsoft.com/en-us/azure/databricks/security/credential-passthrough/adls-passthrough#--mount-azure-data-lake-storage-to-dbfs-using-credential-passthrough) for more details).
@@ -341,7 +341,7 @@ In addition to all arguments above, the following attributes are exported:
 
 ## Import
 
--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.
 
 ## Related Resources
diff --git a/docs/resources/mws_credentials.md b/docs/resources/mws_credentials.md
index 7271ee6a68..0a5c69daae 100644
--- a/docs/resources/mws_credentials.md
+++ b/docs/resources/mws_credentials.md
@@ -3,7 +3,7 @@ subcategory: "Deployment"
 ---
 # databricks_mws_credentials Resource
 
--> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
+-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
 
 This resource is used to configure the cross-account role for the creation of new workspaces within AWS.
diff --git a/docs/resources/mws_customer_managed_keys.md b/docs/resources/mws_customer_managed_keys.md
index 3d46c6707c..206158766d 100644
--- a/docs/resources/mws_customer_managed_keys.md
+++ b/docs/resources/mws_customer_managed_keys.md
@@ -3,7 +3,7 @@ subcategory: "Deployment"
 ---
 # databricks_mws_customer_managed_keys Resource
 
--> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
+-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
 
 This resource is used to configure KMS keys for new workspaces within AWS or GCP. It supports the following features:
@@ -14,7 +14,7 @@ Please follow this [complete runnable example](../guides/aws-workspace.md) with
 
 ## Example Usage
 
--> **Note** If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour.
+-> If you've used the resource before, please add `use_cases = ["MANAGED_SERVICES"]` to keep the previous behaviour.
 
 ### Customer-managed key for managed services
@@ -251,7 +251,7 @@ In addition to all arguments above, the following attributes are exported:
 
 ## Import
 
--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.
 
 ## Related Resources
diff --git a/docs/resources/mws_log_delivery.md b/docs/resources/mws_log_delivery.md
index f22b1abbf6..98e25273ff 100644
--- a/docs/resources/mws_log_delivery.md
+++ b/docs/resources/mws_log_delivery.md
@@ -3,7 +3,7 @@ subcategory: "Log Delivery"
 ---
 # databricks_mws_log_delivery Resource
 
--> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
+-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
 
 This resource configures the delivery of the two supported log types from Databricks workspaces: [billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) and [audit logs](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).
@@ -150,7 +150,7 @@ Resource exports the following attributes:
 
 ## Import
 
--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.
 
 ## Related Resources
diff --git a/docs/resources/mws_ncc_binding.md b/docs/resources/mws_ncc_binding.md
index af64e6a94f..6615294941 100644
--- a/docs/resources/mws_ncc_binding.md
+++ b/docs/resources/mws_ncc_binding.md
@@ -3,9 +3,9 @@ subcategory: "Deployment"
 ---
 # databricks_mws_ncc_binding Resource
 
--> **Note** Initialize provider with `alias = "account"`, `host = "https://accounts.azuredatabricks.net"` and use `provider = databricks.account` for all `databricks_mws_*` resources.
+-> Initialize provider with `alias = "account"`, `host = "https://accounts.azuredatabricks.net"` and use `provider = databricks.account` for all `databricks_mws_*` resources.
 
--> **Public Preview** This feature is available for AWS & Azure only, and is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html) in AWS.
+-> This feature is available for AWS & Azure only, and is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html) in AWS.
 
 Allows you to attach a [Network Connectivity Config](mws_network_connectivity_config.md) object to a [databricks_mws_workspaces](mws_workspaces.md) resource to create a [Databricks Workspace that leverages serverless network connectivity configs](https://learn.microsoft.com/en-us/azure/databricks/sql/admin/serverless-firewall).
diff --git a/docs/resources/mws_ncc_private_endpoint_rule.md b/docs/resources/mws_ncc_private_endpoint_rule.md
index 0180f1e587..50fba93908 100644
--- a/docs/resources/mws_ncc_private_endpoint_rule.md
+++ b/docs/resources/mws_ncc_private_endpoint_rule.md
@@ -3,9 +3,9 @@ subcategory: "Deployment"
 ---
 # databricks_mws_ncc_private_endpoint_rule Resource
 
--> **Note** Initialize provider with `alias = "account"`, `host = "https://accounts.azuredatabricks.net"` and use `provider = databricks.account` for all `databricks_mws_*` resources.
+-> Initialize provider with `alias = "account"`, `host = "https://accounts.azuredatabricks.net"` and use `provider = databricks.account` for all `databricks_mws_*` resources.
--> **Note** This feature is only available in Azure.
+-> This feature is only available in Azure.
 
 Allows you to create a private endpoint in a [Network Connectivity Config](mws_network_connectivity_config.md) that can be used to [configure private connectivity from serverless compute](https://learn.microsoft.com/en-us/azure/databricks/security/network/serverless-network-security/serverless-private-link).
diff --git a/docs/resources/mws_network_connectivity_config.md b/docs/resources/mws_network_connectivity_config.md
index 401d8a98db..bf16a35caf 100644
--- a/docs/resources/mws_network_connectivity_config.md
+++ b/docs/resources/mws_network_connectivity_config.md
@@ -3,7 +3,7 @@ subcategory: "Deployment"
 ---
 # databricks_mws_network_connectivity_config Resource
 
--> **Note** Initialize provider with `alias = "account"`, `host = "https://accounts.azuredatabricks.net"` and use `provider = databricks.account` for all `databricks_mws_*` resources.
+-> Initialize provider with `alias = "account"`, `host = "https://accounts.azuredatabricks.net"` and use `provider = databricks.account` for all `databricks_mws_*` resources.
 
 -> **Public Preview** This feature is available for AWS & Azure only, and is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html) in AWS.
diff --git a/docs/resources/mws_networks.md b/docs/resources/mws_networks.md
index cc26d438c9..1c7f41ee01 100644
--- a/docs/resources/mws_networks.md
+++ b/docs/resources/mws_networks.md
@@ -5,7 +5,7 @@ subcategory: "Deployment"
 
 ## Databricks on AWS usage
 
--> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
+-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
 
 Use this resource to [configure VPC](https://docs.databricks.com/administration-guide/cloud-configurations/aws/customer-managed-vpc.html) & subnets for new workspaces within AWS. It is essential to understand that this will require you to configure your provider separately for the multiple workspaces resources.
@@ -16,13 +16,13 @@ Use this resource to [configure VPC](https://docs.databricks.com/administration-
 
 * Subnets must have outbound access to the public network using an [aws_nat_gateway](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/nat_gateway) and [aws_internet_gateway](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/internet_gateway), or other similar customer-managed appliance infrastructure.
 * The NAT gateway must be set up in its own subnet (public_subnets in the example below) that routes quad-zero (0.0.0.0/0) traffic to an internet gateway or other customer-managed appliance infrastructure.
 
--> **Note** The NAT gateway needs only one IP address per AZ. Hence, the public subnet only needs two IP addresses. In order to limit the number of IP addresses in the public subnet, you can specify a secondary CIDR block (cidr_block_public) using the argument secondary_cidr_blocks then pass it to the public_subnets argument. Please review the [IPv4 CIDR block association restrictions](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) when choosing the secondary cidr block.
+-> The NAT gateway needs only one IP address per AZ. Hence, the public subnet only needs two IP addresses.
In order to limit the number of IP addresses in the public subnet, you can specify a secondary CIDR block (cidr_block_public) using the argument secondary_cidr_blocks, then pass it to the public_subnets argument. Please review the [IPv4 CIDR block association restrictions](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html) when choosing the secondary CIDR block.
 
 Please follow this [complete runnable example](../guides/aws-workspace.md) with new VPC and new workspace setup. Please pay special attention to the fact that there you have two different instances of a databricks provider - one for deploying workspaces (with `host="https://accounts.cloud.databricks.com/"`) and another for the workspace you've created with the `databricks_mws_workspaces` resource. If you want both creation of workspaces and clusters within the same Terraform module (essentially the same directory), you should use the provider aliasing feature of Terraform. We strongly recommend having one terraform module to create the workspace + PAT token and the rest in different modules.
 
 ## Databricks on GCP usage
 
--> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.gcp.databricks.com"` and use `provider = databricks.mws`
+-> Initialize provider with `alias = "mws"`, `host = "https://accounts.gcp.databricks.com"` and use `provider = databricks.mws`
 
 Use this resource to [configure VPC](https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/customer-managed-vpc.html) & subnet for new workspaces within GCP. It is essential to understand that this will require you to configure your provider separately for the multiple workspaces resources.
@@ -215,7 +215,7 @@ In addition to all arguments above, the following attributes are exported:
 
 ## Import
 
--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.
 
 ## Related Resources
diff --git a/docs/resources/mws_private_access_settings.md b/docs/resources/mws_private_access_settings.md
index 3fbc4577b2..24de15f5e7 100644
--- a/docs/resources/mws_private_access_settings.md
+++ b/docs/resources/mws_private_access_settings.md
@@ -11,7 +11,7 @@ It is strongly recommended that customers read the [Enable AWS Private Link](htt
 
 ## Databricks on AWS usage
 
--> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
+-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
 
 ```hcl
 resource "databricks_mws_private_access_settings" "pas" {
@@ -42,7 +42,7 @@ resource "databricks_mws_workspaces" "this" {
 
 ## Databricks on GCP usage
 
--> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.gcp.databricks.com"` and use `provider = databricks.mws`
+-> Initialize provider with `alias = "mws"`, `host = "https://accounts.gcp.databricks.com"` and use `provider = databricks.mws`
 
 ```hcl
 resource "databricks_mws_workspaces" "this" {
@@ -85,7 +85,7 @@ In addition to all arguments above, the following attributes are exported:
 
 ## Import
 
--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.
 ## Related Resources
diff --git a/docs/resources/mws_storage_configurations.md b/docs/resources/mws_storage_configurations.md
index c7018a4205..779c5f3d67 100644
--- a/docs/resources/mws_storage_configurations.md
+++ b/docs/resources/mws_storage_configurations.md
@@ -3,7 +3,7 @@ subcategory: "Deployment"
 ---
 # databricks_mws_storage_configurations Resource
 
--> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
+-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
 
 This resource is used to configure the root bucket for new workspaces within AWS.
@@ -55,7 +55,7 @@ In addition to all arguments above, the following attributes are exported:
 
 ## Import
 
--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.
 
 ## Related Resources
diff --git a/docs/resources/mws_vpc_endpoint.md b/docs/resources/mws_vpc_endpoint.md
index 1600df1e40..97ed95330d 100644
--- a/docs/resources/mws_vpc_endpoint.md
+++ b/docs/resources/mws_vpc_endpoint.md
@@ -3,7 +3,7 @@ subcategory: "Deployment"
 ---
 # databricks_mws_vpc_endpoint Resource
 
--> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
+-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`
 
 Enables you to register [aws_vpc_endpoint](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc_endpoint) resources or GCP vpc_endpoint resources with Databricks such that they can be used as part of a [databricks_mws_networks](mws_networks.md) configuration.
@@ -200,7 +200,7 @@ In addition to all arguments above, the following attributes are exported:
 
 ## Import
 
--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.
 
 ## Related Resources
diff --git a/docs/resources/mws_workspaces.md b/docs/resources/mws_workspaces.md
index c56f1c51c6..4f48777274 100644
--- a/docs/resources/mws_workspaces.md
+++ b/docs/resources/mws_workspaces.md
@@ -3,7 +3,7 @@ subcategory: "Deployment"
 ---
 # databricks_mws_workspaces resource
 
--> **Note** Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`. We require all `databricks_mws_*` resources to be created within its own dedicated terraform module of your environment. Usually this module creates VPC and IAM roles as well. Code that creates workspaces and code that [manages workspaces](../guides/workspace-management.md) must be in separate terraform modules to avoid common confusion between `provider = databricks.mws` and `provider = databricks.created_workspace`. This is why we specify `databricks_host` and `databricks_token` outputs, that have to be used in the latter modules:
+-> Initialize provider with `alias = "mws"`, `host = "https://accounts.cloud.databricks.com"` and use `provider = databricks.mws`. We require all `databricks_mws_*` resources to be created within their own dedicated terraform module of your environment. Usually this module creates the VPC and IAM roles as well. Code that creates workspaces and code that [manages workspaces](../guides/workspace-management.md) must be in separate terraform modules to avoid common confusion between `provider = databricks.mws` and `provider = databricks.created_workspace`.
This is why we specify the `databricks_host` and `databricks_token` outputs, which have to be used in the latter modules:
 
 ```hcl
 provider "databricks" {
@@ -14,7 +14,7 @@ provider "databricks" {
 
 This resource allows you to set up [workspaces on AWS](https://docs.databricks.com/getting-started/overview.html#e2-architecture-1) or [workspaces on GCP](https://docs.gcp.databricks.com/administration-guide/account-settings-gcp/workspaces.html). Please follow this complete runnable example on [AWS](../guides/aws-workspace.md) or [GCP](../guides/gcp-workspace.md) with new VPC and new workspace setup.
 
--> **Note** On Azure you need to use [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace) resource to create Azure Databricks workspaces.
+-> On Azure you need to use the [azurerm_databricks_workspace](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/databricks_workspace) resource to create Azure Databricks workspaces.
 
 ## Example Usage
@@ -315,7 +315,7 @@ output "databricks_token" {
 
 ## Argument Reference
 
--> **Note** All workspaces would be verified to get into runnable state or deleted upon failure. You can only update `credentials_id`, `network_id`, and `storage_customer_managed_key_id`, `private_access_settings_id` on a running workspace.
+-> All workspaces are verified to reach a runnable state, or are deleted upon failure. You can only update `credentials_id`, `network_id`, `storage_customer_managed_key_id`, and `private_access_settings_id` on a running workspace.
 
 The following arguments are available:
@@ -342,7 +342,7 @@ The following arguments are available:
 
 You can specify a `token` block in the body of the workspace resource, so that Terraform manages the refresh of the PAT token for the deployment user. The other option is to create a [databricks_obo_token](obo_token.md), though it requires the Premium or Enterprise plan as well as a more complex setup. The token block exposes `token_value`, which holds the sensitive PAT token, and optionally it can accept two arguments:
 
--> **Note** Tokens managed by `token {}` block are recreated when expired.
+-> Tokens managed by the `token {}` block are recreated when expired.
 
 * `comment` - (Optional) Comment that will appear on the "User Settings / Access Tokens" page in the Workspace UI. By default it's "Terraform PAT".
 * `lifetime_seconds` - (Optional) Token expiry lifetime. By default it's 2592000 (30 days).
@@ -392,7 +392,7 @@ You can reset local DNS caches before provisioning new workspaces with one of th
 
 ## Import
 
--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.
 
 ## Related Resources
diff --git a/docs/resources/notebook.md b/docs/resources/notebook.md
index 1c00e64d4d..2f2bc46e79 100644
--- a/docs/resources/notebook.md
+++ b/docs/resources/notebook.md
@@ -44,7 +44,7 @@ resource "databricks_notebook" "lesson" {
 
 ## Argument Reference
 
--> **Note** Notebook on Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed notebook won't be overwritten by Terraform, if there's no local change to notebook sources. Notebooks are identified by their path, so changing notebook's name manually on the workspace and then applying Terraform state would result in creation of notebook from Terraform state.
+-> A notebook in the Databricks workspace would only be changed if the Terraform stage did change.
This means that any manual changes to a managed notebook won't be overwritten by Terraform if there's no local change to the notebook sources. Notebooks are identified by their path, so changing a notebook's name manually in the workspace and then applying the Terraform state would result in the notebook being created again from the Terraform state.
 
 The size of a notebook source code must not exceed a few megabytes. The following arguments are supported:
diff --git a/docs/resources/obo_token.md b/docs/resources/obo_token.md
index d43c6b6975..bd5a00c6ca 100644
--- a/docs/resources/obo_token.md
+++ b/docs/resources/obo_token.md
@@ -9,7 +9,7 @@ This resource creates [On-Behalf-Of tokens](https://docs.databricks.com/administ
 
 Creating a token for a narrowly-scoped service principal, which would be the only one (besides admins) allowed to use a PAT token in this given workspace, keeps your automated deployment highly secure.
 
--> **Note** A given declaration of `databricks_permissions.token_usage` would OVERWRITE permissions to use PAT tokens from any existing groups with token usage permissions such as the `users` group. To avoid this, be sure to include any desired groups in additional `access_control` blocks in the Terraform configuration file.
+-> A given declaration of `databricks_permissions.token_usage` would OVERWRITE permissions to use PAT tokens from any existing groups with token usage permissions, such as the `users` group. To avoid this, be sure to include any desired groups in additional `access_control` blocks in the Terraform configuration file.
 
 ```hcl
 resource "databricks_service_principal" "this" {
@@ -78,7 +78,7 @@ In addition to all arguments above, the following attributes are exported:
 
 ## Import
 
--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.
 
 ## Related Resources
diff --git a/docs/resources/online_table.md b/docs/resources/online_table.md
index 66aa48025c..df026991aa 100644
--- a/docs/resources/online_table.md
+++ b/docs/resources/online_table.md
@@ -3,8 +3,7 @@ subcategory: "Unity Catalog"
 ---
 # databricks_online_table (Resource)
-
--> **Note** This resource can only be used on a Unity Catalog-enabled workspace!
+-> This resource can only be used on a Unity Catalog-enabled workspace!
 
 This resource allows you to create an [Online Table](https://docs.databricks.com/en/machine-learning/feature-store/online-tables.html) in Databricks. An online table is a read-only copy of a Delta Table that is stored in row-oriented format optimized for online access. Online tables are fully serverless tables that auto-scale throughput capacity with the request load and provide low latency and high throughput access to data of any scale. Online tables are designed to work with Databricks Model Serving, Feature Serving, and retrieval-augmented generation (RAG) applications where they are used for fast data lookups.
@@ -26,8 +25,7 @@ resource "databricks_online_table" "this" {
 
 ## Argument Reference
 
--> **Note** If any parameter changes, online table is recreated.
-
+~> If any parameter changes, the online table is recreated.
 
 The following arguments are supported - check [API docs](https://docs.databricks.com/api/workspace/onlinetables/create) for all supported parameters:
@@ -50,6 +48,7 @@ In addition to all arguments above, the following attributes are exported:
 
 * `status` - object describing the status of the online table:
   * `detailed_state` - The state of the online table.
   * `message` - A text description of the current state of the online table.
+* `table_serving_url` - Data serving REST API URL for this table.
 
 ## Import
diff --git a/docs/resources/permissions.md b/docs/resources/permissions.md
index ef5e1c4de8..8e2e236dfe 100644
--- a/docs/resources/permissions.md
+++ b/docs/resources/permissions.md
@@ -4,15 +4,15 @@ subcategory: "Security"
 
 # databricks_permissions Resource
 
-This resource allows you to generically manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspace. It would guarantee that only _admins_, _authenticated principal_ and those declared within `access_control` blocks would have specified access. It is not possible to remove management rights from _admins_ group.
+This resource allows you to generically manage [access control](https://docs.databricks.com/security/access-control/index.html) in Databricks workspaces. It ensures that only _admins_, the _authenticated principal_, and those declared within `access_control` blocks have the specified access. It is not possible to remove management rights from the _admins_ group.
 
--> **Note** Configuring this resource for an object will **OVERWRITE** any existing permissions of the same type unless imported, and changes made outside of Terraform will be reset unless the changes are also reflected in the configuration.
+~> This resource is _authoritative_ for permissions on objects. Configuring this resource for an object will **OVERWRITE** any existing permissions of the same type unless imported, and changes made outside of Terraform will be reset.
 
--> **Note** It is not possible to lower permissions for `admins` or your own user anywhere from `CAN_MANAGE` level, so Databricks Terraform Provider [removes](https://github.com/databricks/terraform-provider-databricks/blob/main/permissions/resource_permissions.go#L324-L332) those `access_control` blocks automatically.
+-> It is not possible to lower permissions for `admins`, so the Databricks Terraform Provider removes those `access_control` blocks automatically.
 
--> **Note** If multiple permission levels are specified for an identity (e.g. `CAN_RESTART` and `CAN_MANAGE` for a cluster), only the highest level permission is returned and will cause permanent drift.
+-> If multiple permission levels are specified for an identity (e.g. `CAN_RESTART` and `CAN_MANAGE` for a cluster), only the highest level permission is returned and will cause permanent drift.
 
--> **Warning** To manage access control on service principals, use [databricks_access_control_rule_set](access_control_rule_set.md).
+~> To manage access control on service principals, use [databricks_access_control_rule_set](access_control_rule_set.md).
 
 ## Cluster usage
@@ -347,7 +347,7 @@ resource "databricks_permissions" "notebook_usage_by_id" {
 }
 ```
 
--> **Note**: when importing a permissions resource, only the `notebook_id` is filled!
+-> When importing a permissions resource, only the `notebook_id` is filled!
 
 ## Workspace file usage
@@ -408,7 +408,7 @@ resource "databricks_permissions" "workspace_file_usage_by_id" {
 }
 ```
 
--> **Note**: when importing a permissions resource, only the `workspace_file_id` is filled!
+-> When importing a permissions resource, only the `workspace_file_id` is filled!
 
 ## Folder usage
@@ -474,7 +474,7 @@ resource "databricks_permissions" "folder_usage_by_id" {
 }
 ```
 
--> **Note**: when importing a permissions resource, only the `directory_id` is filled!
+-> When importing a permissions resource, only the `directory_id` is filled!
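
For illustration, a hedged sketch of such an import, assuming the usual `/<object type>/<object id>` import format for `databricks_permissions` and a hypothetical directory ID; after importing, run `terraform plan` so the remaining attributes are reconciled from your configuration:

```sh
terraform import databricks_permissions.folder_usage_by_id /directories/1234567890
```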
## Repos usage

@@ -801,7 +801,7 @@ resource "databricks_permissions" "sql_dashboard_usage" {

[SQL queries](https://docs.databricks.com/sql/user/security/access-control/query-acl.html) have three possible permissions: `CAN_VIEW`, `CAN_RUN` and `CAN_MANAGE`:

--> **Note** If you do not define an `access_control` block granting `CAN_MANAGE` explictly for the user calling this provider, Databricks Terraform Provider will add `CAN_MANAGE` permission for the caller. This is a failsafe to prevent situations where the caller is locked out from making changes to the targeted `databricks_sql_query` resource when backend API do not apply permission inheritance correctly.
+-> If you do not define an `access_control` block granting `CAN_MANAGE` explicitly for the user calling this provider, the Databricks Terraform Provider will add the `CAN_MANAGE` permission for the caller. This is a failsafe to prevent situations where the caller is locked out from making changes to the targeted `databricks_sql_query` resource when the backend API does not apply permission inheritance correctly.

```hcl
resource "databricks_group" "auto" {
@@ -912,7 +912,7 @@ access_control {

Arguments for the `access_control` block are:

--> **Note** It is not possible to lower permissions for `admins` or your own user anywhere from `CAN_MANAGE` level, so Databricks Terraform Provider [removes](https://github.com/databricks/terraform-provider-databricks/blob/main/permissions/resource_permissions.go#L324-L332) those `access_control` blocks automatically.
+-> It is not possible to lower permissions for `admins` or your own user below the `CAN_MANAGE` level, so the Databricks Terraform Provider [removes](https://github.com/databricks/terraform-provider-databricks/blob/main/permissions/resource_permissions.go#L324-L332) those `access_control` blocks automatically.

- `permission_level` - (Required) permission level according to specific resource. See examples above for the reference.

diff --git a/docs/resources/provider.md b/docs/resources/provider.md
index 25ebe76601..6366a1f69b 100644
--- a/docs/resources/provider.md
+++ b/docs/resources/provider.md
@@ -3,13 +3,13 @@ subcategory: "Delta Sharing"
---
# databricks_provider Resource

--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!

In Delta Sharing, a provider is an entity that shares data with a recipient. Within a metastore, Unity Catalog provides the ability to create a provider which contains a list of shares that have been shared with you.

A `databricks_provider` is contained within [databricks_metastore](metastore.md) and can contain a list of shares that have been shared with you.

-Note that Databricks to Databricks sharing automatically creates the provider.
+-> Databricks to Databricks sharing automatically creates the provider.

## Example Usage

diff --git a/docs/resources/recipient.md b/docs/resources/recipient.md
index 6df597ea92..0f88cd05ea 100644
--- a/docs/resources/recipient.md
+++ b/docs/resources/recipient.md
@@ -3,7 +3,7 @@ subcategory: "Delta Sharing"
---
# databricks_recipient Resource

--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!

In Delta Sharing, a recipient is an entity that receives shares from a provider.
In Unity Catalog, a share is a securable object that represents an organization and associates it with a credential or secure sharing identifier that allows that organization to access one or more shares.

diff --git a/docs/resources/registered_model.md b/docs/resources/registered_model.md
index 44c583102b..4de27e474d 100644
--- a/docs/resources/registered_model.md
+++ b/docs/resources/registered_model.md
@@ -3,7 +3,7 @@ subcategory: "Unity Catalog"
---
# databricks_registered_model Resource

--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!

This resource allows you to create [Models in Unity Catalog](https://docs.databricks.com/en/mlflow/models-in-uc.html) in Databricks.

diff --git a/docs/resources/repo.md b/docs/resources/repo.md
index b7d40b93ab..c03805ee4f 100644
--- a/docs/resources/repo.md
+++ b/docs/resources/repo.md
@@ -5,7 +5,7 @@ subcategory: "Workspace"

This resource allows you to manage [Databricks Git folders](https://docs.databricks.com/en/repos/index.html) (formerly known as Databricks Repos).

--> **Note** To create a Git folder from a private repository you need to configure Git token as described in the [documentation](https://docs.databricks.com/en/repos/index.html#configure-your-git-integration-with-databricks). To set this token you can use [databricks_git_credential](git_credential.md) resource.
+-> To create a Git folder from a private repository, you need to configure a Git token as described in the [documentation](https://docs.databricks.com/en/repos/index.html#configure-your-git-integration-with-databricks). To set this token you can use the [databricks_git_credential](git_credential.md) resource.

## Example Usage

@@ -20,7 +20,7 @@ resource "databricks_repo" "nutter_in_home" {

## Argument Reference

--> **Note** Git folder in Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed repository won't be overwritten by Terraform, if there's no local changes to configuration. If Git folder in Databricks workspace is modified, application of configuration changes will fail.
+-> A Git folder in a Databricks workspace would only be changed if the Terraform state did change. This means that any manual changes to the managed repository won't be overwritten by Terraform if there are no local changes to the configuration. If the Git folder in the Databricks workspace is modified, application of configuration changes will fail.

The following arguments are supported:

diff --git a/docs/resources/restrict_workspace_admins_setting.md b/docs/resources/restrict_workspace_admins_setting.md
index 765825f866..988fed0052 100644
--- a/docs/resources/restrict_workspace_admins_setting.md
+++ b/docs/resources/restrict_workspace_admins_setting.md
@@ -4,7 +4,7 @@ subcategory: "Settings"

# databricks_restrict_workspace_admins_setting Resource

--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!

The `databricks_restrict_workspace_admins_setting` resource lets you control the capabilities of workspace admins.

@@ -20,7 +20,7 @@ With the status set to `RESTRICT_TOKENS_AND_JOB_RUN_AS`, workspace admins can:
2. Only change a job owner to themselves.
3. Only change the job run_as setting to themselves or a service principal on which they have the Service Principal User role.

--> **Note** Only account admins can update the setting. And the account admin must be part of the workspace to change the setting status.
+~> Only account admins can update the setting, and the account admin must be part of the workspace to change the setting status.

## Example Usage

diff --git a/docs/resources/schema.md b/docs/resources/schema.md
index 65a144c4e9..1f1442ee7d 100644
--- a/docs/resources/schema.md
+++ b/docs/resources/schema.md
@@ -3,7 +3,7 @@ subcategory: "Unity Catalog"
---
# databricks_schema Resource

--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!

Within a metastore, Unity Catalog provides a 3-level namespace for organizing data: Catalogs, Databases (also called Schemas), and Tables / Views.

diff --git a/docs/resources/service_principal.md b/docs/resources/service_principal.md
index 717b76acc7..cac064512a 100644
--- a/docs/resources/service_principal.md
+++ b/docs/resources/service_principal.md
@@ -11,9 +11,9 @@ There are different types of service principals:

* Databricks-managed - exists only inside the Databricks platform (all clouds) and couldn't be used for accessing non-Databricks services.
* Azure-managed - existing Azure service principal (enterprise application) is registered inside Databricks. It could be used to work with other Azure services.

--> **Note** To assign account level service principals to workspace use [databricks_mws_permission_assignment](mws_permission_assignment.md).
+-> To assign account-level service principals to a workspace, use [databricks_mws_permission_assignment](mws_permission_assignment.md).

--> **Note** Entitlements, like, `allow_cluster_create`, `allow_instance_pool_create`, `databricks_sql_access`, `workspace_access` applicable only for workspace-level service principals. Use [databricks_entitlements](entitlements.md) resource to assign entitlements inside a workspace to account-level service principals.
+-> Entitlements, like `allow_cluster_create`, `allow_instance_pool_create`, `databricks_sql_access`, and `workspace_access`, are applicable only to workspace-level service principals. Use the [databricks_entitlements](entitlements.md) resource to assign entitlements inside a workspace to account-level service principals.

To create service principals in the Databricks account, the provider must be configured with `host = "https://accounts.cloud.databricks.com"` on AWS deployments or `host = "https://accounts.azuredatabricks.net"` and authenticate using the supported authentication method for account operations.

diff --git a/docs/resources/service_principal_role.md b/docs/resources/service_principal_role.md
index 511089d7b0..f7ef4371d0 100644
--- a/docs/resources/service_principal_role.md
+++ b/docs/resources/service_principal_role.md
@@ -39,7 +39,7 @@ In addition to all arguments above, the following attributes are exported:

## Import

--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.

## Related Resources

diff --git a/docs/resources/service_principal_secret.md b/docs/resources/service_principal_secret.md
index f98abd9b3e..05f740d864 100644
--- a/docs/resources/service_principal_secret.md
+++ b/docs/resources/service_principal_secret.md
@@ -3,7 +3,7 @@ subcategory: "Security"
---
# databricks_service_principal_secret Resource

--> **Note** This resource can only be used with an account-level provider.
+-> This resource can only be used with an account-level provider.
With this resource you can create a secret for a given [Service Principals](https://docs.databricks.com/administration-guide/users-groups/service-principals.html).

diff --git a/docs/resources/share.md b/docs/resources/share.md
index 38252a8818..5dfb7128c0 100644
--- a/docs/resources/share.md
+++ b/docs/resources/share.md
@@ -3,7 +3,7 @@ subcategory: "Delta Sharing"
---
# databricks_share Resource

--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!

In Delta Sharing, a share is a read-only collection of tables and table partitions that a provider wants to share with one or more recipients. If your recipient uses a Unity Catalog-enabled Databricks workspace, you can also include notebook files, views (including dynamic views that restrict access at the row and column level), Unity Catalog volumes, and Unity Catalog models in a share.

@@ -11,7 +11,7 @@ In a Unity Catalog-enabled Databricks workspace, a share is a securable object r

## Example Usage

--> **Note** In Terraform configuration, it is recommended to define objects in alphabetical order of their `name` arguments, so that you get consistent and readable diff. Whenever objects are added or removed, or `name` is renamed, you'll observe a change in the majority of tasks. It's related to the fact that the current version of the provider treats `object` blocks as an ordered list. Alternatively, `object` block could have been an unordered set, though end-users would see the entire block replaced upon a change in single property of the task.
+-> In Terraform configuration, it is recommended to define objects in alphabetical order of their `name` arguments, so that you get a consistent and readable diff. Whenever objects are added or removed, or a `name` is changed, you'll observe a change in the majority of objects, because the current version of the provider treats `object` blocks as an ordered list. Alternatively, the `object` block could have been an unordered set, though end-users would then see the entire block replaced upon a change in a single property.

Creating a Delta Sharing share and add some existing tables to it

diff --git a/docs/resources/sql_alert.md b/docs/resources/sql_alert.md
index f523c2acc7..689a52a5d5 100644
--- a/docs/resources/sql_alert.md
+++ b/docs/resources/sql_alert.md
@@ -5,7 +5,7 @@ subcategory: "Databricks SQL"

This resource allows you to manage [Databricks SQL Alerts](https://docs.databricks.com/sql/user/queries/index.html).

-**Note:** To manage [SQLA resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your [databricks_group](group.md#databricks_sql_access) or [databricks_user](user.md#databricks_sql_access).
+-> To manage [SQLA resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your [databricks_group](group.md#databricks_sql_access) or [databricks_user](user.md#databricks_sql_access).
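One hedged way to satisfy the `databricks_sql_access` prerequisite is to grant the entitlement on a group and manage membership there; the `sql-users` name below is illustrative, not part of this changeset:

```hcl
resource "databricks_group" "sql_users" {
  display_name          = "sql-users" # illustrative name
  databricks_sql_access = true
}
```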
## Example Usage

diff --git a/docs/resources/sql_dashboard.md b/docs/resources/sql_dashboard.md
index 42551121a2..5c153f96c1 100644
--- a/docs/resources/sql_dashboard.md
+++ b/docs/resources/sql_dashboard.md
@@ -3,12 +3,12 @@ subcategory: "Databricks SQL"
---
# databricks_sql_dashboard Resource

--> **Note:** Please switch to [databricks_dashboard](dashboard.md) to author new AI/BI dashboards using the latest tooling
+-> Please switch to [databricks_dashboard](dashboard.md) to author new AI/BI dashboards using the latest tooling.

This resource is used to manage [Legacy dashboards](https://docs.databricks.com/sql/user/dashboards/index.html). To manage [SQL resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your [databricks_group](group.md#databricks_sql_access) or [databricks_user](user.md#databricks_sql_access).

--> **Note:** documentation for this resource is a work in progress.
+-> Documentation for this resource is a work in progress.

A dashboard may have one or more [widgets](sql_widget.md).

diff --git a/docs/resources/sql_permissions.md b/docs/resources/sql_permissions.md
index 663cd92dbe..43f754391e 100644
--- a/docs/resources/sql_permissions.md
+++ b/docs/resources/sql_permissions.md
@@ -3,7 +3,7 @@ subcategory: "Security"
---
# databricks_sql_permissions Resource

--> **Note** Please switch to [databricks_grants](grants.md) with Unity Catalog to manage data access, which provides a better and faster way for managing data security. `databricks_grants` resource *doesn't require a technical cluster to perform operations*. On workspaces with Unity Catalog enabled, you may run into errors such as `Error: cannot create sql permissions: cannot read current grants: For unity catalog, please specify the catalog name explicitly. E.g. SHOW GRANT ``your.address@email.com`` ON CATALOG main`. This happens if your `default_catalog_name` was set to a UC catalog instead of `hive_metastore`. The workaround is to re-assign the metastore again with the default catalog set to be `hive_metastore`. See [databricks_metastore_assignment](metastore_assignment.md).
+-> Please switch to [databricks_grants](grants.md) with Unity Catalog to manage data access, which provides a better and faster way of managing data security. The `databricks_grants` resource *doesn't require a technical cluster to perform operations*. On workspaces with Unity Catalog enabled, you may run into errors such as `Error: cannot create sql permissions: cannot read current grants: For unity catalog, please specify the catalog name explicitly. E.g. SHOW GRANT ``your.address@email.com`` ON CATALOG main`. This happens if your `default_catalog_name` was set to a UC catalog instead of `hive_metastore`. The workaround is to re-assign the metastore with the default catalog set to `hive_metastore`. See [databricks_metastore_assignment](metastore_assignment.md).

This resource manages data object access control lists in Databricks workspaces for things like tables, views, databases, and [more](https://docs.databricks.com/security/access-control/table-acls/object-privileges.html). In order to enable Table Access control, you have to login to the workspace as administrator, go to `Admin Console`, pick `Access Control` tab, click on `Enable` button in `Table Access Control` section, and click `Confirm`. The security guarantees of table access control **will only be effective if cluster access control is also turned on**.

Please make sure that no users can create clusters in your workspace and all [databricks_cluster](cluster.md) have approximately the following configuration:

diff --git a/docs/resources/sql_query.md b/docs/resources/sql_query.md
index 27e12e03ea..90120a28a2 100644
--- a/docs/resources/sql_query.md
+++ b/docs/resources/sql_query.md
@@ -5,7 +5,7 @@ subcategory: "Databricks SQL"

To manage [SQLA resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your [databricks_group](group.md#databricks_sql_access) or [databricks_user](user.md#databricks_sql_access).

-**Note:** documentation for this resource is a work in progress.
+-> Documentation for this resource is a work in progress.

A query may have one or more [visualizations](sql_visualization.md).

diff --git a/docs/resources/sql_table.md b/docs/resources/sql_table.md
index aeec6d9ce9..67483248d6 100644
--- a/docs/resources/sql_table.md
+++ b/docs/resources/sql_table.md
@@ -109,6 +109,46 @@ resource "databricks_sql_table" "thing_view" {
 }
 ```

+## Use an Identity Column
+
+```hcl
+resource "databricks_catalog" "sandbox" {
+  name = "sandbox"
+  comment = "this catalog is managed by terraform"
+  properties = {
+    purpose = "testing"
+  }
+}
+resource "databricks_schema" "things" {
+  catalog_name = databricks_catalog.sandbox.id
+  name = "things"
+  comment = "this database is managed by terraform"
+  properties = {
+    kind = "various"
+  }
+}
+resource "databricks_sql_table" "thing" {
+  provider = databricks.workspace
+  name = "quickstart_table"
+  catalog_name = databricks_catalog.sandbox.name
+  schema_name = databricks_schema.things.name
+  table_type = "MANAGED"
+  data_source_format = "DELTA"
+  storage_location = ""
+  column {
+    name = "id"
+    type = "bigint"
+    identity = "default"
+  }
+  column {
+    name = "name"
+    type = "string"
+    comment = "name of thing"
+  }
+  comment = "this table is managed by terraform"
+}
+```
+
## Argument Reference

The following arguments are supported:

@@ -137,6 +177,7 @@ Currently, changing the column definitions for a table will require dropping and

* `name` - User-visible name of column
* `type` - Column type spec (with metadata) as SQL text. Not supported for `VIEW` table_type.
+* `identity` - (Optional) Whether the field is an identity column. Can be `default`, `always`, or unset; it is unset by default.
* `comment` - (Optional) User-supplied free-form text.
* `nullable` - (Optional) Whether field is nullable (Default: `true`)

diff --git a/docs/resources/sql_visualization.md b/docs/resources/sql_visualization.md
index b9ea7d6c99..b9dcf3b3a6 100644
--- a/docs/resources/sql_visualization.md
+++ b/docs/resources/sql_visualization.md
@@ -5,7 +5,7 @@ subcategory: "Databricks SQL"

To manage [SQLA resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your [databricks_group](group.md#databricks_sql_access) or [databricks_user](user.md#databricks_sql_access).

-**Note:** documentation for this resource is a work in progress.
+-> Documentation for this resource is a work in progress.

A visualization is always tied to a [query](sql_query.md). Every query may have one or more visualizations.
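For the new `identity` column argument on `databricks_sql_table` documented above: the changeset's example uses `identity = "default"`; a minimal sketch of the `always` variant, assuming placeholder catalog, schema, and column names, could look like this:

```hcl
resource "databricks_sql_table" "sequence" {
  name               = "sequence_table" # placeholder names throughout
  catalog_name       = "sandbox"
  schema_name        = "things"
  table_type         = "MANAGED"
  data_source_format = "DELTA"

  column {
    name     = "id"
    type     = "bigint"
    identity = "always" # values are always engine-generated, unlike "default"
  }
  column {
    name = "event"
    type = "string"
  }
}
```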
diff --git a/docs/resources/sql_widget.md b/docs/resources/sql_widget.md
index e890142dd8..05fed72737 100644
--- a/docs/resources/sql_widget.md
+++ b/docs/resources/sql_widget.md
@@ -3,11 +3,11 @@ subcategory: "Databricks SQL"
---
# databricks_sql_widget Resource

--> **Note:** Please switch to [databricks_dashboard](dashboard.md) to author new AI/BI dashboards using the latest tooling
+-> Please switch to [databricks_dashboard](dashboard.md) to author new AI/BI dashboards using the latest tooling.

To manage [SQL resources](https://docs.databricks.com/sql/get-started/concepts.html) you must have `databricks_sql_access` on your [databricks_group](group.md#databricks_sql_access) or [databricks_user](user.md#databricks_sql_access).

--> **Note:** documentation for this resource is a work in progress.
+-> Documentation for this resource is a work in progress.

A widget is always tied to a [Legacy dashboard](sql_dashboard.md). Every dashboard may have one or more widgets.

diff --git a/docs/resources/storage_credential.md b/docs/resources/storage_credential.md
index b57120e8dd..87d90b853b 100644
--- a/docs/resources/storage_credential.md
+++ b/docs/resources/storage_credential.md
@@ -3,7 +3,7 @@ subcategory: "Unity Catalog"
---
# databricks_storage_credential Resource

--> **Note** This resource can be used with an account or workspace-level provider.
+-> This resource can be used with an account or workspace-level provider.

To work with external tables, Unity Catalog introduces two new objects to access and work with external cloud storage:

diff --git a/docs/resources/system_schema.md b/docs/resources/system_schema.md
index 80634f0859..02945703f9 100644
--- a/docs/resources/system_schema.md
+++ b/docs/resources/system_schema.md
@@ -3,9 +3,9 @@ subcategory: "Unity Catalog"
---
# databricks_system_schema Resource

--> **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html).
+-> This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html).

--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!

Manages system tables enablement. System tables are a Databricks-hosted analytical store of your account’s operational data. System tables can be used for historical observability across your account. System tables must be enabled by an account admin.

diff --git a/docs/resources/token.md b/docs/resources/token.md
index 307a604c11..281399cffc 100644
--- a/docs/resources/token.md
+++ b/docs/resources/token.md
@@ -62,4 +62,4 @@ In addition to all arguments above, the following attributes are exported:

## Import

--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.

diff --git a/docs/resources/user.md b/docs/resources/user.md
index 03e16365c3..1e5633e541 100644
--- a/docs/resources/user.md
+++ b/docs/resources/user.md
@@ -5,9 +5,9 @@ subcategory: "Security"

This resource allows you to manage [users in Databricks Workspace](https://docs.databricks.com/administration-guide/users-groups/users.html), [Databricks Account Console](https://accounts.cloud.databricks.com/) or [Azure Databricks Account Console](https://accounts.azuredatabricks.net). You can also [associate](group_member.md) Databricks users to [databricks_group](group.md). Upon user creation the user will receive a welcome email.
You can also get information about caller identity using [databricks_current_user](../data-sources/current_user.md) data source.

--> **Note** To assign account level users to workspace use [databricks_mws_permission_assignment](mws_permission_assignment.md).
+-> To assign account-level users to a workspace, use [databricks_mws_permission_assignment](mws_permission_assignment.md).

--> **Note** Entitlements, like, `allow_cluster_create`, `allow_instance_pool_create`, `databricks_sql_access`, `workspace_access` applicable only for workspace-level users. Use [databricks_entitlements](entitlements.md) resource to assign entitlements inside a workspace to account-level users.
+-> Entitlements, like `allow_cluster_create`, `allow_instance_pool_create`, `databricks_sql_access`, and `workspace_access`, are applicable only to workspace-level users. Use the [databricks_entitlements](entitlements.md) resource to assign entitlements inside a workspace to account-level users.

To create users in the Databricks account, the provider must be configured with `host = "https://accounts.cloud.databricks.com"` on AWS deployments or `host = "https://accounts.azuredatabricks.net"` and authenticate using [AAD tokens](https://registry.terraform.io/providers/databricks/databricks/latest/docs#special-configurations-for-azure) on Azure deployments.

diff --git a/docs/resources/user_instance_profile.md b/docs/resources/user_instance_profile.md
index 88e6016c8e..1b050b386f 100644
--- a/docs/resources/user_instance_profile.md
+++ b/docs/resources/user_instance_profile.md
@@ -39,7 +39,7 @@ In addition to all arguments above, the following attributes are exported:

## Import

--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.

## Related Resources

diff --git a/docs/resources/user_role.md b/docs/resources/user_role.md
index 5921b2c886..8ece375046 100644
--- a/docs/resources/user_role.md
+++ b/docs/resources/user_role.md
@@ -59,7 +59,7 @@ In addition to all arguments above, the following attributes are exported:

## Import

--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.

## Related Resources

diff --git a/docs/resources/vector_search_endpoint.md b/docs/resources/vector_search_endpoint.md
index c90de0c25a..4f167bc9f6 100644
--- a/docs/resources/vector_search_endpoint.md
+++ b/docs/resources/vector_search_endpoint.md
@@ -3,7 +3,7 @@ subcategory: "Mosaic AI Vector Search"
---
# databricks_vector_search_endpoint Resource

--> **Note** This resource can only be used on a Unity Catalog-enabled workspace!
+-> This resource can only be used on a Unity Catalog-enabled workspace!

This resource allows you to create [Mosaic AI Vector Search Endpoint](https://docs.databricks.com/en/generative-ai/vector-search.html) in Databricks. Mosaic AI Vector Search is a serverless similarity search engine that allows you to store a vector representation of your data, including metadata, in a vector database. The Mosaic AI Vector Search Endpoint is used to create and access vector search indexes.

diff --git a/docs/resources/vector_search_index.md b/docs/resources/vector_search_index.md
index 0de0ac2c1f..d06db90637 100644
--- a/docs/resources/vector_search_index.md
+++ b/docs/resources/vector_search_index.md
@@ -3,7 +3,7 @@ subcategory: "Mosaic AI Vector Search"
---
# databricks_vector_search_index Resource

--> **Note** This resource can only be used on a Unity Catalog-enabled workspace!
+-> This resource can only be used on a Unity Catalog-enabled workspace!

This resource allows you to create [Mosaic AI Vector Search Index](https://docs.databricks.com/en/generative-ai/create-query-vector-search.html) in Databricks. Mosaic AI Vector Search is a serverless similarity search engine that allows you to store a vector representation of your data, including metadata, in a vector database. The Mosaic AI Vector Search Index provides the ability to search data in the linked Delta Table.

diff --git a/docs/resources/volume.md b/docs/resources/volume.md
index b116e42129..e95f54d8f3 100644
--- a/docs/resources/volume.md
+++ b/docs/resources/volume.md
@@ -3,9 +3,9 @@ subcategory: "Unity Catalog"
---
# databricks_volume (Resource)

--> **Public Preview** This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html).
+-> This feature is in [Public Preview](https://docs.databricks.com/release-notes/release-types.html).

--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!

Volumes are Unity Catalog objects representing a logical volume of storage in a cloud object storage location. Volumes provide capabilities for accessing, storing, governing, and organizing files. While tables provide governance over tabular datasets, volumes add governance over non-tabular datasets. You can use volumes to store and access files in any format, including structured, semi-structured, and unstructured data.

diff --git a/docs/resources/workspace_binding.md b/docs/resources/workspace_binding.md
index 5a649bd9e4..e7dd0105af 100644
--- a/docs/resources/workspace_binding.md
+++ b/docs/resources/workspace_binding.md
@@ -3,17 +3,15 @@ subcategory: "Unity Catalog"
---
# databricks_workspace_binding Resource

--> **Note** This resource can only be used with a workspace-level provider!
+-> This resource can only be used with a workspace-level provider!

If you use workspaces to isolate user data access, you may want to limit access to catalog, external locations or storage credentials from specific workspaces in your account, also known as workspace binding

By default, Databricks assigns the securable to all workspaces attached to the current metastore. By using `databricks_workspace_binding`, the securable will be unassigned from all workspaces and only assigned explicitly using this resource.

--> **Note**
- To use this resource the securable must have its isolation mode set to `ISOLATED` (for [databricks_catalog](catalog.md)) or `ISOLATION_MODE_ISOLATED` (for (for [databricks_external_location](external_location.md) or [databricks_storage_credential](storage_credential.md)) for the `isolation_mode` attribute. Alternatively, the isolation mode can be set using the UI or API by following [this guide](https://docs.databricks.com/data-governance/unity-catalog/create-catalogs.html#configuration), [this guide](https://docs.databricks.com/en/connect/unity-catalog/external-locations.html#workspace-binding) or [this guide](https://docs.databricks.com/en/connect/unity-catalog/storage-credentials.html#optional-assign-a-storage-credential-to-specific-workspaces).
+-> To use this resource, the securable must have its `isolation_mode` attribute set to `ISOLATED` (for [databricks_catalog](catalog.md)) or `ISOLATION_MODE_ISOLATED` (for [databricks_external_location](external_location.md) or [databricks_storage_credential](storage_credential.md)). Alternatively, the isolation mode can be set using the UI or API by following [this guide](https://docs.databricks.com/data-governance/unity-catalog/create-catalogs.html#configuration), [this guide](https://docs.databricks.com/en/connect/unity-catalog/external-locations.html#workspace-binding) or [this guide](https://docs.databricks.com/en/connect/unity-catalog/storage-credentials.html#optional-assign-a-storage-credential-to-specific-workspaces).

--> **Note**
- If the securable's isolation mode was set to `ISOLATED` using Terraform then the securable will have been automatically bound to the workspace it was created from.
+-> If the securable's isolation mode was set to `ISOLATED` using Terraform, then the securable will have been automatically bound to the workspace it was created from.

## Example Usage

diff --git a/docs/resources/workspace_conf.md b/docs/resources/workspace_conf.md
index 0986f8a133..6e02461381 100644
--- a/docs/resources/workspace_conf.md
+++ b/docs/resources/workspace_conf.md
@@ -4,11 +4,11 @@ subcategory: "Workspace"

# databricks_workspace_conf Resource

--> **Note** This resource has an evolving API, which may change in future versions of the provider.
+~> This resource has an evolving API, which may change in future versions of the provider.

Manages workspace configuration for expert usage. Currently, more than one instance of resource can exist in Terraform state, though there's no deterministic behavior, when they manage the same property. We strongly recommend to use a single `databricks_workspace_conf` per workspace.

--> **Note** Deleting `databricks_workspace_conf` resources may fail depending on the configuration properties set, including but not limited to `enableIpAccessLists`, `enableGp3`, and `maxTokenLifetimeDays`. The provider will print a warning if this occurs. You can verify the workspace configuration by reviewing [the workspace settings in the UI](https://docs.databricks.com/en/admin/workspace-settings/index.html).
+-> Deleting `databricks_workspace_conf` resources may fail depending on the configuration properties set, including but not limited to `enableIpAccessLists`, `enableGp3`, and `maxTokenLifetimeDays`. The provider will print a warning if this occurs. You can verify the workspace configuration by reviewing [the workspace settings in the UI](https://docs.databricks.com/en/admin/workspace-settings/index.html).

## Example Usage

@@ -36,4 +36,4 @@ The following arguments are available:

## Import

--> **Note** Importing this resource is not currently supported.
+!> Importing this resource is not currently supported.

diff --git a/docs/resources/workspace_file.md b/docs/resources/workspace_file.md
index f7cbc8e1de..997e7eac24 100644
--- a/docs/resources/workspace_file.md
+++ b/docs/resources/workspace_file.md
@@ -34,7 +34,7 @@ resource "databricks_workspace_file" "init_script" {

## Argument Reference

--> **Note** Files in Databricks workspace would only be changed, if Terraform stage did change. This means that any manual changes to managed workspace files won't be overwritten by Terraform, if there's no local change to file sources. Workspace files are identified by their path, so changing file's name manually on the workspace and then applying Terraform state would result in creation of workspace file from Terraform state.
+-> Files in a Databricks workspace would only be changed if the Terraform state did change. This means that any manual changes to managed workspace files won't be overwritten by Terraform if there's no local change to file sources. Workspace files are identified by their path, so changing a file's name manually on the workspace and then applying the Terraform state would result in re-creation of the workspace file from the Terraform state.

The size of a workspace file source code must not exceed a few megabytes.

The following arguments are supported:

diff --git a/exporter/exporter_test.go b/exporter/exporter_test.go
index a027005090..43c6c10916 100644
--- a/exporter/exporter_test.go
+++ b/exporter/exporter_test.go
@@ -862,7 +862,7 @@ func TestImportingClusters(t *testing.T) {
},
{
Method: "GET",
- Resource: "/api/2.0/permissions/clusters/test1",
+ Resource: "/api/2.0/permissions/clusters/test1?",
Response: getJSONObject("test-data/get-cluster-permissions-test1-response.json"),
},
{
@@ -913,7 +913,7 @@
},
{
Method: "GET",
- Resource: "/api/2.0/permissions/clusters/test2",
+ Resource: "/api/2.0/permissions/clusters/test2?",
Response: getJSONObject("test-data/get-cluster-permissions-test2-response.json"),
},
{
@@ -923,7 +923,7 @@
},
{
Method: "GET",
- Resource: "/api/2.0/permissions/cluster-policies/123",
+ Resource: "/api/2.0/permissions/cluster-policies/123?",
Response: getJSONObject("test-data/get-cluster-policy-permissions.json"),
},
{
@@ -949,7 +949,7 @@
},
{
Method: "GET",
- Resource: "/api/2.0/permissions/clusters/awscluster",
+ Resource: "/api/2.0/permissions/clusters/awscluster?",
Response: getJSONObject("test-data/get-cluster-permissions-awscluster-response.json"),
},
{
@@ -971,7 +971,7 @@
},
{
Method: "GET",
- Resource: "/api/2.0/permissions/instance-pools/pool1",
+ Resource: "/api/2.0/permissions/instance-pools/pool1?",
ReuseRequest: true,
Response: getJSONObject("test-data/get-job-permissions-14.json"),
},
@@ -1089,7 +1089,7 @@ func TestImportingJobs_JobList(t *testing.T) {
},
{
Method: "GET",
- Resource: "/api/2.0/permissions/jobs/14",
+ Resource: "/api/2.0/permissions/jobs/14?",
Response: getJSONObject("test-data/get-job-permissions-14.json"),
},
{
@@ -1112,7 +1112,7 @@
},
{
Method: "GET",
- Resource: "/api/2.0/permissions/instance-pools/pool1",
+ Resource: "/api/2.0/permissions/instance-pools/pool1?",
ReuseRequest: true,
Response: getJSONObject("test-data/get-job-permissions-14.json"),
},
@@ -1202,7 +1202,7 @@
},
{
Method: "GET",
- Resource: "/api/2.0/permissions/cluster-policies/123",
+ Resource: "/api/2.0/permissions/cluster-policies/123?",
Response: getJSONObject("test-data/get-cluster-policy-permissions.json"),
},
{
@@ -1218,7 +1218,7 @@
},
{
Method: "GET",
- Resource: "/api/2.0/permissions/instance-pools/pool1",
+ Resource: "/api/2.0/permissions/instance-pools/pool1?",
ReuseRequest: true,
Response: getJSONObject("test-data/get-job-permissions-14.json"),
},
@@ -1307,7 +1307,7 @@ func TestImportingJobs_JobListMultiTask(t *testing.T) {
},
{
Method: "GET",
- Resource: "/api/2.0/permissions/jobs/14",
+ Resource: "/api/2.0/permissions/jobs/14?",
Response: getJSONObject("test-data/get-job-permissions-14.json"),
ReuseRequest: true,
},
@@ -1331,7 +1331,7 @@
},
{
Method: "GET",
- Resource: "/api/2.0/permissions/instance-pools/pool1",
+ Resource: "/api/2.0/permissions/instance-pools/pool1?",
ReuseRequest: true,
Response:
getJSONObject("test-data/get-job-permissions-14.json"), }, @@ -1470,7 +1470,7 @@ func TestImportingJobs_JobListMultiTask(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/cluster-policies/123", + Resource: "/api/2.0/permissions/cluster-policies/123?", Response: getJSONObject("test-data/get-cluster-policy-permissions.json"), }, { @@ -1486,7 +1486,7 @@ func TestImportingJobs_JobListMultiTask(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/instance-pools/pool1", + Resource: "/api/2.0/permissions/instance-pools/pool1?", ReuseRequest: true, Response: getJSONObject("test-data/get-job-permissions-14.json"), }, @@ -1777,7 +1777,7 @@ func TestImportingRepos(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/repos/121232342", + Resource: "/api/2.0/permissions/repos/121232342?", Response: getJSONObject("test-data/get-repo-permissions.json"), }, }, @@ -1902,7 +1902,7 @@ func TestImportingSqlObjects(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/directories/4451965692354143", + Resource: "/api/2.0/permissions/directories/4451965692354143?", Response: getJSONObject("test-data/get-directory-permissions.json"), }, { @@ -1933,7 +1933,7 @@ func TestImportingSqlObjects(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/sql/warehouses/f562046bc1272886", + Resource: "/api/2.0/permissions/sql/warehouses/f562046bc1272886?", Response: getJSONObject("test-data/get-sql-endpoint-permissions.json"), }, { @@ -1962,12 +1962,12 @@ func TestImportingSqlObjects(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/preview/sql/permissions/queries/16c4f969-eea0-4aad-8f82-03d79b078dcc", + Resource: "/api/2.0/permissions/sql/queries/16c4f969-eea0-4aad-8f82-03d79b078dcc?", Response: getJSONObject("test-data/get-sql-query-permissions.json"), }, { Method: "GET", - Resource: "/api/2.0/preview/sql/permissions/dashboards/9cb0c8f5-6262-4a1f-a741-2181de76028f", + Resource: "/api/2.0/permissions/dbsql-dashboards/9cb0c8f5-6262-4a1f-a741-2181de76028f?", Response: getJSONObject("test-data/get-sql-dashboard-permissions.json"), }, { @@ -1983,7 +1983,7 @@ func TestImportingSqlObjects(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/preview/sql/permissions/alerts/3cf91a42-6217-4f3c-a6f0-345d489051b9", + Resource: "/api/2.0/permissions/sql/alerts/3cf91a42-6217-4f3c-a6f0-345d489051b9?", Response: getJSONObject("test-data/get-sql-alert-permissions.json"), }, }, @@ -2039,7 +2039,7 @@ func TestImportingDLTPipelines(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/repos/123", + Resource: "/api/2.0/permissions/repos/123?", Response: getJSONObject("test-data/get-repo-permissions.json"), }, { @@ -2085,12 +2085,12 @@ func TestImportingDLTPipelines(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/pipelines/123", + Resource: "/api/2.0/permissions/pipelines/123?", Response: getJSONObject("test-data/get-pipeline-permissions.json"), }, { Method: "GET", - Resource: "/api/2.0/permissions/notebooks/123", + Resource: "/api/2.0/permissions/notebooks/123?", Response: getJSONObject("test-data/get-notebook-permissions.json"), }, { @@ -2169,7 +2169,7 @@ func TestImportingDLTPipelines(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/permissions/files/789", + Resource: "/api/2.0/permissions/files/789?", Response: getJSONObject("test-data/get-workspace-file-permissions.json"), }, }, @@ -2257,7 +2257,7 @@ func TestImportingDLTPipelinesMatchingOnly(t *testing.T) { }, { Method: "GET", - Resource: 
"/api/2.0/permissions/pipelines/123", + Resource: "/api/2.0/permissions/pipelines/123?", Response: getJSONObject("test-data/get-pipeline-permissions.json"), }, { diff --git a/exporter/importables.go b/exporter/importables.go index d2cb8d0f36..5ea235c335 100644 --- a/exporter/importables.go +++ b/exporter/importables.go @@ -32,7 +32,7 @@ import ( "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/jobs" "github.com/databricks/terraform-provider-databricks/mws" - "github.com/databricks/terraform-provider-databricks/permissions" + "github.com/databricks/terraform-provider-databricks/permissions/entity" tfpipelines "github.com/databricks/terraform-provider-databricks/pipelines" "github.com/databricks/terraform-provider-databricks/repos" tfsettings "github.com/databricks/terraform-provider-databricks/settings" @@ -1184,7 +1184,7 @@ var resourcesMap map[string]importable = map[string]importable{ return (r.Data.Get("access_control.#").(int) == 0) }, Import: func(ic *importContext, r *resource) error { - var permissions permissions.PermissionsEntity + var permissions entity.PermissionsEntity s := ic.Resources["databricks_permissions"].Schema common.DataToStructPointer(r.Data, s, &permissions) for _, ac := range permissions.AccessControlList { diff --git a/exporter/importables_test.go b/exporter/importables_test.go index 544322a745..6bea1a8cf0 100644 --- a/exporter/importables_test.go +++ b/exporter/importables_test.go @@ -25,6 +25,7 @@ import ( "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/jobs" "github.com/databricks/terraform-provider-databricks/permissions" + "github.com/databricks/terraform-provider-databricks/permissions/entity" "github.com/databricks/terraform-provider-databricks/internal/providers/sdkv2" dlt_pipelines "github.com/databricks/terraform-provider-databricks/pipelines" @@ -220,8 +221,8 @@ func TestPermissions(t *testing.T) { assert.Equal(t, "abc", name) d.MarkNewResource() - err := common.StructToData(permissions.PermissionsEntity{ - AccessControlList: []permissions.AccessControlChange{ + err := common.StructToData(entity.PermissionsEntity{ + AccessControlList: []iam.AccessControlRequest{ { UserName: "a", }, diff --git a/finops/resource_budget.go b/finops/resource_budget.go new file mode 100644 index 0000000000..3213907929 --- /dev/null +++ b/finops/resource_budget.go @@ -0,0 +1,102 @@ +package finops + +import ( + "context" + "strings" + + "github.com/databricks/databricks-sdk-go/service/billing" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +func ResourceBudget() common.Resource { + s := common.StructToSchema(billing.BudgetConfiguration{}, func(m map[string]*schema.Schema) map[string]*schema.Schema { + common.CustomizeSchemaPath(m, "display_name").SetValidateFunc(validation.StringLenBetween(1, 128)) + for _, p := range []string{"account_id", "budget_configuration_id", "create_time", "update_time"} { + common.CustomizeSchemaPath(m, p).SetComputed() + } + common.CustomizeSchemaPath(m, "alert_configurations", "alert_configuration_id").SetComputed() + common.CustomizeSchemaPath(m, "alert_configurations", "action_configurations", "action_configuration_id").SetComputed() + // We need SuppressDiff because API returns a string representation of BigDecimal with a lot + // of trailing 0s, etc. 
+ common.CustomizeSchemaPath(m, "alert_configurations", "quantity_threshold").SetCustomSuppressDiff(func(k, old, new string, d *schema.ResourceData) bool { + normalize := func(v string) string { + if strings.Contains(v, ".") { + v = strings.TrimRight(v, "0") + v = strings.TrimSuffix(v, ".") + } + return v + } + return normalize(old) == normalize(new) + }) + return m + }) + p := common.NewPairID("account_id", "budget_configuration_id") + return common.Resource{ + Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + var create billing.CreateBudgetConfigurationBudget + common.DataToStructPointer(d, s, &create) + acc, err := c.AccountClient() + if err != nil { + return err + } + budget, err := acc.Budgets.Create(ctx, billing.CreateBudgetConfigurationRequest{Budget: create}) + if err != nil { + return err + } + d.Set("budget_configuration_id", budget.Budget.BudgetConfigurationId) + d.Set("account_id", c.Config.AccountID) + common.StructToData(budget.Budget, s, d) + p.Pack(d) + return nil + }, + Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + _, id, err := p.Unpack(d) + if err != nil { + return err + } + acc, err := c.AccountClient() + if err != nil { + return err + } + budget, err := acc.Budgets.GetByBudgetId(ctx, id) + if err != nil { + return err + } + return common.StructToData(budget.Budget, s, d) + }, + Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + var update billing.UpdateBudgetConfigurationBudget + _, id, err := p.Unpack(d) + if err != nil { + return err + } + common.DataToStructPointer(d, s, &update) + acc, err := c.AccountClient() + if err != nil { + return err + } + budget, err := acc.Budgets.Update(ctx, billing.UpdateBudgetConfigurationRequest{ + Budget: update, + BudgetId: id, + }) + if err != nil { + return err + } + return common.StructToData(budget.Budget, s, d) + }, + Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { + _, id, err := p.Unpack(d) + if err != nil { + return err + } + acc, err := c.AccountClient() + if err != nil { + return err + } + return acc.Budgets.DeleteByBudgetId(ctx, id) + }, + Schema: s, + } +} diff --git a/finops/resource_budget_test.go b/finops/resource_budget_test.go new file mode 100644 index 0000000000..311397155c --- /dev/null +++ b/finops/resource_budget_test.go @@ -0,0 +1,243 @@ +package finops + +import ( + "fmt" + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/billing" + "github.com/stretchr/testify/mock" + + "github.com/databricks/terraform-provider-databricks/qa" +) + +func getTestBudget() *billing.BudgetConfiguration { + return &billing.BudgetConfiguration{ + AccountId: "account_id", + AlertConfigurations: []billing.AlertConfiguration{ + { + ActionConfigurations: []billing.ActionConfiguration{ + { + ActionType: billing.ActionConfigurationTypeEmailNotification, + Target: "me@databricks.com", + }, + }, + QuantityThreshold: "840.840000000000000000", + QuantityType: billing.AlertConfigurationQuantityTypeListPriceDollarsUsd, + TimePeriod: billing.AlertConfigurationTimePeriodMonth, + TriggerType: billing.AlertConfigurationTriggerTypeCumulativeSpendingExceeded, + }, + }, + Filter: &billing.BudgetConfigurationFilter{ + Tags: []billing.BudgetConfigurationFilterTagClause{ + { + Key: "Environment", + Value: &billing.BudgetConfigurationFilterClause{ + Operator: 
billing.BudgetConfigurationFilterOperatorIn, + Values: []string{"Testing"}, + }, + }, + }, + WorkspaceId: &billing.BudgetConfigurationFilterWorkspaceIdClause{ + Operator: billing.BudgetConfigurationFilterOperatorIn, + Values: []int64{ + 1234567890098765, + }, + }, + }, + BudgetConfigurationId: "budget_configuration_id", + DisplayName: "budget_name", + } +} + +func TestResourceBudgetCreate(t *testing.T) { + qa.ResourceFixture{ + MockAccountClientFunc: func(a *mocks.MockAccountClient) { + api := a.GetMockbudgetsAPI().EXPECT() + api.Create(mock.Anything, billing.CreateBudgetConfigurationRequest{ + Budget: billing.CreateBudgetConfigurationBudget{ + AlertConfigurations: []billing.CreateBudgetConfigurationBudgetAlertConfigurations{ + { + ActionConfigurations: []billing.CreateBudgetConfigurationBudgetActionConfigurations{ + { + ActionType: getTestBudget().AlertConfigurations[0].ActionConfigurations[0].ActionType, + Target: getTestBudget().AlertConfigurations[0].ActionConfigurations[0].Target, + }, + }, + QuantityThreshold: "840.84", + QuantityType: getTestBudget().AlertConfigurations[0].QuantityType, + TimePeriod: getTestBudget().AlertConfigurations[0].TimePeriod, + TriggerType: getTestBudget().AlertConfigurations[0].TriggerType, + }, + }, + DisplayName: getTestBudget().DisplayName, + Filter: getTestBudget().Filter, + }, + }).Return(&billing.CreateBudgetConfigurationResponse{Budget: getTestBudget()}, nil) + api.GetByBudgetId(mock.Anything, "budget_configuration_id").Return( + &billing.GetBudgetConfigurationResponse{Budget: getTestBudget()}, nil, + ) + }, + Create: true, + AccountID: "account_id", + HCL: ` + display_name = "budget_name" + + alert_configurations { + time_period = "MONTH" + trigger_type = "CUMULATIVE_SPENDING_EXCEEDED" + quantity_type = "LIST_PRICE_DOLLARS_USD" + quantity_threshold = "840.84" + + action_configurations { + action_type = "EMAIL_NOTIFICATION" + target = "me@databricks.com" + } + } + + filter { + tags { + key = "Environment" + value { + operator = "IN" + values = ["Testing"] + } + } + + workspace_id { + operator = "IN" + values = [ + 1234567890098765 + ] + } + } + `, + Resource: ResourceBudget(), + }.ApplyAndExpectData(t, map[string]any{ + "display_name": "budget_name", + "id": "account_id|budget_configuration_id", + "alert_configurations.#": 1, + "filter.#": 1, + }) +} + +func TestResourceBudgetRead(t *testing.T) { + qa.ResourceFixture{ + MockAccountClientFunc: func(a *mocks.MockAccountClient) { + a.GetMockbudgetsAPI().EXPECT(). + GetByBudgetId(mock.Anything, "budget_configuration_id"). 
+ Return(&billing.GetBudgetConfigurationResponse{Budget: getTestBudget()}, nil) + }, + Resource: ResourceBudget(), + Read: true, + New: true, + AccountID: "account_id", + ID: "account_id|budget_configuration_id", + }.ApplyAndExpectData(t, map[string]any{ + "display_name": "budget_name", + "id": "account_id|budget_configuration_id", + "alert_configurations.#": 1, + "filter.#": 1, + }) +} + +func TestResourceBudgetRead_UnpackError(t *testing.T) { + qa.ResourceFixture{ + Resource: ResourceBudget(), + Read: true, + New: true, + AccountID: "account_id", + ID: "budget_configuration_id", + }.ExpectError(t, "invalid ID: budget_configuration_id") +} + +func TestResourceBudgetUpdate(t *testing.T) { + qa.ResourceFixture{ + MockAccountClientFunc: func(a *mocks.MockAccountClient) { + api := a.GetMockbudgetsAPI().EXPECT() + api.Update(mock.Anything, billing.UpdateBudgetConfigurationRequest{ + Budget: billing.UpdateBudgetConfigurationBudget{ + AccountId: getTestBudget().AccountId, + AlertConfigurations: []billing.AlertConfiguration{ + { + ActionConfigurations: []billing.ActionConfiguration{ + { + ActionType: getTestBudget().AlertConfigurations[0].ActionConfigurations[0].ActionType, + Target: getTestBudget().AlertConfigurations[0].ActionConfigurations[0].Target, + }, + }, + QuantityThreshold: "840.84", + QuantityType: getTestBudget().AlertConfigurations[0].QuantityType, + TimePeriod: getTestBudget().AlertConfigurations[0].TimePeriod, + TriggerType: getTestBudget().AlertConfigurations[0].TriggerType, + }, + }, + BudgetConfigurationId: getTestBudget().BudgetConfigurationId, + DisplayName: fmt.Sprintf("%s_update", getTestBudget().DisplayName), + Filter: getTestBudget().Filter, + }, + BudgetId: "budget_configuration_id", + }).Return(&billing.UpdateBudgetConfigurationResponse{Budget: getTestBudget()}, nil) + api.GetByBudgetId(mock.Anything, "budget_configuration_id").Return( + &billing.GetBudgetConfigurationResponse{Budget: &billing.BudgetConfiguration{ + AccountId: getTestBudget().AccountId, + AlertConfigurations: getTestBudget().AlertConfigurations, + BudgetConfigurationId: getTestBudget().BudgetConfigurationId, + DisplayName: fmt.Sprintf("%s_update", getTestBudget().DisplayName), + Filter: getTestBudget().Filter, + }}, nil, + ) + }, + Resource: ResourceBudget(), + Update: true, + HCL: ` + display_name = "budget_name_update" + + alert_configurations { + time_period = "MONTH" + trigger_type = "CUMULATIVE_SPENDING_EXCEEDED" + quantity_type = "LIST_PRICE_DOLLARS_USD" + quantity_threshold = "840.84" + + action_configurations { + action_type = "EMAIL_NOTIFICATION" + target = "me@databricks.com" + } + } + + filter { + tags { + key = "Environment" + value { + operator = "IN" + values = ["Testing"] + } + } + + workspace_id { + operator = "IN" + values = [ + 1234567890098765 + ] + } + } + `, + AccountID: "account_id", + ID: "account_id|budget_configuration_id", + }.ApplyAndExpectData(t, map[string]any{ + "display_name": "budget_name_update", + "id": "account_id|budget_configuration_id", + }) +} + +func TestResourceBudgetDelete(t *testing.T) { + qa.ResourceFixture{ + MockAccountClientFunc: func(a *mocks.MockAccountClient) { + a.GetMockbudgetsAPI().EXPECT().DeleteByBudgetId(mock.Anything, "budget_configuration_id").Return(nil) + }, + Resource: ResourceBudget(), + AccountID: "account_id", + Delete: true, + ID: "account_id|budget_configuration_id", + }.ApplyAndExpectData(t, nil) +} diff --git a/go.mod b/go.mod index 8f5de34e8d..e01145f07e 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module 
github.com/databricks/terraform-provider-databricks go 1.22 require ( - github.com/databricks/databricks-sdk-go v0.46.0 + github.com/databricks/databricks-sdk-go v0.48.0 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 github.com/hashicorp/hcl v1.0.0 diff --git a/go.sum b/go.sum index 9cace277b5..dfd13d335a 100644 --- a/go.sum +++ b/go.sum @@ -26,8 +26,8 @@ github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBS github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.46.0 h1:D0TxmtSVAOsdnfzH4OGtAmcq+8TyA7Z6fA6JEYhupeY= -github.com/databricks/databricks-sdk-go v0.46.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.48.0 h1:46KtsnRo+FGhC3izUXbpL0PXBNomvsdignYDhJZlm9s= +github.com/databricks/databricks-sdk-go v0.48.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/acceptance/budget_test.go b/internal/acceptance/budget_test.go new file mode 100644 index 0000000000..44b8262de9 --- /dev/null +++ b/internal/acceptance/budget_test.go @@ -0,0 +1,63 @@ +package acceptance + +import ( + "fmt" + "testing" +) + +var ( + budgetTemplate = `resource "databricks_budget" "this" { + display_name = "tf-{var.RANDOM}" + + alert_configurations { + time_period = "MONTH" + trigger_type = "CUMULATIVE_SPENDING_EXCEEDED" + quantity_type = "LIST_PRICE_DOLLARS_USD" + quantity_threshold = "%s" + + action_configurations { + action_type = "EMAIL_NOTIFICATION" + target = "me@databricks.com" + } + } + + filter { + tags { + key = "Environment" + value { + operator = "IN" + values = ["Testing"] + } + } + + workspace_id { + operator = "IN" + values = [ + 1234567890098765 + ] + } + } + }` +) + +func TestMwsAccBudgetCreate(t *testing.T) { + loadAccountEnv(t) + if isGcp(t) { + skipf(t)("not available on GCP") + } + AccountLevel(t, Step{ + Template: fmt.Sprintf(budgetTemplate, "840"), + }) +} + +func TestMwsAccBudgetUpdate(t *testing.T) { + loadAccountEnv(t) + if isGcp(t) { + skipf(t)("not available on GCP") + } + AccountLevel(t, Step{ + Template: fmt.Sprintf(budgetTemplate, "840"), + }, Step{ + Template: fmt.Sprintf(budgetTemplate, "940"), + }) +} diff --git a/internal/acceptance/dashboard_test.go b/internal/acceptance/dashboard_test.go index 5fbf28b03a..49118c9455 100644 --- a/internal/acceptance/dashboard_test.go +++ b/internal/acceptance/dashboard_test.go @@ -58,7 +58,7 @@ resource "databricks_permissions" "dashboard_usage" { return templateString } -// Altough EmbedCredentials is an optional field, please specify its value if you want to modify it. +// Although EmbedCredentials is an optional field, please specify its value if you want to modify it. func (t *templateStruct) SetAttributes(mapper map[string]string) templateStruct { // Switch case for each attribute. 
If it is set in the mapper, set it in the struct if val, ok := mapper["display_name"]; ok { @@ -491,3 +491,19 @@ func TestAccDashboardTestAll(t *testing.T) { }), }) } + +func TestAccDashboardWithWorkspacePrefix(t *testing.T) { + var template templateStruct + + // Test that the dashboard can use a /Workspace prefix on the parent path and not trigger recreation. + // If this does NOT work, the test fails with an error that the non-refresh plan is non-empty. + + WorkspaceLevel(t, Step{ + Template: makeTemplate(template.SetAttributes(map[string]string{ + "display_name": fmt.Sprintf("Test Dashboard - %s", qa.RandomName()), + "warehouse_id": "{env.TEST_DEFAULT_WAREHOUSE_ID}", + "parent_path": "/Workspace/Shared/provider-test", + "serialized_dashboard": `{\"pages\":[{\"name\":\"new_name\",\"displayName\":\"New Page\"}]}`, + })), + }) +} diff --git a/internal/acceptance/data_mlflow_models_test.go b/internal/acceptance/data_mlflow_models_test.go new file mode 100644 index 0000000000..7cd13a2a02 --- /dev/null +++ b/internal/acceptance/data_mlflow_models_test.go @@ -0,0 +1,44 @@ +package acceptance + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform-plugin-testing/terraform" +) + +func TestAccDataMlflowModels(t *testing.T) { + WorkspaceLevel(t, + Step{ + Template: ` + resource "databricks_mlflow_model" "this" { + name = "model-{var.RANDOM}" + + description = "My MLflow model description" + + tags { + key = "key1" + value = "value1" + } + tags { + key = "key2" + value = "value2" + } + } + + data "databricks_mlflow_models" "this" { + depends_on = [databricks_mlflow_model.this] + }`, + Check: func(s *terraform.State) error { + r, ok := s.RootModule().Resources["data.databricks_mlflow_models.this"] + if !ok { + return fmt.Errorf("data not found in state") + } + names := r.Primary.Attributes["names.#"] + if names == "" { + return fmt.Errorf("names are empty: %v", r.Primary.Attributes) + } + return nil + }, + }) +} diff --git a/internal/acceptance/permissions_test.go b/internal/acceptance/permissions_test.go index 5d803bd451..bcd67fa8c9 100644 --- a/internal/acceptance/permissions_test.go +++ b/internal/acceptance/permissions_test.go @@ -3,222 +3,837 @@ package acceptance import ( "context" "fmt" + "regexp" + "strconv" "testing" - "github.com/databricks/databricks-sdk-go/client" - "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/iam" "github.com/databricks/terraform-provider-databricks/common" - "github.com/databricks/terraform-provider-databricks/permissions" - - "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) -func TestAccDatabricksPermissionsResourceFullLifecycle(t *testing.T) { - randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - WorkspaceLevel(t, Step{ - Template: fmt.Sprintf(` - resource "databricks_notebook" "this" { - content_base64 = base64encode("# Databricks notebook source\nprint(1)") - path = "/Beginning/%[1]s/Init" - language = "PYTHON" +// +// databricks_permissions testing support +// + +type permissionSettings struct { + // Name of the SP or group. Must be quoted for a literal string, or can be a reference to another object. 
+ ref string + // If true, the resource will not be created + skipCreation bool + permissionLevel string +} + +type makePermissionsConfig struct { + servicePrincipal []permissionSettings + group []permissionSettings + user []permissionSettings +} + +// Not used today, so this fails linting, but we can uncomment it if needed in the future. +// func servicePrincipalPermissions(permissionLevel ...string) func(*makePermissionsConfig) { +// return func(config *makePermissionsConfig) { +// config.servicePrincipal = simpleSettings(permissionLevel...) +// } +// } + +func groupPermissions(permissionLevel ...string) func(*makePermissionsConfig) { + return func(config *makePermissionsConfig) { + config.group = simpleSettings(permissionLevel...) + } +} + +func userPermissions(permissionLevel ...string) func(*makePermissionsConfig) { + return func(config *makePermissionsConfig) { + config.user = simpleSettings(permissionLevel...) + } +} + +func allPrincipalPermissions(permissionLevel ...string) func(*makePermissionsConfig) { + return func(config *makePermissionsConfig) { + config.servicePrincipal = append(config.servicePrincipal, simpleSettings(permissionLevel...)...) + config.group = append(config.group, simpleSettings(permissionLevel...)...) + config.user = append(config.user, simpleSettings(permissionLevel...)...) + } +} + +func currentPrincipalPermission(t *testing.T, permissionLevel string) func(*makePermissionsConfig) { + settings := permissionSettings{ + permissionLevel: permissionLevel, + ref: "data.databricks_current_user.me.user_name", + skipCreation: true, + } + return func(config *makePermissionsConfig) { + if isGcp(t) { + config.user = append(config.user, settings) + } else { + config.servicePrincipal = append(config.servicePrincipal, settings) } - resource "databricks_group" "first" { - display_name = "First %[1]s" + } +} + +func currentPrincipalType(t *testing.T) string { + if isGcp(t) { + return "user" + } + return "service_principal" +} + +func customPermission(name string, permissionSettings permissionSettings) func(*makePermissionsConfig) { + return func(config *makePermissionsConfig) { + switch name { + case "service_principal": + config.servicePrincipal = append(config.servicePrincipal, permissionSettings) + case "group": + config.group = append(config.group, permissionSettings) + case "user": + config.user = append(config.user, permissionSettings) + default: + panic(fmt.Sprintf("unknown permission type: %s", name)) } - resource "databricks_permissions" "dummy" { - notebook_path = databricks_notebook.this.id + } +} + +func simpleSettings(permissionLevel ...string) []permissionSettings { + var settings []permissionSettings + for _, level := range permissionLevel { + settings = append(settings, permissionSettings{permissionLevel: level}) + } + return settings +} + +func makePermissionsTestStage(idAttribute, idValue string, permissionOptions ...func(*makePermissionsConfig)) string { + config := makePermissionsConfig{} + for _, option := range permissionOptions { + option(&config) + } + var resources string + var accessControlBlocks string + addPermissions := func(permissionSettings []permissionSettings, resourceType, resourceNameAttribute, idAttribute, accessControlAttribute string, getName func(int) string) { + for i, permission := range permissionSettings { + if !permission.skipCreation { + resources += fmt.Sprintf(` + resource "%s" "_%d" { + %s = "permissions-%s" + }`, resourceType, i, resourceNameAttribute, getName(i)) + } + var name string + if permission.ref == "" { + name = 
fmt.Sprintf("%s._%d.%s", resourceType, i, idAttribute) + } else { + name = permission.ref + } + accessControlBlocks += fmt.Sprintf(` access_control { - group_name = databricks_group.first.display_name - permission_level = "CAN_MANAGE" + %s = %s + permission_level = "%s" + }`, accessControlAttribute, name, permission.permissionLevel) + } + } + addPermissions(config.servicePrincipal, "databricks_service_principal", "display_name", "application_id", "service_principal_name", func(i int) string { + return fmt.Sprintf("{var.STICKY_RANDOM}-%d", i) + }) + addPermissions(config.group, "databricks_group", "display_name", "display_name", "group_name", func(i int) string { + return fmt.Sprintf("{var.STICKY_RANDOM}-%d", i) + }) + addPermissions(config.user, "databricks_user", "user_name", "user_name", "user_name", func(i int) string { + return fmt.Sprintf("{var.STICKY_RANDOM}-%d@databricks.com", i) + }) + return fmt.Sprintf(` + data databricks_current_user me {} + %s + resource "databricks_permissions" "this" { + %s = %s + %s + } + `, resources, idAttribute, idValue, accessControlBlocks) +} + +func assertContainsPermission(t *testing.T, permissions *iam.ObjectPermissions, principalType, name string, permissionLevel iam.PermissionLevel) { + for _, acl := range permissions.AccessControlList { + switch principalType { + case "user": + if acl.UserName == name { + assert.Equal(t, permissionLevel, acl.AllPermissions[0].PermissionLevel) + return + } + case "service_principal": + if acl.ServicePrincipalName == name { + assert.Equal(t, permissionLevel, acl.AllPermissions[0].PermissionLevel) + return + } + case "group": + if acl.GroupName == name { + assert.Equal(t, permissionLevel, acl.AllPermissions[0].PermissionLevel) + return } - }`, randomName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("databricks_permissions.dummy", - "object_type", "notebook"), - resourceCheck("databricks_permissions.dummy", - func(ctx context.Context, client *common.DatabricksClient, id string) error { - permissions, err := permissions.NewPermissionsAPI(ctx, client).Read(id) - if err != nil { - return err - } - assert.GreaterOrEqual(t, len(permissions.AccessControlList), 1) - return nil - }), - ), - }, Step{ - Template: fmt.Sprintf(` - resource "databricks_notebook" "this" { - content_base64 = base64encode("# Databricks notebook source\nprint(1)") - path = "/Beginning/%[1]s/Init" - language = "PYTHON" } - resource "databricks_group" "first" { - display_name = "First %[1]s" + } + assert.Fail(t, fmt.Sprintf("permission not found for %s %s", principalType, name)) +} + +// +// databricks_permissions acceptance tests +// + +func TestAccPermissions_ClusterPolicy(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + policyTemplate := ` + resource "databricks_cluster_policy" "this" { + name = "{var.STICKY_RANDOM}" + definition = jsonencode({ + "spark_conf.spark.hadoop.javax.jdo.option.ConnectionURL": { + "type": "fixed", + "value": "jdbc:sqlserver://" + } + }) + }` + + WorkspaceLevel(t, Step{ + Template: policyTemplate + makePermissionsTestStage("cluster_policy_id", "databricks_cluster_policy.this.id", groupPermissions("CAN_USE")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("cluster_policy_id", "databricks_cluster_policy.this.id", currentPrincipalPermission(t, "CAN_USE"), allPrincipalPermissions("CAN_USE")), + }) +} + +func TestAccPermissions_InstancePool(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + policyTemplate := ` + data "databricks_node_type" "smallest" { + 
local_disk = true + } + + resource "databricks_instance_pool" "this" { + instance_pool_name = "{var.STICKY_RANDOM}" + min_idle_instances = 0 + max_capacity = 1 + node_type_id = data.databricks_node_type.smallest.id + idle_instance_autotermination_minutes = 10 + }` + + WorkspaceLevel(t, Step{ + Template: policyTemplate + makePermissionsTestStage("instance_pool_id", "databricks_instance_pool.this.id", groupPermissions("CAN_ATTACH_TO")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("instance_pool_id", "databricks_instance_pool.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_ATTACH_TO", "CAN_MANAGE")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("instance_pool_id", "databricks_instance_pool.this.id", currentPrincipalPermission(t, "CAN_ATTACH_TO")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for instance-pool, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_Cluster(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + policyTemplate := ` + +data "databricks_spark_version" "latest" { +} + + resource "databricks_cluster" "this" { + cluster_name = "singlenode-{var.RANDOM}" + spark_version = data.databricks_spark_version.latest.id + instance_pool_id = "{env.TEST_INSTANCE_POOL_ID}" + num_workers = 0 + autotermination_minutes = 10 + spark_conf = { + "spark.databricks.cluster.profile" = "singleNode" + "spark.master" = "local[*]" } - resource "databricks_group" "second" { - display_name = "Second %[1]s" + custom_tags = { + "ResourceClass" = "SingleNode" } - resource "databricks_permissions" "dummy" { - notebook_path = databricks_notebook.this.id - access_control { - group_name = databricks_group.first.display_name - permission_level = "CAN_MANAGE" - } - access_control { - group_name = databricks_group.second.display_name - permission_level = "CAN_RUN" + }` + + WorkspaceLevel(t, Step{ + Template: policyTemplate + makePermissionsTestStage("cluster_id", "databricks_cluster.this.id", groupPermissions("CAN_ATTACH_TO")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("cluster_id", "databricks_cluster.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_ATTACH_TO", "CAN_RESTART", "CAN_MANAGE")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("cluster_id", "databricks_cluster.this.id", currentPrincipalPermission(t, "CAN_ATTACH_TO")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for cluster, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_Job(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + template := ` + resource "databricks_job" "this" { + name = "{var.STICKY_RANDOM}" + }` + WorkspaceLevel(t, Step{ + Template: template + makePermissionsTestStage("job_id", "databricks_job.this.id", groupPermissions("CAN_VIEW")), + }, Step{ + Template: template + makePermissionsTestStage("job_id", "databricks_job.this.id", currentPrincipalPermission(t, "IS_OWNER"), allPrincipalPermissions("CAN_VIEW", "CAN_MANAGE_RUN", "CAN_MANAGE")), + }, Step{ + Template: template + makePermissionsTestStage("job_id", "databricks_job.this.id", currentPrincipalPermission(t, "CAN_MANAGE_RUN")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for job, allowed levels: CAN_MANAGE, IS_OWNER"), + }, Step{ + Template: template + makePermissionsTestStage("job_id", "databricks_job.this.id", 
currentPrincipalPermission(t, "CAN_MANAGE"), userPermissions("IS_OWNER")), + }, Step{ + Template: template, + Check: func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + jobId := s.RootModule().Resources["databricks_job.this"].Primary.ID + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "jobs", jobId) + assert.NoError(t, err) + idInt, err := strconv.Atoi(jobId) + assert.NoError(t, err) + job, err := w.Jobs.GetByJobId(context.Background(), int64(idInt)) + assert.NoError(t, err) + assertContainsPermission(t, permissions, currentPrincipalType(t), job.CreatorUserName, iam.PermissionLevelIsOwner) + return nil + }, + }) +} + +func TestAccPermissions_Pipeline(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + policyTemplate := ` + + locals { + name = "{var.STICKY_RANDOM}" + } + + resource "databricks_pipeline" "this" { + name = "${local.name}" + storage = "/test/${local.name}" + + library { + notebook { + path = databricks_notebook.this.path } - }`, randomName), - Check: resourceCheck("databricks_permissions.dummy", - func(ctx context.Context, client *common.DatabricksClient, id string) error { - permissions, err := permissions.NewPermissionsAPI(ctx, client).Read(id) - if err != nil { - return err - } - assert.GreaterOrEqual(t, len(permissions.AccessControlList), 2) - return nil - }), + } + continuous = false + }` + dltNotebookResource + + WorkspaceLevel(t, Step{ + Template: policyTemplate + makePermissionsTestStage("pipeline_id", "databricks_pipeline.this.id", groupPermissions("CAN_VIEW")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("pipeline_id", "databricks_pipeline.this.id", currentPrincipalPermission(t, "IS_OWNER"), allPrincipalPermissions("CAN_VIEW", "CAN_RUN", "CAN_MANAGE")), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("pipeline_id", "databricks_pipeline.this.id", currentPrincipalPermission(t, "CAN_RUN")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for pipelines, allowed levels: CAN_MANAGE, IS_OWNER"), + }, Step{ + Template: policyTemplate + makePermissionsTestStage("pipeline_id", "databricks_pipeline.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), userPermissions("IS_OWNER"), groupPermissions("CAN_VIEW", "CAN_RUN", "CAN_MANAGE")), + }, Step{ + Template: policyTemplate, + Check: resourceCheck("databricks_pipeline.this", func(ctx context.Context, c *common.DatabricksClient, id string) error { + w, err := c.WorkspaceClient() + assert.NoError(t, err) + pipeline, err := w.Pipelines.GetByPipelineId(context.Background(), id) + assert.NoError(t, err) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "pipelines", id) + assert.NoError(t, err) + assertContainsPermission(t, permissions, currentPrincipalType(t), pipeline.CreatorUserName, iam.PermissionLevelIsOwner) + return nil + }), + }) +} + +func TestAccPermissions_Notebook_Path(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + notebookTemplate := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + } + resource "databricks_notebook" "this" { + source = "{var.CWD}/../../storage/testdata/tf-test-python.py" + path = "${databricks_directory.this.path}/test_notebook" + }` + WorkspaceLevel(t, Step{ + Template: notebookTemplate + makePermissionsTestStage("notebook_path", "databricks_notebook.this.id", groupPermissions("CAN_RUN")), + }, Step{ + Template: 
notebookTemplate + makePermissionsTestStage("notebook_path", "databricks_notebook.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + // The current user can be removed from permissions since they inherit permissions from the directory they created. + Template: notebookTemplate + makePermissionsTestStage("notebook_path", "databricks_notebook.this.id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: notebookTemplate + makePermissionsTestStage("notebook_path", "databricks_notebook.this.id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for notebook, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_Notebook_Id(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + notebookTemplate := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + } + resource "databricks_notebook" "this" { + source = "{var.CWD}/../../storage/testdata/tf-test-python.py" + path = "${databricks_directory.this.path}/test_notebook" + }` + WorkspaceLevel(t, Step{ + Template: notebookTemplate + makePermissionsTestStage("notebook_id", "databricks_notebook.this.object_id", groupPermissions("CAN_RUN")), + }, Step{ + Template: notebookTemplate + makePermissionsTestStage("notebook_id", "databricks_notebook.this.object_id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + // The current user can be removed from permissions since they inherit permissions from the directory they created. + Template: notebookTemplate + makePermissionsTestStage("notebook_id", "databricks_notebook.this.object_id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: notebookTemplate + makePermissionsTestStage("notebook_id", "databricks_notebook.this.object_id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for notebook, allowed levels: CAN_MANAGE"), }) } -func TestAccDatabricksReposPermissionsResourceFullLifecycle(t *testing.T) { - randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) +func TestAccPermissions_Directory_Path(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + directoryTemplate := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + }` WorkspaceLevel(t, Step{ - Template: fmt.Sprintf(` + Template: directoryTemplate + makePermissionsTestStage("directory_path", "databricks_directory.this.id", groupPermissions("CAN_RUN")), + }, Step{ + Template: directoryTemplate + makePermissionsTestStage("directory_path", "databricks_directory.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + // The current user can be removed from permissions since they inherit permissions from the directory they created. 
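+		// (Workspace objects inherit ACLs from their parent directory, so the creator keeps
+		// effective CAN_MANAGE here even without an explicit entry; contrast this with the
+		// final step, which explicitly demotes the current user and must fail.)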
+ Template: directoryTemplate + makePermissionsTestStage("directory_path", "databricks_directory.this.id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: directoryTemplate + makePermissionsTestStage("directory_path", "databricks_directory.this.id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for directory, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_Directory_Id(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + directoryTemplate := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + }` + WorkspaceLevel(t, Step{ + Template: directoryTemplate + makePermissionsTestStage("directory_id", "databricks_directory.this.object_id", groupPermissions("CAN_RUN")), + }, Step{ + Template: directoryTemplate + makePermissionsTestStage("directory_id", "databricks_directory.this.object_id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + // The current user can be removed from permissions since they inherit permissions from the directory they created. + Template: directoryTemplate + makePermissionsTestStage("directory_id", "databricks_directory.this.object_id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: directoryTemplate + makePermissionsTestStage("directory_id", "databricks_directory.this.object_id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for directory, allowed levels: CAN_MANAGE"), + }) +} + +// This test exercises both by ID and by path permissions for the root directory. Testing them +// concurrently would result in a race condition. 
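+// The workspace root directory can be addressed either by object ID "0" (directory_id) or by
+// path "/" (directory_path). After each permissions resource is destroyed, only the inherited
+// CAN_MANAGE entry for the admins group should remain (see expectedAclAfterDeletion below).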
+func TestAccPermissions_Directory_RootDirectoryCorrectlyHandlesAdminUsers(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + expectedAclAfterDeletion := []iam.AccessControlResponse{ + { + GroupName: "admins", + AllPermissions: []iam.Permission{ + { + PermissionLevel: iam.PermissionLevelCanManage, + ForceSendFields: []string{"Inherited", "PermissionLevel"}, + }, + }, + ForceSendFields: []string{"GroupName"}, + }, + } + WorkspaceLevel(t, Step{ + Template: makePermissionsTestStage("directory_id", "\"0\"", groupPermissions("CAN_RUN")), + }, Step{ + Template: `data databricks_current_user me {}`, + Check: func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "directories", "0") + assert.NoError(t, err) + assert.Equal(t, expectedAclAfterDeletion, permissions.AccessControlList) + return nil + }, + }, Step{ + Template: makePermissionsTestStage("directory_path", "\"/\"", userPermissions("CAN_RUN")), + }, Step{ + Template: `data databricks_current_user me {}`, + Check: func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "directories", "0") + assert.NoError(t, err) + assert.Equal(t, expectedAclAfterDeletion, permissions.AccessControlList) + return nil + }, + }) +} + +func TestAccPermissions_WorkspaceFile_Path(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + workspaceFile := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + } + resource "databricks_workspace_file" "this" { + source = "{var.CWD}/../../storage/testdata/tf-test-python.py" + path = "${databricks_directory.this.path}/test_notebook" + }` + WorkspaceLevel(t, Step{ + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", groupPermissions("CAN_RUN")), + }, Step{ + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + // The current user can be removed from permissions since they inherit permissions from the directory they created. 
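+		// (Same inheritance rule as for notebooks and directories above; note that the API
+		// reports workspace files with object type "file", as seen in the expected error of
+		// the final step.)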
+ Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: workspaceFile + makePermissionsTestStage("workspace_file_path", "databricks_workspace_file.this.id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for file, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_WorkspaceFile_Id(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + workspaceFile := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + } + resource "databricks_workspace_file" "this" { + source = "{var.CWD}/../../storage/testdata/tf-test-python.py" + path = "${databricks_directory.this.path}/test_notebook" + }` + WorkspaceLevel(t, Step{ + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", groupPermissions("CAN_RUN")), + }, Step{ + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + // The current user can be removed from permissions since they inherit permissions from the directory they created. + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", allPrincipalPermissions("CAN_RUN", "CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: workspaceFile + makePermissionsTestStage("workspace_file_id", "databricks_workspace_file.this.object_id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for file, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_Repo_Id(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + template := ` resource "databricks_repo" "this" { url = "https://github.com/databrickslabs/tempo.git" - path = "/Repos/terraform-tests/tempo-%[1]s" + path = "/Repos/terraform-tests/tempo-{var.STICKY_RANDOM}" } - resource "databricks_group" "first" { - display_name = "First %[1]s" - } - resource "databricks_group" "second" { - display_name = "Second %[1]s" + ` + WorkspaceLevel(t, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", groupPermissions("CAN_MANAGE", "CAN_READ")), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("databricks_permissions.this", "object_type", "repo"), + func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + repoId := s.RootModule().Resources["databricks_repo.this"].Primary.ID + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "repos", repoId) + assert.NoError(t, err) + group1Name := s.RootModule().Resources["databricks_group._0"].Primary.Attributes["display_name"] + assertContainsPermission(t, permissions, "group", group1Name, iam.PermissionLevelCanManage) + group2Name := s.RootModule().Resources["databricks_group._1"].Primary.Attributes["display_name"] + assertContainsPermission(t, permissions, "group", group2Name, iam.PermissionLevelCanRead) + return nil + }, + ), + }, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), 
allPrincipalPermissions("CAN_READ", "CAN_MANAGE", "CAN_RUN", "CAN_EDIT")), + }, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", allPrincipalPermissions("CAN_READ", "CAN_MANAGE", "CAN_RUN", "CAN_EDIT")), + }, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for repo, allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_Repo_Path(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + template := ` + resource "databricks_repo" "this" { + url = "https://github.com/databrickslabs/tempo.git" + path = "/Repos/terraform-tests/tempo-{var.STICKY_RANDOM}" } - resource "databricks_permissions" "dummy" { - repo_path = databricks_repo.this.path - access_control { - group_name = databricks_group.first.display_name - permission_level = "CAN_MANAGE" - } - access_control { - group_name = databricks_group.second.display_name - permission_level = "CAN_RUN" - } - }`, randomName), + ` + WorkspaceLevel(t, Step{ + Template: template + makePermissionsTestStage("repo_path", "databricks_repo.this.path", groupPermissions("CAN_MANAGE", "CAN_RUN")), Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("databricks_permissions.dummy", - "object_type", "repo"), - resourceCheck("databricks_permissions.dummy", - func(ctx context.Context, client *common.DatabricksClient, id string) error { - permissions, err := permissions.NewPermissionsAPI(ctx, client).Read(id) - if err != nil { - return err - } - assert.GreaterOrEqual(t, len(permissions.AccessControlList), 2) - return nil - }), + resource.TestCheckResourceAttr("databricks_permissions.this", "object_type", "repo"), + func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + repoId := s.RootModule().Resources["databricks_repo.this"].Primary.ID + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "repos", repoId) + assert.NoError(t, err) + group1Name := s.RootModule().Resources["databricks_group._0"].Primary.Attributes["display_name"] + assertContainsPermission(t, permissions, "group", group1Name, iam.PermissionLevelCanManage) + group2Name := s.RootModule().Resources["databricks_group._1"].Primary.Attributes["display_name"] + assertContainsPermission(t, permissions, "group", group2Name, iam.PermissionLevelCanRun) + return nil + }, ), + }, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_READ", "CAN_MANAGE", "CAN_RUN", "CAN_EDIT")), + }, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", allPrincipalPermissions("CAN_READ", "CAN_MANAGE", "CAN_RUN", "CAN_EDIT")), + }, Step{ + Template: template + makePermissionsTestStage("repo_id", "databricks_repo.this.id", currentPrincipalPermission(t, "CAN_READ")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for repo, allowed levels: CAN_MANAGE"), }) } -func TestAccDatabricksPermissionsForSqlWarehouses(t *testing.T) { - // Random string to annotate newly created groups - randomName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) +func TestAccPermissions_Authorization_Passwords(t *testing.T) { + skipf(t)("ACLs for passwords are disabled on testing workspaces") + loadDebugEnvIfRunsFromIDE(t, 
"workspace") + WorkspaceLevel(t, Step{ + Template: makePermissionsTestStage("authorization", "\"passwords\"", groupPermissions("CAN_USE")), + }, Step{ + Template: makePermissionsTestStage("authorization", "\"passwords\"", customPermission("group", permissionSettings{ref: `"admins"`, skipCreation: true, permissionLevel: "CAN_USE"})), + }) +} - // Create a client to query the permissions API - c, err := client.New(&config.Config{}) - require.NoError(t, err) - permissionsClient := permissions.NewPermissionsAPI(context.Background(), &common.DatabricksClient{DatabricksClient: c}) +func TestAccPermissions_Authorization_Tokens(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + WorkspaceLevel(t, Step{ + Template: makePermissionsTestStage("authorization", "\"tokens\"", groupPermissions("CAN_USE")), + }, Step{ + Template: makePermissionsTestStage("authorization", "\"tokens\"", customPermission("group", permissionSettings{ref: `"users"`, skipCreation: true, permissionLevel: "CAN_USE"})), + }, Step{ + // Template needs to be non-empty + Template: "data databricks_current_user me {}", + Check: func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "authorization", "tokens") + assert.NoError(t, err) + assert.Len(t, permissions.AccessControlList, 1) + assert.Equal(t, iam.AccessControlResponse{ + GroupName: "admins", + AllPermissions: []iam.Permission{ + { + PermissionLevel: iam.PermissionLevelCanManage, + ForceSendFields: []string{"Inherited", "PermissionLevel"}, + }, + }, + ForceSendFields: []string{"GroupName"}, + }, permissions.AccessControlList[0]) + return nil + }, + }) +} + +func TestAccPermissions_SqlWarehouses(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + sqlWarehouseTemplate := ` + resource "databricks_sql_endpoint" "this" { + name = "{var.STICKY_RANDOM}" + cluster_size = "2X-Small" + }` + WorkspaceLevel(t, Step{ + Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", groupPermissions("CAN_USE")), + }, Step{ + Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", currentPrincipalPermission(t, "IS_OWNER"), allPrincipalPermissions("CAN_USE", "CAN_MANAGE", "CAN_MONITOR")), + // Note: ideally we could test making a new user/SP the owner of the warehouse, but the new user + // needs cluster creation permissions, and the SCIM API doesn't provide get-after-put consistency, + // so this would introduce flakiness. 
+ // }, Step{ + // Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), servicePrincipalPermissions("IS_OWNER")) + ` + // resource databricks_entitlements "this" { + // application_id = databricks_service_principal._0.application_id + // allow_cluster_create = true + // } + // `, + }, Step{ + Template: sqlWarehouseTemplate + makePermissionsTestStage("sql_endpoint_id", "databricks_sql_endpoint.this.id", currentPrincipalPermission(t, "CAN_USE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for warehouses, allowed levels: CAN_MANAGE, IS_OWNER"), + }, Step{ + Template: sqlWarehouseTemplate, + Check: func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + id := s.RootModule().Resources["databricks_sql_endpoint.this"].Primary.ID + warehouse, err := w.Warehouses.GetById(context.Background(), id) + assert.NoError(t, err) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "warehouses", id) + assert.NoError(t, err) + assertContainsPermission(t, permissions, currentPrincipalType(t), warehouse.CreatorName, iam.PermissionLevelIsOwner) + return nil + }, + }) +} - // Validates export attribute "object_type" for the permissions resource - // is set to warehouses - checkObjectType := resource.TestCheckResourceAttr("databricks_permissions.this", - "object_type", "warehouses") +func TestAccPermissions_SqlDashboard(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + dashboardTemplate := ` + resource "databricks_sql_dashboard" "this" { + name = "{var.STICKY_RANDOM}" + }` + WorkspaceLevel(t, Step{ + Template: dashboardTemplate + makePermissionsTestStage("sql_dashboard_id", "databricks_sql_dashboard.this.id", groupPermissions("CAN_VIEW")), + }, Step{ + Template: dashboardTemplate + makePermissionsTestStage("sql_dashboard_id", "databricks_sql_dashboard.this.id", currentPrincipalPermission(t, "CAN_VIEW")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for dashboard, allowed levels: CAN_MANAGE"), + }, Step{ + Template: dashboardTemplate + makePermissionsTestStage("sql_dashboard_id", "databricks_sql_dashboard.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), allPrincipalPermissions("CAN_VIEW", "CAN_READ", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + }) +} - // Asserts value of a permission level for a group - assertPermissionLevel := func(t *testing.T, permissionId, groupName, permissionLevel string) { - // Query permissions on warehouse - warehousePermissions, err := permissionsClient.Read(permissionId) - require.NoError(t, err) +func TestAccPermissions_SqlAlert(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + alertTemplate := ` + resource "databricks_sql_query" "this" { + name = "{var.STICKY_RANDOM}-query" + query = "SELECT 1 AS p1, 2 as p2" + data_source_id = "{env.TEST_DEFAULT_WAREHOUSE_DATASOURCE_ID}" + } + resource "databricks_sql_alert" "this" { + name = "{var.STICKY_RANDOM}-alert" + query_id = databricks_sql_query.this.id + options { + column = "p1" + op = ">=" + value = "3" + muted = false + } + }` + WorkspaceLevel(t, Step{ + Template: alertTemplate + makePermissionsTestStage("sql_alert_id", "databricks_sql_alert.this.id", groupPermissions("CAN_VIEW")), + }, Step{ + Template: alertTemplate + makePermissionsTestStage("sql_alert_id", "databricks_sql_alert.this.id", currentPrincipalPermission(t, 
"CAN_MANAGE"), groupPermissions("CAN_VIEW", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + }, Step{ + Template: alertTemplate + makePermissionsTestStage("sql_alert_id", "databricks_sql_alert.this.id", currentPrincipalPermission(t, "CAN_VIEW"), groupPermissions("CAN_VIEW", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for alert, allowed levels: CAN_MANAGE"), + }) +} - // Assert expected permission level is present - assert.Contains(t, warehousePermissions.AccessControlList, permissions.AccessControl{ - GroupName: groupName, - AllPermissions: []permissions.Permission{ - { - PermissionLevel: permissionLevel, - }, - }, - }) - } +func TestAccPermissions_SqlQuery(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + queryTemplate := ` + resource "databricks_sql_query" "this" { + name = "{var.STICKY_RANDOM}-query" + query = "SELECT 1 AS p1, 2 as p2" + data_source_id = "{env.TEST_DEFAULT_WAREHOUSE_DATASOURCE_ID}" + }` + WorkspaceLevel(t, Step{ + Template: queryTemplate + makePermissionsTestStage("sql_query_id", "databricks_sql_query.this.id", groupPermissions("CAN_VIEW")), + }, Step{ + Template: queryTemplate + makePermissionsTestStage("sql_query_id", "databricks_sql_query.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_VIEW", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + }, Step{ + Template: queryTemplate + makePermissionsTestStage("sql_query_id", "databricks_sql_query.this.id", currentPrincipalPermission(t, "CAN_VIEW"), groupPermissions("CAN_VIEW", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for query, allowed levels: CAN_MANAGE"), + }) +} - // Get permission ID from the terraform state - getPermissionId := func(s *terraform.State) string { - resourcePermission, ok := s.RootModule().Resources["databricks_permissions.this"] - require.True(t, ok, "could not find permissions resource: databricks_permissions.this") - return resourcePermission.Primary.ID - } +func TestAccPermissions_Dashboard(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + dashboardTemplate := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" + } + resource "databricks_dashboard" "dashboard" { + display_name = "TF New Dashboard" + warehouse_id = "{env.TEST_DEFAULT_WAREHOUSE_ID}" + parent_path = databricks_directory.this.path + serialized_dashboard = "{\"pages\":[{\"name\":\"b532570b\",\"displayName\":\"New Page\"}]}" + } + ` + WorkspaceLevel(t, Step{ + Template: dashboardTemplate + makePermissionsTestStage("dashboard_id", "databricks_dashboard.dashboard.id", groupPermissions("CAN_READ")), + }, Step{ + Template: dashboardTemplate + makePermissionsTestStage("dashboard_id", "databricks_dashboard.dashboard.id", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + }, Step{ + Template: dashboardTemplate + makePermissionsTestStage("dashboard_id", "databricks_dashboard.dashboard.id", currentPrincipalPermission(t, "CAN_READ"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_RUN", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for dashboard, allowed levels: CAN_MANAGE"), + }) +} - // Configuration for step 1 of the test. Create a databricks_permissions - // resources, assigning a group CAN_MANAGE permission to the warehouse. 
- config1 := fmt.Sprintf(` - resource "databricks_group" "one" { - display_name = "test-warehouse-permission-one-%s" - } - resource "databricks_permissions" "this" { - sql_endpoint_id = "{env.TEST_DEFAULT_WAREHOUSE_ID}" - access_control { - group_name = databricks_group.one.display_name - permission_level = "CAN_MANAGE" +func TestAccPermissions_Experiment(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + experimentTemplate := ` + resource "databricks_directory" "this" { + path = "/permissions_test/{var.STICKY_RANDOM}" } - }`, randomName) + resource "databricks_mlflow_experiment" "this" { + name = "${databricks_directory.this.path}/experiment" + }` + WorkspaceLevel(t, Step{ + Template: experimentTemplate + makePermissionsTestStage("experiment_id", "databricks_mlflow_experiment.this.id", groupPermissions("CAN_READ")), + }, Step{ + Template: experimentTemplate + makePermissionsTestStage("experiment_id", "databricks_mlflow_experiment.this.id", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + }, Step{ + Template: experimentTemplate + makePermissionsTestStage("experiment_id", "databricks_mlflow_experiment.this.id", currentPrincipalPermission(t, "CAN_READ"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for mlflowExperiment, allowed levels: CAN_MANAGE"), + }) +} - // Configuration for step 2 of the test. Create another group and update - // permissions to CAN_USE for the second group - config2 := fmt.Sprintf(` - resource "databricks_group" "one" { - display_name = "test-warehouse-permission-one-%[1]s" - } - resource "databricks_group" "two" { - display_name = "test-warehouse-permission-two-%[1]s" +func TestAccPermissions_RegisteredModel(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + modelTemplate := ` + resource "databricks_mlflow_model" "m1" { + name = "tf-{var.STICKY_RANDOM}" + description = "tf-{var.STICKY_RANDOM} description" } - resource "databricks_permissions" "this" { - sql_endpoint_id = "{env.TEST_DEFAULT_WAREHOUSE_ID}" - access_control { - group_name = databricks_group.one.display_name - permission_level = "CAN_MANAGE" - } - access_control { - group_name = databricks_group.two.display_name - permission_level = "CAN_USE" - } - }`, randomName) - - WorkspaceLevel(t, - Step{ - Template: config1, - Check: resource.ComposeTestCheckFunc( - checkObjectType, - func(s *terraform.State) error { - id := getPermissionId(s) - assertPermissionLevel(t, id, "test-warehouse-permission-one-"+randomName, "CAN_MANAGE") - return nil + ` + WorkspaceLevel(t, Step{ + Template: modelTemplate + makePermissionsTestStage("registered_model_id", "databricks_mlflow_model.m1.registered_model_id", groupPermissions("CAN_READ")), + }, Step{ + Template: modelTemplate + makePermissionsTestStage("registered_model_id", "databricks_mlflow_model.m1.registered_model_id", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_MANAGE_STAGING_VERSIONS", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE")), + }, Step{ + Template: modelTemplate + makePermissionsTestStage("registered_model_id", "databricks_mlflow_model.m1.registered_model_id", currentPrincipalPermission(t, "CAN_READ"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_MANAGE_STAGING_VERSIONS", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for registered-model, 
allowed levels: CAN_MANAGE"), + }) +} + +func TestAccPermissions_RegisteredModel_Root(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + WorkspaceLevel(t, Step{ + Template: makePermissionsTestStage("registered_model_id", "\"root\"", groupPermissions("CAN_READ")), + }, Step{ + Template: makePermissionsTestStage("registered_model_id", "\"root\"", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_MANAGE_STAGING_VERSIONS", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE")), + }, Step{ + Template: makePermissionsTestStage("registered_model_id", "\"root\"", currentPrincipalPermission(t, "CAN_READ"), groupPermissions("CAN_READ", "CAN_EDIT", "CAN_MANAGE_STAGING_VERSIONS", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for registered-model, allowed levels: CAN_MANAGE"), + }, Step{ + Template: "data databricks_current_user me {}", + Check: func(s *terraform.State) error { + w := databricks.Must(databricks.NewWorkspaceClient()) + permissions, err := w.Permissions.GetByRequestObjectTypeAndRequestObjectId(context.Background(), "registered-models", "root") + assert.NoError(t, err) + assert.Len(t, permissions.AccessControlList, 1) + assert.Equal(t, iam.AccessControlResponse{ + GroupName: "admins", + AllPermissions: []iam.Permission{ + { + PermissionLevel: iam.PermissionLevelCanManage, + ForceSendFields: []string{"Inherited", "PermissionLevel"}, + }, }, - ), + ForceSendFields: []string{"GroupName"}, + }, permissions.AccessControlList[0]) + return nil }, - Step{ - Template: config2, - Check: func(s *terraform.State) error { - id := getPermissionId(s) - assertPermissionLevel(t, id, "test-warehouse-permission-one-"+randomName, "CAN_MANAGE") - assertPermissionLevel(t, id, "test-warehouse-permission-two-"+randomName, "CAN_USE") - return nil - }, - }, - ) + }) +} + +func TestAccPermissions_ServingEndpoint(t *testing.T) { + loadDebugEnvIfRunsFromIDE(t, "workspace") + if isGcp(t) { + skipf(t)("Serving endpoints are not supported on GCP") + } + endpointTemplate := ` + resource "databricks_model_serving" "endpoint" { + name = "{var.STICKY_RANDOM}" + config { + served_models { + name = "prod_model" + model_name = "experiment-fixture-model" + model_version = "1" + workload_size = "Small" + scale_to_zero_enabled = true + } + traffic_config { + routes { + served_model_name = "prod_model" + traffic_percentage = 100 + } + } + } + }` + WorkspaceLevel(t, Step{ + Template: endpointTemplate + makePermissionsTestStage("serving_endpoint_id", "databricks_model_serving.endpoint.serving_endpoint_id", groupPermissions("CAN_VIEW")), + // Updating a serving endpoint seems to be flaky, so we'll only test that we can't remove management permissions for the current user. 
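+	// (The step below is kept commented out for reference; it shows what the full CAN_MANAGE
+	// round-trip would look like if endpoint config updates become reliable enough to re-enable.)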
+ // }, Step{ + // Template: endpointTemplate + makePermissionsTestStage("serving_endpoint_id", "databricks_model_serving.endpoint.id", currentPrincipalPermission(t, "CAN_MANAGE"), groupPermissions("CAN_VIEW", "CAN_QUERY", "CAN_MANAGE")), + }, Step{ + Template: endpointTemplate + makePermissionsTestStage("serving_endpoint_id", "databricks_model_serving.endpoint.serving_endpoint_id", currentPrincipalPermission(t, "CAN_VIEW"), groupPermissions("CAN_VIEW", "CAN_QUERY", "CAN_MANAGE")), + ExpectError: regexp.MustCompile("cannot remove management permissions for the current user for serving-endpoint, allowed levels: CAN_MANAGE"), + }) } diff --git a/internal/acceptance/sql_table_test.go b/internal/acceptance/sql_table_test.go index 0f0a87dec9..6ba5a83714 100644 --- a/internal/acceptance/sql_table_test.go +++ b/internal/acceptance/sql_table_test.go @@ -72,6 +72,70 @@ func TestUcAccResourceSqlTable_Managed(t *testing.T) { }) } +func TestUcAccResourceSqlTableWithIdentityColumn_Managed(t *testing.T) { + if os.Getenv("GOOGLE_CREDENTIALS") != "" { + skipf(t)("databricks_sql_table resource not available on GCP") + } + UnityWorkspaceLevel(t, Step{ + Template: ` + resource "databricks_schema" "this" { + name = "{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_sql_table" "this" { + name = "bar" + catalog_name = "main" + schema_name = databricks_schema.this.name + table_type = "MANAGED" + properties = { + this = "that" + something = "else" + } + + column { + name = "id" + type = "bigint" + identity = "default" + } + column { + name = "name" + type = "string" + } + comment = "this table is managed by terraform" + owner = "account users" + }`, + }, Step{ + Template: ` + resource "databricks_schema" "this" { + name = "{var.STICKY_RANDOM}" + catalog_name = "main" + } + + resource "databricks_sql_table" "this" { + name = "bar" + catalog_name = "main" + schema_name = databricks_schema.this.name + table_type = "MANAGED" + properties = { + that = "this" + something = "else2" + } + + column { + name = "id" + type = "bigint" + identity = "default" + } + column { + name = "name" + type = "string" + } + comment = "this table is managed by terraform..." 
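+		  # The second apply keeps the identity column definition identical while updating the
+		  # table properties and comment, so it should exercise the update path rather than re-creation.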
+ }`, + }) +} + func TestUcAccResourceSqlTable_External(t *testing.T) { UnityWorkspaceLevel(t, Step{ Template: ` diff --git a/internal/providers/sdkv2/sdkv2.go b/internal/providers/sdkv2/sdkv2.go index b9ee686121..65ab1f4973 100644 --- a/internal/providers/sdkv2/sdkv2.go +++ b/internal/providers/sdkv2/sdkv2.go @@ -29,6 +29,7 @@ import ( "github.com/databricks/terraform-provider-databricks/commands" "github.com/databricks/terraform-provider-databricks/common" "github.com/databricks/terraform-provider-databricks/dashboards" + "github.com/databricks/terraform-provider-databricks/finops" providercommon "github.com/databricks/terraform-provider-databricks/internal/providers/common" "github.com/databricks/terraform-provider-databricks/jobs" "github.com/databricks/terraform-provider-databricks/logger" @@ -99,6 +100,7 @@ func DatabricksProvider() *schema.Provider { "databricks_metastores": catalog.DataSourceMetastores().ToResource(), "databricks_mlflow_experiment": mlflow.DataSourceExperiment().ToResource(), "databricks_mlflow_model": mlflow.DataSourceModel().ToResource(), + "databricks_mlflow_models": mlflow.DataSourceModels().ToResource(), "databricks_mws_credentials": mws.DataSourceMwsCredentials().ToResource(), "databricks_mws_workspaces": mws.DataSourceMwsWorkspaces().ToResource(), "databricks_node_type": clusters.DataSourceNodeType().ToResource(), @@ -131,6 +133,7 @@ "databricks_azure_adls_gen1_mount": storage.ResourceAzureAdlsGen1Mount().ToResource(), "databricks_azure_adls_gen2_mount": storage.ResourceAzureAdlsGen2Mount().ToResource(), "databricks_azure_blob_mount": storage.ResourceAzureBlobMount().ToResource(), + "databricks_budget": finops.ResourceBudget().ToResource(), "databricks_catalog": catalog.ResourceCatalog().ToResource(), "databricks_catalog_workspace_binding": catalog.ResourceCatalogWorkspaceBinding().ToResource(), "databricks_connection": catalog.ResourceConnection().ToResource(), diff --git a/internal/service/apps_tf/model.go b/internal/service/apps_tf/model.go index 2ae21cc7d9..41a6990157 100755 --- a/internal/service/apps_tf/model.go +++ b/internal/service/apps_tf/model.go @@ -17,10 +17,18 @@ import ( type App struct { // The active deployment of the app. ActiveDeployment *AppDeployment `tfsdk:"active_deployment" tf:"optional"` + + AppStatus *ApplicationStatus `tfsdk:"app_status" tf:"optional"` + + ComputeStatus *ComputeStatus `tfsdk:"compute_status" tf:"optional"` // The creation time of the app. Formatted timestamp in ISO 8601. CreateTime types.String `tfsdk:"create_time" tf:"optional"` // The email of the user that created the app. Creator types.String `tfsdk:"creator" tf:"optional"` + // The default workspace file system path of the source code from which app + // deployments are created. This field tracks the workspace source code path + // of the last active deployment. + DefaultSourceCodePath types.String `tfsdk:"default_source_code_path" tf:"optional"` // The description of the app. Description types.String `tfsdk:"description" tf:"optional"` // The name of the app. The name must contain only lowercase alphanumeric @@ -28,12 +36,12 @@ type App struct { Name types.String `tfsdk:"name" tf:""` // The pending deployment of the app. PendingDeployment *AppDeployment `tfsdk:"pending_deployment" tf:"optional"` + // Resources for the app.
+ Resources []AppResource `tfsdk:"resources" tf:"optional"` ServicePrincipalId types.Int64 `tfsdk:"service_principal_id" tf:"optional"` ServicePrincipalName types.String `tfsdk:"service_principal_name" tf:"optional"` - - Status *AppStatus `tfsdk:"status" tf:"optional"` // The update time of the app. Formatted timestamp in ISO 8601. UpdateTime types.String `tfsdk:"update_time" tf:"optional"` // The email of the user that last updated the app. @@ -84,7 +92,7 @@ type AppDeployment struct { // the app in the workspace during deployment creation, whereas the latter // provides a system generated stable snapshotted source code path used by // the deployment. - SourceCodePath types.String `tfsdk:"source_code_path" tf:""` + SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"` // Status and status message of the deployment Status *AppDeploymentStatus `tfsdk:"status" tf:"optional"` // The update time of the deployment. Formatted timestamp in ISO 8601. @@ -132,16 +140,74 @@ type AppPermissionsRequest struct { AppName types.String `tfsdk:"-"` } -type AppStatus struct { - // Message corresponding with the app state. +type AppResource struct { + // Description of the App Resource. + Description types.String `tfsdk:"description" tf:"optional"` + + Job *AppResourceJob `tfsdk:"job" tf:"optional"` + // Name of the App Resource. + Name types.String `tfsdk:"name" tf:""` + + Secret *AppResourceSecret `tfsdk:"secret" tf:"optional"` + + ServingEndpoint *AppResourceServingEndpoint `tfsdk:"serving_endpoint" tf:"optional"` + + SqlWarehouse *AppResourceSqlWarehouse `tfsdk:"sql_warehouse" tf:"optional"` +} + +type AppResourceJob struct { + // Id of the job to grant permission on. + Id types.String `tfsdk:"id" tf:""` + // Permissions to grant on the Job. Supported permissions are: "CAN_MANAGE", + // "IS_OWNER", "CAN_MANAGE_RUN", "CAN_VIEW". + Permission types.String `tfsdk:"permission" tf:""` +} + +type AppResourceSecret struct { + // Key of the secret to grant permission on. + Key types.String `tfsdk:"key" tf:""` + // Permission to grant on the secret scope. For secrets, only one permission + // is allowed. Permission must be one of: "READ", "WRITE", "MANAGE". + Permission types.String `tfsdk:"permission" tf:""` + // Scope of the secret to grant permission on. + Scope types.String `tfsdk:"scope" tf:""` +} + +type AppResourceServingEndpoint struct { + // Name of the serving endpoint to grant permission on. + Name types.String `tfsdk:"name" tf:""` + // Permission to grant on the serving endpoint. Supported permissions are: + // "CAN_MANAGE", "CAN_QUERY", "CAN_VIEW". + Permission types.String `tfsdk:"permission" tf:""` +} + +type AppResourceSqlWarehouse struct { + // Id of the SQL warehouse to grant permission on. + Id types.String `tfsdk:"id" tf:""` + // Permission to grant on the SQL warehouse. Supported permissions are: + // "CAN_MANAGE", "CAN_USE", "IS_OWNER". + Permission types.String `tfsdk:"permission" tf:""` +} + +type ApplicationStatus struct { + // Application status message + Message types.String `tfsdk:"message" tf:"optional"` + // State of the application. + State types.String `tfsdk:"state" tf:"optional"` +} + +type ComputeStatus struct { + // Compute status message Message types.String `tfsdk:"message" tf:"optional"` - // State of the app. + // State of the app compute. State types.String `tfsdk:"state" tf:"optional"` } type CreateAppDeploymentRequest struct { // The name of the app. AppName types.String `tfsdk:"-"` + // The unique id of the deployment.
+ DeploymentId types.String `tfsdk:"deployment_id" tf:"optional"`
// The mode in which the deployment will manage the source code.
Mode types.String `tfsdk:"mode" tf:"optional"`
// The workspace file system path of the source code used to create the app
@@ -151,7 +217,7 @@ type CreateAppDeploymentRequest struct {
// the app in the workspace during deployment creation, whereas the latter
// provides a system generated stable snapshotted source code path used by
// the deployment.
- SourceCodePath types.String `tfsdk:"source_code_path" tf:""`
+ SourceCodePath types.String `tfsdk:"source_code_path" tf:"optional"`
}

type CreateAppRequest struct {
@@ -160,6 +226,8 @@ type CreateAppRequest struct {
// The name of the app. The name must contain only lowercase alphanumeric
// characters and hyphens. It must be unique within the workspace.
Name types.String `tfsdk:"name" tf:""`
+ // Resources for the app.
+ Resources []AppResource `tfsdk:"resources" tf:"optional"`
}

// Delete an app
@@ -168,9 +236,6 @@ type DeleteAppRequest struct {
Name types.String `tfsdk:"-"`
}

-type DeleteResponse struct {
-}
-
// Get an app deployment
type GetAppDeploymentRequest struct {
// The name of the app.
@@ -245,13 +310,12 @@ type StopAppRequest struct {
Name types.String `tfsdk:"-"`
}

-type StopAppResponse struct {
-}
-
type UpdateAppRequest struct {
// The description of the app.
Description types.String `tfsdk:"description" tf:"optional"`
// The name of the app. The name must contain only lowercase alphanumeric
// characters and hyphens. It must be unique within the workspace.
Name types.String `tfsdk:"name" tf:""`
+ // Resources for the app.
+ Resources []AppResource `tfsdk:"resources" tf:"optional"`
}
diff --git a/internal/service/catalog_tf/model.go b/internal/service/catalog_tf/model.go
index e84b479703..358885d57d 100755
--- a/internal/service/catalog_tf/model.go
+++ b/internal/service/catalog_tf/model.go
@@ -88,6 +88,21 @@ type ArtifactMatcher struct {
type AssignResponse struct {
}

+// AWS temporary credentials for API authentication. Read more at
+// https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html.
+type AwsCredentials struct {
+ // The access key ID that identifies the temporary credentials.
+ AccessKeyId types.String `tfsdk:"access_key_id" tf:"optional"`
+ // The Amazon Resource Name (ARN) of the S3 access point for temporary
+ // credentials related to the external location.
+ AccessPoint types.String `tfsdk:"access_point" tf:"optional"`
+ // The secret access key that can be used to sign AWS API requests.
+ SecretAccessKey types.String `tfsdk:"secret_access_key" tf:"optional"`
+ // The token that users must pass to the AWS API to use the temporary
+ // credentials.
+ SessionToken types.String `tfsdk:"session_token" tf:"optional"`
+}
+
type AwsIamRoleRequest struct {
// The Amazon Resource Name (ARN) of the AWS IAM role for S3 data access.
RoleArn types.String `tfsdk:"role_arn" tf:""`
@@ -145,6 +160,13 @@ type AzureServicePrincipal struct {
DirectoryId types.String `tfsdk:"directory_id" tf:""`
}

+// Azure temporary credentials for API authentication. Read more at
+// https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas
+type AzureUserDelegationSas struct {
+ // The signed URI (SAS Token) used to access blob services for a given path.
+ SasToken types.String `tfsdk:"sas_token" tf:"optional"`
+}
+
// Cancel refresh
type CancelRefreshRequest struct {
// ID of the refresh.
@@ -404,7 +426,7 @@ type CreateFunction struct {
// JSON-serialized key-value pair map, encoded (escaped) as a string.
Properties types.String `tfsdk:"properties" tf:"optional"`
// Table function return parameters.
- ReturnParams FunctionParameterInfos `tfsdk:"return_params" tf:""`
+ ReturnParams *FunctionParameterInfos `tfsdk:"return_params" tf:"optional"`
// Function language. When **EXTERNAL** is used, the language of the routine
// function should be specified in the __external_language__ field, and the
// __return_params__ of the function cannot be used (as **TABLE** return
@@ -414,7 +436,7 @@ type CreateFunction struct {
// Function body.
RoutineDefinition types.String `tfsdk:"routine_definition" tf:""`
// Function dependencies.
- RoutineDependencies DependencyList `tfsdk:"routine_dependencies" tf:""`
+ RoutineDependencies *DependencyList `tfsdk:"routine_dependencies" tf:"optional"`
// Name of parent schema relative to its parent catalog.
SchemaName types.String `tfsdk:"schema_name" tf:""`
// Function security type.
@@ -1018,6 +1040,41 @@ type FunctionParameterInfos struct {
Parameters []FunctionParameterInfo `tfsdk:"parameters" tf:"optional"`
}

+// GCP temporary credentials for API authentication. Read more at
+// https://developers.google.com/identity/protocols/oauth2/service-account
+type GcpOauthToken struct {
+ OauthToken types.String `tfsdk:"oauth_token" tf:"optional"`
+}
+
+type GenerateTemporaryTableCredentialRequest struct {
+ // The operation performed against the table data, either READ or
+ // READ_WRITE. If READ_WRITE is specified, the credentials returned will
+ // have write permissions; otherwise, they will be read-only.
+ Operation types.String `tfsdk:"operation" tf:"optional"`
+ // UUID of the table to read or write.
+ TableId types.String `tfsdk:"table_id" tf:"optional"`
+}
+
+type GenerateTemporaryTableCredentialResponse struct {
+ // AWS temporary credentials for API authentication. Read more at
+ // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html.
+ AwsTempCredentials *AwsCredentials `tfsdk:"aws_temp_credentials" tf:"optional"`
+ // Azure temporary credentials for API authentication. Read more at
+ // https://docs.microsoft.com/en-us/rest/api/storageservices/create-user-delegation-sas
+ AzureUserDelegationSas *AzureUserDelegationSas `tfsdk:"azure_user_delegation_sas" tf:"optional"`
+ // Server time when the credential will expire, in epoch milliseconds. The
+ // API client is advised to cache the credential given this expiration time.
+ ExpirationTime types.Int64 `tfsdk:"expiration_time" tf:"optional"`
+ // GCP temporary credentials for API authentication. Read more at
+ // https://developers.google.com/identity/protocols/oauth2/service-account
+ GcpOauthToken *GcpOauthToken `tfsdk:"gcp_oauth_token" tf:"optional"`
+ // R2 temporary credentials for API authentication. Read more at
+ // https://developers.cloudflare.com/r2/api/s3/tokens/.
+ R2TempCredentials *R2Credentials `tfsdk:"r2_temp_credentials" tf:"optional"`
+ // The URL of the storage path accessible by the temporary credential.
+ Url types.String `tfsdk:"url" tf:"optional"`
+}
+
// Gets the metastore assignment for a workspace
type GetAccountMetastoreAssignmentRequest struct {
// Workspace ID.
@@ -1150,6 +1207,9 @@ type GetMetastoreSummaryResponse struct {
DeltaSharingRecipientTokenLifetimeInSeconds types.Int64 `tfsdk:"delta_sharing_recipient_token_lifetime_in_seconds" tf:"optional"`
// The scope of Delta Sharing enabled for the metastore.
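+ // Possible values are `INTERNAL` and `INTERNAL_AND_EXTERNAL`.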
DeltaSharingScope types.String `tfsdk:"delta_sharing_scope" tf:"optional"` + // Whether to allow non-DBR clients to directly access entities under the + // metastore. + ExternalAccessEnabled types.Bool `tfsdk:"external_access_enabled" tf:"optional"` // Globally unique metastore ID across clouds and regions, of the form // `cloud:region:metastore_id`. GlobalMetastoreId types.String `tfsdk:"global_metastore_id" tf:"optional"` @@ -1262,6 +1322,8 @@ type GetTableRequest struct { IncludeBrowse types.Bool `tfsdk:"-"` // Whether delta metadata should be included in the response. IncludeDeltaMetadata types.Bool `tfsdk:"-"` + // Whether to include a manifest containing capabilities the table has. + IncludeManifestCapabilities types.Bool `tfsdk:"-"` } // Get catalog workspace bindings @@ -1546,6 +1608,8 @@ type ListStorageCredentialsResponse struct { type ListSummariesRequest struct { // Name of parent catalog for tables of interest. CatalogName types.String `tfsdk:"-"` + // Whether to include a manifest containing capabilities the table has. + IncludeManifestCapabilities types.Bool `tfsdk:"-"` // Maximum number of summaries for tables to return. If not set, the page // length is set to a server configured value (10000, as of 1/5/2024). - // when set to a value greater than 0, the page length is the minimum of @@ -1606,6 +1670,8 @@ type ListTablesRequest struct { IncludeBrowse types.Bool `tfsdk:"-"` // Whether delta metadata should be included in the response. IncludeDeltaMetadata types.Bool `tfsdk:"-"` + // Whether to include a manifest containing capabilities the table has. + IncludeManifestCapabilities types.Bool `tfsdk:"-"` // Maximum number of tables to return. If not set, all the tables are // returned (not recommended). - when set to a value greater than 0, the // page length is the minimum of this value and a server configured value; - @@ -1693,6 +1759,9 @@ type MetastoreInfo struct { DeltaSharingRecipientTokenLifetimeInSeconds types.Int64 `tfsdk:"delta_sharing_recipient_token_lifetime_in_seconds" tf:"optional"` // The scope of Delta Sharing enabled for the metastore. DeltaSharingScope types.String `tfsdk:"delta_sharing_scope" tf:"optional"` + // Whether to allow non-DBR clients to directly access entities under the + // metastore. + ExternalAccessEnabled types.Bool `tfsdk:"external_access_enabled" tf:"optional"` // Globally unique metastore ID across clouds and regions, of the form // `cloud:region:metastore_id`. GlobalMetastoreId types.String `tfsdk:"global_metastore_id" tf:"optional"` @@ -2098,6 +2167,17 @@ type QuotaInfo struct { QuotaName types.String `tfsdk:"quota_name" tf:"optional"` } +// R2 temporary credentials for API authentication. Read more at +// https://developers.cloudflare.com/r2/api/s3/tokens/. +type R2Credentials struct { + // The access key ID that identifies the temporary credentials. + AccessKeyId types.String `tfsdk:"access_key_id" tf:"optional"` + // The secret access key associated with the access key. + SecretAccessKey types.String `tfsdk:"secret_access_key" tf:"optional"` + // The generated JWT that users must pass to use the temporary credentials. 
+ SessionToken types.String `tfsdk:"session_token" tf:"optional"` +} + // Get a Volume type ReadVolumeRequest struct { // Whether to include volumes in the response for which the principal can diff --git a/internal/service/compute_tf/model.go b/internal/service/compute_tf/model.go index e983a492c4..223ba1cb66 100755 --- a/internal/service/compute_tf/model.go +++ b/internal/service/compute_tf/model.go @@ -326,8 +326,14 @@ type ClusterAttributes struct { NodeTypeId types.String `tfsdk:"node_type_id" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -526,8 +532,14 @@ type ClusterDetails struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -844,8 +856,14 @@ type ClusterSpec struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. 
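+ // For example (version strings illustrative): instead of spark_version
+ // "13.3.x-photon-scala2.12", use spark_version "13.3.x-scala2.12" with
+ // runtime_engine set to "PHOTON".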
RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -1040,8 +1058,14 @@ type CreateCluster struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -1423,8 +1447,14 @@ type EditCluster struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` @@ -2963,8 +2993,14 @@ type UpdateClusterResource struct { NumWorkers types.Int64 `tfsdk:"num_workers" tf:"optional"` // The ID of the cluster policy used to create the cluster if applicable. PolicyId types.String `tfsdk:"policy_id" tf:"optional"` - // Decides which runtime engine to be use, e.g. Standard vs. Photon. If - // unspecified, the runtime engine is inferred from spark_version. + // Determines the cluster's runtime engine, either standard or Photon. + // + // This field is not compatible with legacy `spark_version` values that + // contain `-photon-`. Remove `-photon-` from the `spark_version` and set + // `runtime_engine` to `PHOTON`. + // + // If left unspecified, the runtime engine defaults to standard unless the + // spark_version contains -photon-, in which case Photon will be used. RuntimeEngine types.String `tfsdk:"runtime_engine" tf:"optional"` // Single user name if data_security_mode is `SINGLE_USER` SingleUserName types.String `tfsdk:"single_user_name" tf:"optional"` diff --git a/internal/service/dashboards_tf/model.go b/internal/service/dashboards_tf/model.go index fcc9f0adf6..2fcdbdc14c 100755 --- a/internal/service/dashboards_tf/model.go +++ b/internal/service/dashboards_tf/model.go @@ -23,7 +23,12 @@ type CreateDashboardRequest struct { // Dashboards responses. 
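+ // An illustrative value (not from the spec):
+ // "/Users/first.last@example.com/dashboards".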
ParentPath types.String `tfsdk:"parent_path" tf:"optional"`
// The contents of the dashboard in serialized string form. This field is
- // excluded in List Dashboards responses.
+ // excluded in List Dashboards responses. Use the [get dashboard API] to
+ // retrieve an example response, which includes the `serialized_dashboard`
+ // field. This field provides the structure of the JSON string that
+ // represents the dashboard's layout and components.
+ //
+ // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get
SerializedDashboard types.String `tfsdk:"serialized_dashboard" tf:"optional"`
// The warehouse ID used to run the dashboard.
WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"`
@@ -81,11 +86,17 @@ type Dashboard struct {
// leading slash and no trailing slash. This field is excluded in List
// Dashboards responses.
ParentPath types.String `tfsdk:"parent_path" tf:"optional"`
- // The workspace path of the dashboard asset, including the file name. This
+ // The workspace path of the dashboard asset, including the file name.
+ // Exported dashboards always have the file extension `.lvdash.json`. This
// field is excluded in List Dashboards responses.
Path types.String `tfsdk:"path" tf:"optional"`
// The contents of the dashboard in serialized string form. This field is
- // excluded in List Dashboards responses.
+ // excluded in List Dashboards responses. Use the [get dashboard API] to
+ // retrieve an example response, which includes the `serialized_dashboard`
+ // field. This field provides the structure of the JSON string that
+ // represents the dashboard's layout and components.
+ //
+ // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get
SerializedDashboard types.String `tfsdk:"serialized_dashboard" tf:"optional"`
// The timestamp of when the dashboard was last updated by the user. This
// field is excluded in List Dashboards responses.
@@ -213,9 +224,10 @@ type GenieMessage struct {
// Genie space ID
SpaceId types.String `tfsdk:"space_id" tf:""`
// MessageStatus. The possible values are: * `FETCHING_METADATA`: Fetching
- // metadata from the data sources. * `ASKING_AI`: Waiting for the LLM to
- // respond to the users question. * `EXECUTING_QUERY`: Executing AI provided
- // SQL query. Get the SQL query result by calling
+ // metadata from the data sources. * `FILTERING_CONTEXT`: Running smart
+ // context step to determine relevant context. * `ASKING_AI`: Waiting for
+ // the LLM to respond to the user's question. * `EXECUTING_QUERY`: Executing
+ // AI provided SQL query. Get the SQL query result by calling
// [getMessageQueryResult](:method:genie/getMessageQueryResult) API.
// **Important: The message status will stay in the `EXECUTING_QUERY` until
// a client calls
@@ -510,7 +522,12 @@ type UpdateDashboardRequest struct {
// field is excluded in List Dashboards responses.
Etag types.String `tfsdk:"etag" tf:"optional"`
// The contents of the dashboard in serialized string form. This field is
- // excluded in List Dashboards responses.
+ // excluded in List Dashboards responses. Use the [get dashboard API] to
+ // retrieve an example response, which includes the `serialized_dashboard`
+ // field. This field provides the structure of the JSON string that
+ // represents the dashboard's layout and components.
+ //
+ // [get dashboard API]: https://docs.databricks.com/api/workspace/lakeview/get
SerializedDashboard types.String `tfsdk:"serialized_dashboard" tf:"optional"`
// The warehouse ID used to run the dashboard.
WarehouseId types.String `tfsdk:"warehouse_id" tf:"optional"` diff --git a/internal/service/jobs_tf/model.go b/internal/service/jobs_tf/model.go index d5a1b57f58..2699dc2286 100755 --- a/internal/service/jobs_tf/model.go +++ b/internal/service/jobs_tf/model.go @@ -263,7 +263,11 @@ type CreateJob struct { // begin or complete as well as when this job is deleted. EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` // A list of task execution environment specifications that can be - // referenced by tasks of this job. + // referenced by serverless tasks of this job. An environment is required to + // be present for serverless tasks. For serverless notebook tasks, the + // environment is accessible in the notebook environment panel. For other + // serverless tasks, the task environment is required to be specified using + // environment_key in the task settings. Environments []JobEnvironment `tfsdk:"environment" tf:"optional"` // Used to tell what is the format of the job. This field is ignored in // Create/Update/Reset calls. When using the Jobs API 2.1 this value is @@ -309,12 +313,12 @@ type CreateJob struct { Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"` // The queue settings of the job. Queue *QueueSettings `tfsdk:"queue" tf:"optional"` - // Write-only setting, available only in Create/Update/Reset and Submit - // calls. Specifies the user or service principal that the job runs as. If - // not specified, the job runs as the user who created the job. + // Write-only setting. Specifies the user, service principal or group that + // the job/pipeline runs as. If not specified, the job/pipeline runs as the + // user who created the job/pipeline. // - // Only `user_name` or `service_principal_name` can be specified. If both - // are specified, an error is thrown. + // Exactly one of `user_name`, `service_principal_name`, `group_name` should + // be specified. If not, an error is thrown. RunAs *JobRunAs `tfsdk:"run_as" tf:"optional"` // An optional periodic schedule for this job. The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI @@ -731,7 +735,8 @@ type JobDeployment struct { type JobEmailNotifications struct { // If true, do not send email to recipients specified in `on_failure` if the - // run is skipped. + // run is skipped. This field is `deprecated`. Please use the + // `notification_settings.no_alert_for_skipped_runs` field. NoAlertForSkippedRuns types.Bool `tfsdk:"no_alert_for_skipped_runs" tf:"optional"` // A list of email addresses to be notified when the duration of a run // exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in @@ -827,12 +832,12 @@ type JobPermissionsRequest struct { JobId types.String `tfsdk:"-"` } -// Write-only setting, available only in Create/Update/Reset and Submit calls. -// Specifies the user or service principal that the job runs as. If not -// specified, the job runs as the user who created the job. +// Write-only setting. Specifies the user, service principal or group that the +// job/pipeline runs as. If not specified, the job/pipeline runs as the user who +// created the job/pipeline. // -// Only `user_name` or `service_principal_name` can be specified. If both are -// specified, an error is thrown. +// Exactly one of `user_name`, `service_principal_name`, `group_name` should be +// specified. If not, an error is thrown. type JobRunAs struct { // Application ID of an active service principal. 
Setting this field // requires the `servicePrincipal/user` role. @@ -861,7 +866,11 @@ type JobSettings struct { // begin or complete as well as when this job is deleted. EmailNotifications *JobEmailNotifications `tfsdk:"email_notifications" tf:"optional"` // A list of task execution environment specifications that can be - // referenced by tasks of this job. + // referenced by serverless tasks of this job. An environment is required to + // be present for serverless tasks. For serverless notebook tasks, the + // environment is accessible in the notebook environment panel. For other + // serverless tasks, the task environment is required to be specified using + // environment_key in the task settings. Environments []JobEnvironment `tfsdk:"environment" tf:"optional"` // Used to tell what is the format of the job. This field is ignored in // Create/Update/Reset calls. When using the Jobs API 2.1 this value is @@ -907,12 +916,12 @@ type JobSettings struct { Parameters []JobParameterDefinition `tfsdk:"parameter" tf:"optional"` // The queue settings of the job. Queue *QueueSettings `tfsdk:"queue" tf:"optional"` - // Write-only setting, available only in Create/Update/Reset and Submit - // calls. Specifies the user or service principal that the job runs as. If - // not specified, the job runs as the user who created the job. + // Write-only setting. Specifies the user, service principal or group that + // the job/pipeline runs as. If not specified, the job/pipeline runs as the + // user who created the job/pipeline. // - // Only `user_name` or `service_principal_name` can be specified. If both - // are specified, an error is thrown. + // Exactly one of `user_name`, `service_principal_name`, `group_name` should + // be specified. If not, an error is thrown. RunAs *JobRunAs `tfsdk:"run_as" tf:"optional"` // An optional periodic schedule for this job. 
The default behavior is that // the job only runs when triggered by clicking “Run Now” in the Jobs UI @@ -1266,7 +1275,7 @@ type RepairRun struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` - + // Controls whether the pipeline should perform a full refresh PipelineParams *PipelineParams `tfsdk:"pipeline_params" tf:"optional"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` @@ -1622,7 +1631,7 @@ type RunJobTask struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` - + // Controls whether the pipeline should perform a full refresh PipelineParams *PipelineParams `tfsdk:"pipeline_params" tf:"optional"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` @@ -1725,7 +1734,7 @@ type RunNow struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` - + // Controls whether the pipeline should perform a full refresh PipelineParams *PipelineParams `tfsdk:"pipeline_params" tf:"optional"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` @@ -1859,7 +1868,7 @@ type RunParameters struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]types.String `tfsdk:"notebook_params" tf:"optional"` - + // Controls whether the pipeline should perform a full refresh PipelineParams *PipelineParams `tfsdk:"pipeline_params" tf:"optional"` PythonNamedParams map[string]types.String `tfsdk:"python_named_params" tf:"optional"` @@ -2605,7 +2614,8 @@ type TaskDependency struct { type TaskEmailNotifications struct { // If true, do not send email to recipients specified in `on_failure` if the - // run is skipped. + // run is skipped. This field is `deprecated`. Please use the + // `notification_settings.no_alert_for_skipped_runs` field. NoAlertForSkippedRuns types.Bool `tfsdk:"no_alert_for_skipped_runs" tf:"optional"` // A list of email addresses to be notified when the duration of a run // exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in @@ -2654,8 +2664,9 @@ type TaskNotificationSettings struct { type TerminationDetails struct { // The code indicates why the run was terminated. Additional codes might be // introduced in future releases. * `SUCCESS`: The run was completed - // successfully. * `CANCELED`: The run was canceled during execution by the - // Databricks platform; for example, if the maximum run duration was + // successfully. * `USER_CANCELED`: The run was successfully canceled during + // execution by a user. * `CANCELED`: The run was canceled during execution + // by the Databricks platform; for example, if the maximum run duration was // exceeded. * `SKIPPED`: Run was never executed, for example, if the // upstream task run failed, the dependency type condition was not met, or // there were no material tasks to execute. 
* `INTERNAL_ERROR`: The run diff --git a/internal/service/pipelines_tf/model.go b/internal/service/pipelines_tf/model.go index 1caafa7419..1740d0ee0b 100755 --- a/internal/service/pipelines_tf/model.go +++ b/internal/service/pipelines_tf/model.go @@ -19,6 +19,8 @@ type CreatePipeline struct { // If false, deployment will fail if name conflicts with that of another // pipeline. AllowDuplicateNames types.Bool `tfsdk:"allow_duplicate_names" tf:"optional"` + // Budget policy of this pipeline. + BudgetPolicyId types.String `tfsdk:"budget_policy_id" tf:"optional"` // A catalog in Unity Catalog to publish data from this pipeline to. If // `target` is specified, tables in this pipeline are published to a // `target` schema inside `catalog` (for example, @@ -58,6 +60,10 @@ type CreatePipeline struct { Notifications []Notifications `tfsdk:"notifications" tf:"optional"` // Whether Photon is enabled for this pipeline. Photon types.Bool `tfsdk:"photon" tf:"optional"` + // The default schema (database) where tables are read from or published to. + // The presence of this field implies that the pipeline is in direct + // publishing mode. + Schema types.String `tfsdk:"schema" tf:"optional"` // Whether serverless compute is enabled for this pipeline. Serverless types.Bool `tfsdk:"serverless" tf:"optional"` // DBFS root directory for storing checkpoints and tables. @@ -103,6 +109,8 @@ type EditPipeline struct { // If false, deployment will fail if name has changed and conflicts the name // of another pipeline. AllowDuplicateNames types.Bool `tfsdk:"allow_duplicate_names" tf:"optional"` + // Budget policy of this pipeline. + BudgetPolicyId types.String `tfsdk:"budget_policy_id" tf:"optional"` // A catalog in Unity Catalog to publish data from this pipeline to. If // `target` is specified, tables in this pipeline are published to a // `target` schema inside `catalog` (for example, @@ -146,6 +154,10 @@ type EditPipeline struct { Photon types.Bool `tfsdk:"photon" tf:"optional"` // Unique identifier for this pipeline. PipelineId types.String `tfsdk:"pipeline_id" tf:"optional"` + // The default schema (database) where tables are read from or published to. + // The presence of this field implies that the pipeline is in direct + // publishing mode. + Schema types.String `tfsdk:"schema" tf:"optional"` // Whether serverless compute is enabled for this pipeline. Serverless types.Bool `tfsdk:"serverless" tf:"optional"` // DBFS root directory for storing checkpoints and tables. @@ -209,6 +221,8 @@ type GetPipelineResponse struct { ClusterId types.String `tfsdk:"cluster_id" tf:"optional"` // The username of the pipeline creator. CreatorUserName types.String `tfsdk:"creator_user_name" tf:"optional"` + // Serverless budget policy ID of this pipeline. + EffectiveBudgetPolicyId types.String `tfsdk:"effective_budget_policy_id" tf:"optional"` // The health of a pipeline. Health types.String `tfsdk:"health" tf:"optional"` // The last time the pipeline settings were modified or created. @@ -642,6 +656,8 @@ type PipelinePermissionsRequest struct { } type PipelineSpec struct { + // Budget policy of this pipeline. + BudgetPolicyId types.String `tfsdk:"budget_policy_id" tf:"optional"` // A catalog in Unity Catalog to publish data from this pipeline to. If // `target` is specified, tables in this pipeline are published to a // `target` schema inside `catalog` (for example, @@ -679,6 +695,10 @@ type PipelineSpec struct { Notifications []Notifications `tfsdk:"notifications" tf:"optional"` // Whether Photon is enabled for this pipeline. 
Photon types.Bool `tfsdk:"photon" tf:"optional"` + // The default schema (database) where tables are read from or published to. + // The presence of this field implies that the pipeline is in direct + // publishing mode. + Schema types.String `tfsdk:"schema" tf:"optional"` // Whether serverless compute is enabled for this pipeline. Serverless types.Bool `tfsdk:"serverless" tf:"optional"` // DBFS root directory for storing checkpoints and tables. diff --git a/internal/service/serving_tf/model.go b/internal/service/serving_tf/model.go index c40d18ee63..b22dc911a7 100755 --- a/internal/service/serving_tf/model.go +++ b/internal/service/serving_tf/model.go @@ -30,6 +30,85 @@ type Ai21LabsConfig struct { Ai21labsApiKeyPlaintext types.String `tfsdk:"ai21labs_api_key_plaintext" tf:"optional"` } +type AiGatewayConfig struct { + // Configuration for AI Guardrails to prevent unwanted data and unsafe data + // in requests and responses. + Guardrails *AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + // Configuration for payload logging using inference tables. Use these + // tables to monitor and audit data being sent to and received from model + // APIs and to improve model quality. + InferenceTableConfig *AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + // Configuration for rate limits which can be set to limit endpoint traffic. + RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"` + // Configuration to enable usage tracking using system tables. These tables + // allow you to monitor operational usage on endpoints and their associated + // costs. + UsageTrackingConfig *AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"` +} + +type AiGatewayGuardrailParameters struct { + // List of invalid keywords. AI guardrail uses keyword or string matching to + // decide if the keyword exists in the request or response content. + InvalidKeywords []types.String `tfsdk:"invalid_keywords" tf:"optional"` + // Configuration for guardrail PII filter. + Pii *AiGatewayGuardrailPiiBehavior `tfsdk:"pii" tf:"optional"` + // Indicates whether the safety filter is enabled. + Safety types.Bool `tfsdk:"safety" tf:"optional"` + // The list of allowed topics. Given a chat request, this guardrail flags + // the request if its topic is not in the allowed topics. + ValidTopics []types.String `tfsdk:"valid_topics" tf:"optional"` +} + +type AiGatewayGuardrailPiiBehavior struct { + // Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' + // is set for the input guardrail and the request contains PII, the request + // is not sent to the model server and 400 status code is returned; if + // 'BLOCK' is set for the output guardrail and the model response contains + // PII, the PII info in the response is redacted and 400 status code is + // returned. + Behavior types.String `tfsdk:"behavior" tf:""` +} + +type AiGatewayGuardrails struct { + // Configuration for input guardrail filters. + Input *AiGatewayGuardrailParameters `tfsdk:"input" tf:"optional"` + // Configuration for output guardrail filters. + Output *AiGatewayGuardrailParameters `tfsdk:"output" tf:"optional"` +} + +type AiGatewayInferenceTableConfig struct { + // The name of the catalog in Unity Catalog. Required when enabling + // inference tables. NOTE: On update, you have to disable inference table + // first in order to change the catalog name. + CatalogName types.String `tfsdk:"catalog_name" tf:"optional"` + // Indicates whether the inference table is enabled. 
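+ // Note that the catalog, schema and table prefix can only be renamed while
+ // this is off: disable the inference table, apply the rename, then
+ // re-enable it.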
+ Enabled types.Bool `tfsdk:"enabled" tf:"optional"` + // The name of the schema in Unity Catalog. Required when enabling inference + // tables. NOTE: On update, you have to disable inference table first in + // order to change the schema name. + SchemaName types.String `tfsdk:"schema_name" tf:"optional"` + // The prefix of the table in Unity Catalog. NOTE: On update, you have to + // disable inference table first in order to change the prefix name. + TableNamePrefix types.String `tfsdk:"table_name_prefix" tf:"optional"` +} + +type AiGatewayRateLimit struct { + // Used to specify how many calls are allowed for a key within the + // renewal_period. + Calls types.Int64 `tfsdk:"calls" tf:""` + // Key field for a rate limit. Currently, only 'user' and 'endpoint' are + // supported, with 'endpoint' being the default if not specified. + Key types.String `tfsdk:"key" tf:"optional"` + // Renewal period field for a rate limit. Currently, only 'minute' is + // supported. + RenewalPeriod types.String `tfsdk:"renewal_period" tf:""` +} + +type AiGatewayUsageTrackingConfig struct { + // Whether to enable usage tracking. + Enabled types.Bool `tfsdk:"enabled" tf:"optional"` +} + type AmazonBedrockConfig struct { // The Databricks secret key reference for an AWS access key ID with // permissions to interact with Bedrock services. If you prefer to paste @@ -147,14 +226,17 @@ type CohereConfig struct { } type CreateServingEndpoint struct { + // The AI Gateway configuration for the serving endpoint. NOTE: only + // external model endpoints are supported as of now. + AiGateway *AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"` // The core config of the serving endpoint. Config EndpointCoreConfigInput `tfsdk:"config" tf:""` // The name of the serving endpoint. This field is required and must be // unique across a Databricks workspace. An endpoint name can consist of // alphanumeric characters, dashes, and underscores. Name types.String `tfsdk:"name" tf:""` - // Rate limits to be applied to the serving endpoint. NOTE: only external - // and foundation model endpoints are supported as of now. + // Rate limits to be applied to the serving endpoint. NOTE: this field is + // deprecated, please use AI Gateway to manage rate limits. RateLimits []RateLimit `tfsdk:"rate_limits" tf:"optional"` // Enable route optimization for the serving endpoint. RouteOptimized types.Bool `tfsdk:"route_optimized" tf:"optional"` @@ -520,6 +602,42 @@ type PayloadTable struct { StatusMessage types.String `tfsdk:"status_message" tf:"optional"` } +// Update AI Gateway of a serving endpoint +type PutAiGatewayRequest struct { + // Configuration for AI Guardrails to prevent unwanted data and unsafe data + // in requests and responses. + Guardrails *AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"` + // Configuration for payload logging using inference tables. Use these + // tables to monitor and audit data being sent to and received from model + // APIs and to improve model quality. + InferenceTableConfig *AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"` + // The name of the serving endpoint whose AI Gateway is being updated. This + // field is required. + Name types.String `tfsdk:"-"` + // Configuration for rate limits which can be set to limit endpoint traffic. + RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"` + // Configuration to enable usage tracking using system tables. These tables + // allow you to monitor operational usage on endpoints and their associated + // costs. 
+ UsageTrackingConfig *AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"`
+}
+
+type PutAiGatewayResponse struct {
+ // Configuration for AI Guardrails to prevent unwanted data and unsafe data
+ // in requests and responses.
+ Guardrails *AiGatewayGuardrails `tfsdk:"guardrails" tf:"optional"`
+ // Configuration for payload logging using inference tables. Use these
+ // tables to monitor and audit data being sent to and received from model
+ // APIs and to improve model quality.
+ InferenceTableConfig *AiGatewayInferenceTableConfig `tfsdk:"inference_table_config" tf:"optional"`
+ // Configuration for rate limits which can be set to limit endpoint traffic.
+ RateLimits []AiGatewayRateLimit `tfsdk:"rate_limits" tf:"optional"`
+ // Configuration to enable usage tracking using system tables. These tables
+ // allow you to monitor operational usage on endpoints and their associated
+ // costs.
+ UsageTrackingConfig *AiGatewayUsageTrackingConfig `tfsdk:"usage_tracking_config" tf:"optional"`
+}
+
// Update rate limits of a serving endpoint
type PutRequest struct {
// The name of the serving endpoint whose rate limits are being updated.
@@ -914,6 +1032,9 @@ type ServerLogsResponse struct {
}

type ServingEndpoint struct {
+ // The AI Gateway configuration for the serving endpoint. NOTE: Only
+ // external model endpoints are currently supported.
+ AiGateway *AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"`
// The config that is currently being served by the endpoint.
Config *EndpointCoreConfigSummary `tfsdk:"config" tf:"optional"`
// The timestamp when the endpoint was created in Unix time.
@@ -960,6 +1081,9 @@ type ServingEndpointAccessControlResponse struct {
}

type ServingEndpointDetailed struct {
+ // The AI Gateway configuration for the serving endpoint. NOTE: Only
+ // external model endpoints are currently supported.
+ AiGateway *AiGatewayConfig `tfsdk:"ai_gateway" tf:"optional"`
// The config that is currently being served by the endpoint.
Config *EndpointCoreConfigOutput `tfsdk:"config" tf:"optional"`
// The timestamp when the endpoint was created in Unix time.
diff --git a/internal/service/settings_tf/model.go b/internal/service/settings_tf/model.go
index 55059248b5..117cf8d113 100755
--- a/internal/service/settings_tf/model.go
+++ b/internal/service/settings_tf/model.go
@@ -32,6 +32,10 @@ type AutomaticClusterUpdateSetting struct {
SettingName types.String `tfsdk:"setting_name" tf:"optional"`
}

+type BooleanMessage struct {
+ Value types.Bool `tfsdk:"value" tf:"optional"`
+}
+
type ClusterAutoRestartMessage struct {
CanToggle types.Bool `tfsdk:"can_toggle" tf:"optional"`

@@ -292,6 +296,54 @@ type DeleteDefaultNamespaceSettingResponse struct {
Etag types.String `tfsdk:"etag" tf:""`
}

+// Delete Legacy Access Disablement Status
+type DeleteDisableLegacyAccessRequest struct {
+ // etag used for versioning. The response is at least as fresh as the eTag
+ // provided. This is used for optimistic concurrency control as a way to
+ // help prevent simultaneous writes of a setting overwriting each other. It
+ // is strongly suggested that systems make use of the etag in the read ->
+ // delete pattern to perform setting deletions in order to avoid race
+ // conditions. That is, get an etag from a GET request, and pass it with the
+ // DELETE request to identify the rule set version you are deleting.
+ Etag types.String `tfsdk:"-"`
+}
+
+// The etag is returned.
+type DeleteDisableLegacyAccessResponse struct {
+ // etag used for versioning.
The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"etag" tf:""` +} + +// Delete the disable legacy features setting +type DeleteDisableLegacyFeaturesRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"-"` +} + +// The etag is returned. +type DeleteDisableLegacyFeaturesResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag types.String `tfsdk:"etag" tf:""` +} + // Delete access list type DeleteIpAccessListRequest struct { // The ID for the corresponding IP access list @@ -377,6 +429,42 @@ type DeleteTokenManagementRequest struct { TokenId types.String `tfsdk:"-"` } +type DisableLegacyAccess struct { + DisableLegacyAccess BooleanMessage `tfsdk:"disable_legacy_access" tf:""` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag types.String `tfsdk:"etag" tf:"optional"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName types.String `tfsdk:"setting_name" tf:"optional"` +} + +type DisableLegacyFeatures struct { + DisableLegacyFeatures BooleanMessage `tfsdk:"disable_legacy_features" tf:""` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. 
It
+ // is strongly suggested that systems make use of the etag in the read ->
+ // update pattern to perform setting updates in order to avoid race
+ // conditions. That is, get an etag from a GET request, and pass it with the
+ // PATCH request to identify the setting version you are updating.
+ Etag types.String `tfsdk:"etag" tf:"optional"`
+ // Name of the corresponding setting. This field is populated in the
+ // response, but it will not be respected even if it's set in the request
+ // body. The setting name in the path parameter will be respected instead.
+ // Setting name is required to be 'default' if the setting only has one
+ // instance per workspace.
+ SettingName types.String `tfsdk:"setting_name" tf:"optional"`
+}
+
type EmailConfig struct {
// Email addresses to notify.
Addresses []types.String `tfsdk:"addresses" tf:"optional"`
@@ -538,6 +626,30 @@ type GetDefaultNamespaceSettingRequest struct {
Etag types.String `tfsdk:"-"`
}

+// Retrieve Legacy Access Disablement Status
+type GetDisableLegacyAccessRequest struct {
+ // etag used for versioning. The response is at least as fresh as the eTag
+ // provided. This is used for optimistic concurrency control as a way to
+ // help prevent simultaneous writes of a setting overwriting each other. It
+ // is strongly suggested that systems make use of the etag in the read ->
+ // delete pattern to perform setting deletions in order to avoid race
+ // conditions. That is, get an etag from a GET request, and pass it with the
+ // DELETE request to identify the rule set version you are deleting.
+ Etag types.String `tfsdk:"-"`
+}
+
+// Get the disable legacy features setting
+type GetDisableLegacyFeaturesRequest struct {
+ // etag used for versioning. The response is at least as fresh as the eTag
+ // provided. This is used for optimistic concurrency control as a way to
+ // help prevent simultaneous writes of a setting overwriting each other. It
+ // is strongly suggested that systems make use of the etag in the read ->
+ // delete pattern to perform setting deletions in order to avoid race
+ // conditions. That is, get an etag from a GET request, and pass it with the
+ // DELETE request to identify the rule set version you are deleting.
+ Etag types.String `tfsdk:"-"`
+}
+
// Get the enhanced security monitoring setting
type GetEnhancedSecurityMonitoringSettingRequest struct {
// etag used for versioning. The response is at least as fresh as the eTag
@@ -1045,6 +1157,8 @@ type TokenInfo struct {
OwnerId types.Int64 `tfsdk:"owner_id" tf:"optional"`
// ID of the token.
TokenId types.String `tfsdk:"token_id" tf:"optional"`
+ // If applicable, the ID of the workspace that the token was created in.
+ WorkspaceId types.Int64 `tfsdk:"workspace_id" tf:"optional"`
}

type TokenPermission struct {
@@ -1137,6 +1251,34 @@ type UpdateDefaultNamespaceSettingRequest struct {
Setting DefaultNamespaceSetting `tfsdk:"setting" tf:""`
}

+// Details required to update a setting.
+type UpdateDisableLegacyAccessRequest struct {
+ // This should always be set to true for the Settings API. Added for AIP
+ // compliance.
+ AllowMissing types.Bool `tfsdk:"allow_missing" tf:""`
+ // Field mask is required to be passed into the PATCH request. Field mask
+ // specifies which fields of the setting payload will be updated. The field
+ // mask needs to be supplied as a single string. To specify multiple fields
+ // in the field mask, use a comma as the separator (no space).
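+ // For example, a two-field mask is written as "field1,field2" (field
+ // names here are placeholders, not real setting fields).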
+ FieldMask types.String `tfsdk:"field_mask" tf:""`
+
+ Setting DisableLegacyAccess `tfsdk:"setting" tf:""`
+}
+
+// Details required to update a setting.
+type UpdateDisableLegacyFeaturesRequest struct {
+ // This should always be set to true for the Settings API. Added for AIP
+ // compliance.
+ AllowMissing types.Bool `tfsdk:"allow_missing" tf:""`
+ // Field mask is required to be passed into the PATCH request. Field mask
+ // specifies which fields of the setting payload will be updated. The field
+ // mask needs to be supplied as a single string. To specify multiple fields
+ // in the field mask, use a comma as the separator (no space).
+ FieldMask types.String `tfsdk:"field_mask" tf:""`
+
+ Setting DisableLegacyFeatures `tfsdk:"setting" tf:""`
+}
+
// Details required to update a setting.
type UpdateEnhancedSecurityMonitoringSettingRequest struct {
// This should always be set to true for Settings API. Added for AIP
diff --git a/internal/service/sql_tf/model.go b/internal/service/sql_tf/model.go
index 8bbdb536f4..e912363c30 100755
--- a/internal/service/sql_tf/model.go
+++ b/internal/service/sql_tf/model.go
@@ -194,6 +194,8 @@ type CancelExecutionRequest struct {
type CancelExecutionResponse struct {
}

+// Configures the channel name and DBSQL version of the warehouse.
+// CHANNEL_NAME_CUSTOM should be chosen only when `dbsql_version` is specified.
type Channel struct {
DbsqlVersion types.String `tfsdk:"dbsql_version" tf:"optional"`

@@ -208,15 +210,6 @@ type ChannelInfo struct {
Name types.String `tfsdk:"name" tf:"optional"`
}

-// Client code that triggered the request
-type ClientCallContext struct {
- // File name that contains the last line that triggered the request.
- FileName *EncodedText `tfsdk:"file_name" tf:"optional"`
- // Last line number within a file or notebook cell that triggered the
- // request.
- LineNumber types.Int64 `tfsdk:"line_number" tf:"optional"`
-}
-
type ColumnInfo struct {
// The name of the column.
Name types.String `tfsdk:"name" tf:"optional"`
@@ -356,7 +349,9 @@ type CreateWarehouseRequest struct {
// The amount of time in minutes that a SQL warehouse must be idle (i.e., no
// RUNNING queries) before it is automatically stopped.
//
- // Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
+ // Supported values: - Must be >= 0 mins for serverless warehouses - Must be
+ // == 0 or >= 10 mins for non-serverless warehouses - 0 indicates no
+ // autostop.
//
// Defaults to 120 mins
AutoStopMins types.Int64 `tfsdk:"auto_stop_mins" tf:"optional"`
@@ -710,13 +705,6 @@ type EditWarehouseResponse struct {
type Empty struct {
}

-type EncodedText struct {
- // Carry text data in different form.
- Encoding types.String `tfsdk:"encoding" tf:"optional"`
- // text data
- Text types.String `tfsdk:"text" tf:"optional"`
-}
-
type EndpointConfPair struct {
Key types.String `tfsdk:"key" tf:"optional"`

@@ -1673,8 +1661,6 @@ type QueryInfo struct {
QueryEndTimeMs types.Int64 `tfsdk:"query_end_time_ms" tf:"optional"`
// The query ID.
QueryId types.String `tfsdk:"query_id" tf:"optional"`
-
- QuerySource *QuerySource `tfsdk:"query_source" tf:"optional"`
// The time the query started.
QueryStartTimeMs types.Int64 `tfsdk:"query_start_time_ms" tf:"optional"`
// The text of the query.
@@ -1834,62 +1820,6 @@ type QueryPostContent struct { Tags []types.String `tfsdk:"tags" tf:"optional"` } -type QuerySource struct { - // UUID - AlertId types.String `tfsdk:"alert_id" tf:"optional"` - // Client code that triggered the request - ClientCallContext *ClientCallContext `tfsdk:"client_call_context" tf:"optional"` - // Id associated with a notebook cell - CommandId types.String `tfsdk:"command_id" tf:"optional"` - // Id associated with a notebook run or execution - CommandRunId types.String `tfsdk:"command_run_id" tf:"optional"` - // UUID - DashboardId types.String `tfsdk:"dashboard_id" tf:"optional"` - // UUID for Lakeview Dashboards, separate from DBSQL Dashboards - // (dashboard_id) - DashboardV3Id types.String `tfsdk:"dashboard_v3_id" tf:"optional"` - - DriverInfo *QuerySourceDriverInfo `tfsdk:"driver_info" tf:"optional"` - // Spark service that received and processed the query - EntryPoint types.String `tfsdk:"entry_point" tf:"optional"` - // UUID for Genie space - GenieSpaceId types.String `tfsdk:"genie_space_id" tf:"optional"` - - IsCloudFetch types.Bool `tfsdk:"is_cloud_fetch" tf:"optional"` - - IsDatabricksSqlExecApi types.Bool `tfsdk:"is_databricks_sql_exec_api" tf:"optional"` - - JobId types.String `tfsdk:"job_id" tf:"optional"` - // With background compute, jobs can be managed by different internal teams. - // When not specified, not a background compute job When specified and the - // value is not JOBS, it is a background compute job - JobManagedBy types.String `tfsdk:"job_managed_by" tf:"optional"` - - NotebookId types.String `tfsdk:"notebook_id" tf:"optional"` - // String provided by a customer that'll help them identify the query - QueryTags types.String `tfsdk:"query_tags" tf:"optional"` - // Id associated with a job run or execution - RunId types.String `tfsdk:"run_id" tf:"optional"` - // Id associated with a notebook cell run or execution - RunnableCommandId types.String `tfsdk:"runnable_command_id" tf:"optional"` - - ScheduledBy types.String `tfsdk:"scheduled_by" tf:"optional"` - - ServerlessChannelInfo *ServerlessChannelInfo `tfsdk:"serverless_channel_info" tf:"optional"` - // UUID - SourceQueryId types.String `tfsdk:"source_query_id" tf:"optional"` -} - -type QuerySourceDriverInfo struct { - BiToolEntry types.String `tfsdk:"bi_tool_entry" tf:"optional"` - - DriverName types.String `tfsdk:"driver_name" tf:"optional"` - - SimbaBrandingVendor types.String `tfsdk:"simba_branding_vendor" tf:"optional"` - - VersionNumber types.String `tfsdk:"version_number" tf:"optional"` -} - type RepeatedEndpointConfPairs struct { // Deprecated: Use configuration_pairs ConfigPair []EndpointConfPair `tfsdk:"config_pair" tf:"optional"` @@ -1964,11 +1894,6 @@ type ResultSchema struct { Columns []ColumnInfo `tfsdk:"columns" tf:"optional"` } -type ServerlessChannelInfo struct { - // Name of the Channel - Name types.String `tfsdk:"name" tf:"optional"` -} - type ServiceError struct { ErrorCode types.String `tfsdk:"error_code" tf:"optional"` // A brief summary of the error condition. diff --git a/internal/service/workspace_tf/model.go b/internal/service/workspace_tf/model.go index d11553b3a0..fe451acf89 100755 --- a/internal/service/workspace_tf/model.go +++ b/internal/service/workspace_tf/model.go @@ -29,11 +29,11 @@ type AzureKeyVaultSecretScopeMetadata struct { ResourceId types.String `tfsdk:"resource_id" tf:""` } -type CreateCredentials struct { +type CreateCredentialsRequest struct { // Git provider. This field is case-insensitive. 
The available Git providers - // are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, - // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and - // awsCodeCommit. + // are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, + // `gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and + // `awsCodeCommit`. GitProvider types.String `tfsdk:"git_provider" tf:""` // The username or email provided with your Git provider account, depending // on which provider you are using. For GitHub, GitHub Enterprise Server, or @@ -45,8 +45,7 @@ type CreateCredentials struct { GitUsername types.String `tfsdk:"git_username" tf:"optional"` // The personal access token used to authenticate to the corresponding Git // provider. For certain providers, support may exist for other types of - // scoped access tokens. [Learn more]. The personal access token used to - // authenticate to the corresponding Git + // scoped access tokens. [Learn more]. // // [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html PersonalAccessToken types.String `tfsdk:"personal_access_token" tf:"optional"` @@ -54,31 +53,23 @@ type CreateCredentials struct { type CreateCredentialsResponse struct { // ID of the credential object in the workspace. - CredentialId types.Int64 `tfsdk:"credential_id" tf:"optional"` - // Git provider. This field is case-insensitive. The available Git providers - // are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, - // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and - // awsCodeCommit. - GitProvider types.String `tfsdk:"git_provider" tf:"optional"` - // The username or email provided with your Git provider account, depending - // on which provider you are using. For GitHub, GitHub Enterprise Server, or - // Azure DevOps Services, either email or username may be used. For GitLab, - // GitLab Enterprise Edition, email must be used. For AWS CodeCommit, - // BitBucket or BitBucket Server, username must be used. For all other - // providers please see your provider's Personal Access Token authentication - // documentation to see what is supported. + CredentialId types.Int64 `tfsdk:"credential_id" tf:""` + // The Git provider associated with the credential. + GitProvider types.String `tfsdk:"git_provider" tf:""` + // The username or email provided with your Git provider account and + // associated with the credential. GitUsername types.String `tfsdk:"git_username" tf:"optional"` } -type CreateRepo struct { +type CreateRepoRequest struct { // Desired path for the repo in the workspace. Almost any path in the - // workspace can be chosen. If repo is created in /Repos, path must be in - // the format /Repos/{folder}/{repo-name}. + // workspace can be chosen. If repo is created in `/Repos`, path must be in + // the format `/Repos/{folder}/{repo-name}`. Path types.String `tfsdk:"path" tf:"optional"` // Git provider. This field is case-insensitive. The available Git providers - // are gitHub, bitbucketCloud, gitLab, azureDevOpsServices, - // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and - // awsCodeCommit. + // are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`, + // `gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and + // `awsCodeCommit`. Provider types.String `tfsdk:"provider" tf:""` // If specified, the repo will be created with sparse checkout enabled. You // cannot enable/disable sparse checkout after the repo is created. 
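Editor's note: the `/Repos/{folder}/{repo-name}` path constraint above is easy to get wrong, so here is a small sketch of a request that honors it, using only fields visible in this diff; the path, provider, and URL values are invented:

```go
package example

import (
	"github.com/databricks/terraform-provider-databricks/internal/service/workspace_tf"
	"github.com/hashicorp/terraform-plugin-framework/types"
)

// newRepoRequest builds a CreateRepoRequest whose Path follows the
// documented /Repos/{folder}/{repo-name} layout.
func newRepoRequest() workspace_tf.CreateRepoRequest {
	return workspace_tf.CreateRepoRequest{
		Path:     types.StringValue("/Repos/dev-team/my-project"),            // hypothetical folder and repo name
		Provider: types.StringValue("gitHub"),                                // case-insensitive, one of the documented providers
		Url:      types.StringValue("https://github.com/org/my-project.git"), // hypothetical remote
	}
}
```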
@@ -87,6 +78,24 @@ type CreateRepo struct { Url types.String `tfsdk:"url" tf:""` } +type CreateRepoResponse struct { + // Branch that the Git folder (repo) is checked out to. + Branch types.String `tfsdk:"branch" tf:"optional"` + // SHA-1 hash representing the commit ID of the current HEAD of the Git + // folder (repo). + HeadCommitId types.String `tfsdk:"head_commit_id" tf:"optional"` + // ID of the Git folder (repo) object in the workspace. + Id types.Int64 `tfsdk:"id" tf:"optional"` + // Path of the Git folder (repo) in the workspace. + Path types.String `tfsdk:"path" tf:"optional"` + // Git provider of the linked Git repository. + Provider types.String `tfsdk:"provider" tf:"optional"` + // Sparse checkout settings for the Git folder (repo). + SparseCheckout *SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"` + // URL of the linked Git repository. + Url types.String `tfsdk:"url" tf:"optional"` +} + type CreateScope struct { // The metadata for the secret scope if the type is `AZURE_KEYVAULT` BackendAzureKeyvault *AzureKeyVaultSecretScopeMetadata `tfsdk:"backend_azure_keyvault" tf:"optional"` @@ -105,19 +114,11 @@ type CreateScopeResponse struct { type CredentialInfo struct { // ID of the credential object in the workspace. - CredentialId types.Int64 `tfsdk:"credential_id" tf:"optional"` - // Git provider. This field is case-insensitive. The available Git providers - // are gitHub, gitHubOAuth, bitbucketCloud, gitLab, azureDevOpsServices, - // gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and - // awsCodeCommit. + CredentialId types.Int64 `tfsdk:"credential_id" tf:""` + // The Git provider associated with the credential. GitProvider types.String `tfsdk:"git_provider" tf:"optional"` - // The username or email provided with your Git provider account, depending - // on which provider you are using. For GitHub, GitHub Enterprise Server, or - // Azure DevOps Services, either email or username may be used. For GitLab, - // GitLab Enterprise Edition, email must be used. For AWS CodeCommit, - // BitBucket or BitBucket Server, username must be used. For all other - // providers please see your provider's Personal Access Token authentication - // documentation to see what is supported. + // The username or email provided with your Git provider account and + // associated with the credential. GitUsername types.String `tfsdk:"git_username" tf:"optional"` } @@ -142,17 +143,23 @@ type DeleteAclResponse struct { } // Delete a credential -type DeleteGitCredentialRequest struct { +type DeleteCredentialsRequest struct { // The ID for the corresponding credential to access. CredentialId types.Int64 `tfsdk:"-"` } +type DeleteCredentialsResponse struct { +} + // Delete a repo type DeleteRepoRequest struct { - // The ID for the corresponding repo to access. + // ID of the Git folder (repo) object in the workspace. RepoId types.Int64 `tfsdk:"-"` } +type DeleteRepoResponse struct { +} + type DeleteResponse struct { } @@ -212,16 +219,22 @@ type GetAclRequest struct { Scope types.String `tfsdk:"-"` } -type GetCredentialsResponse struct { - Credentials []CredentialInfo `tfsdk:"credentials" tf:"optional"` -} - // Get a credential entry -type GetGitCredentialRequest struct { +type GetCredentialsRequest struct { // The ID for the corresponding credential to access. CredentialId types.Int64 `tfsdk:"-"` } +type GetCredentialsResponse struct { + // ID of the credential object in the workspace. + CredentialId types.Int64 `tfsdk:"credential_id" tf:""` + // The Git provider associated with the credential. 
+	GitProvider types.String `tfsdk:"git_provider" tf:"optional"`
+	// The username or email provided with your Git provider account and
+	// associated with the credential.
+	GitUsername types.String `tfsdk:"git_username" tf:"optional"`
+}
+
 // Get repo permission levels
 type GetRepoPermissionLevelsRequest struct {
 	// The repo for which to get or manage permissions.
@@ -241,10 +254,27 @@ type GetRepoPermissionsRequest struct {
 
 // Get a repo
 type GetRepoRequest struct {
-	// The ID for the corresponding repo to access.
+	// ID of the Git folder (repo) object in the workspace.
 	RepoId types.Int64 `tfsdk:"-"`
 }
 
+type GetRepoResponse struct {
+	// Branch that the local version of the repo is checked out to.
+	Branch types.String `tfsdk:"branch" tf:"optional"`
+	// SHA-1 hash representing the commit ID of the current HEAD of the repo.
+	HeadCommitId types.String `tfsdk:"head_commit_id" tf:"optional"`
+	// ID of the Git folder (repo) object in the workspace.
+	Id types.Int64 `tfsdk:"id" tf:"optional"`
+	// Path of the Git folder (repo) in the workspace.
+	Path types.String `tfsdk:"path" tf:"optional"`
+	// Git provider of the linked Git repository.
+	Provider types.String `tfsdk:"provider" tf:"optional"`
+	// Sparse checkout settings for the Git folder (repo).
+	SparseCheckout *SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"`
+	// URL of the linked Git repository.
+	Url types.String `tfsdk:"url" tf:"optional"`
+}
+
 // Get a secret
 type GetSecretRequest struct {
 	// The key to fetch secret for.
@@ -334,6 +364,11 @@ type ListAclsResponse struct {
 	Items []AclItem `tfsdk:"items" tf:"optional"`
 }
 
+type ListCredentialsResponse struct {
+	// List of credentials.
+	Credentials []CredentialInfo `tfsdk:"credentials" tf:"optional"`
+}
+
 // Get repos
 type ListReposRequest struct {
 	// Token used to get the next page of results. If not specified, returns the
@@ -341,15 +376,16 @@ type ListReposRequest struct {
 	// results.
 	NextPageToken types.String `tfsdk:"-"`
 	// Filters repos that have paths starting with the given path prefix. If not
-	// provided repos from /Repos will be served.
+	// provided, or when an effectively empty prefix (`/` or `/Workspace`) is
+	// provided, Git folders (repos) from `/Workspace/Repos` will be served.
 	PathPrefix types.String `tfsdk:"-"`
 }
 
 type ListReposResponse struct {
-	// Token that can be specified as a query parameter to the GET /repos
+	// Token that can be specified as a query parameter to the `GET /repos`
 	// endpoint to retrieve the next page of results.
 	NextPageToken types.String `tfsdk:"next_page_token" tf:"optional"`
-
+	// List of Git folders (repos).
 	Repos []RepoInfo `tfsdk:"repos" tf:"optional"`
 }
 
@@ -467,25 +503,21 @@ type RepoAccessControlResponse struct {
 	UserName types.String `tfsdk:"user_name" tf:"optional"`
 }
 
+// Git folder (repo) information.
 type RepoInfo struct {
-	// Branch that the local version of the repo is checked out to.
+	// Name of the current git branch of the git folder (repo).
 	Branch types.String `tfsdk:"branch" tf:"optional"`
-	// SHA-1 hash representing the commit ID of the current HEAD of the repo.
+	// Current git commit id of the git folder (repo).
 	HeadCommitId types.String `tfsdk:"head_commit_id" tf:"optional"`
-	// ID of the repo object in the workspace.
+	// Id of the git folder (repo) in the Workspace.
 	Id types.Int64 `tfsdk:"id" tf:"optional"`
-	// Desired path for the repo in the workspace. Almost any path in the
-	// workspace can be chosen. If repo is created in /Repos, path must be in
-	// the format /Repos/{folder}/{repo-name}.
+	// Root path of the git folder (repo) in the Workspace.
 	Path types.String `tfsdk:"path" tf:"optional"`
-	// Git provider. This field is case-insensitive. The available Git providers
-	// are gitHub, bitbucketCloud, gitLab, azureDevOpsServices,
-	// gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and
-	// awsCodeCommit.
+	// Git provider of the remote git repository, e.g. `gitHub`.
 	Provider types.String `tfsdk:"provider" tf:"optional"`
-
+	// Sparse checkout config for the git folder (repo).
 	SparseCheckout *SparseCheckout `tfsdk:"sparse_checkout" tf:"optional"`
-	// URL of the Git repository to be linked.
+	// URL of the remote git repository.
 	Url types.String `tfsdk:"url" tf:"optional"`
 }
 
@@ -533,24 +565,32 @@ type SecretScope struct {
 	Name types.String `tfsdk:"name" tf:"optional"`
 }
 
+// Sparse checkout configuration; it contains options such as cone patterns.
 type SparseCheckout struct {
-	// List of patterns to include for sparse checkout.
+	// List of sparse checkout cone patterns; see [cone mode handling] for
+	// details.
+	//
+	// [cone mode handling]: https://git-scm.com/docs/git-sparse-checkout#_internalscone_mode_handling
 	Patterns []types.String `tfsdk:"patterns" tf:"optional"`
 }
 
+// Sparse checkout configuration; it contains options such as cone patterns.
 type SparseCheckoutUpdate struct {
-	// List of patterns to include for sparse checkout.
+	// List of sparse checkout cone patterns; see [cone mode handling] for
+	// details.
+	//
+	// [cone mode handling]: https://git-scm.com/docs/git-sparse-checkout#_internalscone_mode_handling
 	Patterns []types.String `tfsdk:"patterns" tf:"optional"`
 }
 
-type UpdateCredentials struct {
+type UpdateCredentialsRequest struct {
 	// The ID for the corresponding credential to access.
 	CredentialId types.Int64 `tfsdk:"-"`
 	// Git provider. This field is case-insensitive. The available Git providers
-	// are gitHub, bitbucketCloud, gitLab, azureDevOpsServices,
-	// gitHubEnterprise, bitbucketServer, gitLabEnterpriseEdition and
-	// awsCodeCommit.
-	GitProvider types.String `tfsdk:"git_provider" tf:"optional"`
+	// are `gitHub`, `bitbucketCloud`, `gitLab`, `azureDevOpsServices`,
+	// `gitHubEnterprise`, `bitbucketServer`, `gitLabEnterpriseEdition` and
+	// `awsCodeCommit`.
+	GitProvider types.String `tfsdk:"git_provider" tf:""`
 	// The username or email provided with your Git provider account, depending
 	// on which provider you are using. For GitHub, GitHub Enterprise Server, or
 	// Azure DevOps Services, either email or username may be used. For GitLab,
@@ -561,17 +601,19 @@ type UpdateCredentials struct {
 	GitUsername types.String `tfsdk:"git_username" tf:"optional"`
 	// The personal access token used to authenticate to the corresponding Git
 	// provider. For certain providers, support may exist for other types of
-	// scoped access tokens. [Learn more]. The personal access token used to
-	// authenticate to the corresponding Git
+	// scoped access tokens. [Learn more].
 	//
 	// [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html
 	PersonalAccessToken types.String `tfsdk:"personal_access_token" tf:"optional"`
 }
 
-type UpdateRepo struct {
+type UpdateCredentialsResponse struct {
+}
+
+type UpdateRepoRequest struct {
 	// Branch that the local version of the repo is checked out to.
 	Branch types.String `tfsdk:"branch" tf:"optional"`
-	// The ID for the corresponding repo to access.
+	// ID of the Git folder (repo) object in the workspace.
 	RepoId types.Int64 `tfsdk:"-"`
 	// If specified, update the sparse checkout settings.
The update will fail // if sparse checkout is not enabled for the repo. @@ -583,7 +625,7 @@ type UpdateRepo struct { Tag types.String `tfsdk:"tag" tf:"optional"` } -type UpdateResponse struct { +type UpdateRepoResponse struct { } type WorkspaceObjectAccessControlRequest struct { diff --git a/mlflow/data_mlflow_models.go b/mlflow/data_mlflow_models.go new file mode 100644 index 0000000000..127b4f465f --- /dev/null +++ b/mlflow/data_mlflow_models.go @@ -0,0 +1,27 @@ +package mlflow + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/service/ml" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/terraform-provider-databricks/common" +) + +type modelsData struct { + Names []string `json:"names,omitempty" tf:"computed"` +} + +func DataSourceModels() common.Resource { + return common.WorkspaceData(func(ctx context.Context, data *modelsData, w *databricks.WorkspaceClient) error { + list, err := w.ModelRegistry.ListModelsAll(ctx, ml.ListModelsRequest{}) + if err != nil { + return err + } + for _, m := range list { + data.Names = append(data.Names, m.Name) + } + return nil + }) +} diff --git a/mlflow/data_mlflow_models_test.go b/mlflow/data_mlflow_models_test.go new file mode 100644 index 0000000000..04ff88be8d --- /dev/null +++ b/mlflow/data_mlflow_models_test.go @@ -0,0 +1,33 @@ +package mlflow + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/stretchr/testify/mock" + + "github.com/databricks/databricks-sdk-go/service/ml" + "github.com/databricks/terraform-provider-databricks/qa" +) + +func TestDataSourceModels(t *testing.T) { + qa.ResourceFixture{ + MockWorkspaceClientFunc: func(w *mocks.MockWorkspaceClient) { + api := w.GetMockModelRegistryAPI() + api.EXPECT().ListModelsAll(mock.Anything, ml.ListModelsRequest{}).Return([]ml.Model{ + { + Name: "model-01", + }, + { + Name: "model-02", + }, + }, nil) + }, + Read: true, + NonWritable: true, + Resource: DataSourceModels(), + ID: ".", + }.ApplyAndExpectData(t, map[string]interface{}{ + "names": []interface{}{"model-01", "model-02"}, + }) +} diff --git a/permissions/entity/permissions_entity.go b/permissions/entity/permissions_entity.go new file mode 100644 index 0000000000..e8c1f4b067 --- /dev/null +++ b/permissions/entity/permissions_entity.go @@ -0,0 +1,18 @@ +package entity + +import "github.com/databricks/databricks-sdk-go/service/iam" + +// PermissionsEntity is the one used for resource metadata +type PermissionsEntity struct { + ObjectType string `json:"object_type,omitempty" tf:"computed"` + AccessControlList []iam.AccessControlRequest `json:"access_control" tf:"slice_set"` +} + +func (p PermissionsEntity) ContainsUserOrServicePrincipal(name string) bool { + for _, ac := range p.AccessControlList { + if ac.UserName == name || ac.ServicePrincipalName == name { + return true + } + } + return false +} diff --git a/permissions/permission_definitions.go b/permissions/permission_definitions.go new file mode 100644 index 0000000000..fbc9158517 --- /dev/null +++ b/permissions/permission_definitions.go @@ -0,0 +1,731 @@ +package permissions + +import ( + "context" + "fmt" + "sort" + "strconv" + "strings" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/terraform-provider-databricks/common" + "github.com/databricks/terraform-provider-databricks/permissions/entity" + "github.com/databricks/terraform-provider-databricks/permissions/read" + 
"github.com/databricks/terraform-provider-databricks/permissions/update" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// resourcePermissions captures all the information needed to manage permissions for a given object type. +type resourcePermissions struct { + // Mandatory Fields + + // The attribute name that users configure with the ID of the object to manage + // e.g. "cluster_id" for a cluster + field string + // The object type to use in the Permissions API, e.g. "cluster" for a cluster. + objectType string + // The name of the object in the ID of the TF resource, e.g. "clusters" for a cluster, + // where the ID would be /clusters/. This should also match the prefix of the + // object ID in the API response, unless idMatcher is set. + requestObjectType string + // The allowed permission levels for this object type and its options. + allowedPermissionLevels map[string]permissionLevelOptions + + // ID Remapping Options + + // Returns the object ID for the given user-specified ID. This is necessary because permissions for + // some objects are done by path, whereas others are by ID. Those by path need to be converted to the + // internal object ID before being stored in the state. If not specified, the default ID is "//". + idRetriever func(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) + // By default, a resourcePermissions can be retrieved based on the structure of the ID, as described above. + // If this function is set, it will be used to determine whether the ID matches this resource type. + idMatcher func(id string) bool + // A custom matcher to check whether a given ID matches this resource type. + // Most resources can be determined by looking at the attribute name used to configure the permission, but + // tokens & passwords are special cases where the resource type is determined by the value of this attribute. + stateMatcher func(id string) bool + + // Behavior Options and Customizations + + // The alternative name of the "path" attribute for this resource. E.g. "workspace_file_path" for a file. + // If not set, default is "_path". + pathVariant string + // If true, the provider will allow the user to configure the "admins" group for this resource type. Otherwise, + // validation will fail if the user tries to configure the "admins" group, and admin configurations in API + // responses will be ignored. This should only be set to true for the "authorization = passwords" resource. + allowConfiguringAdmins bool + // Customizers when handling permission resource creation and update. + // + // Most resources that have a CAN_MANAGE permission level should add update.AddCurrentUserAsManage to this list + // to ensure that the user applying the template always has management permissions on the underlying resource. + updateAclCustomizers []update.ACLCustomizer + // Customizers when handling permission resource deletion. + // + // Most resources that have a CAN_MANAGE permission level should add update.AddCurrentUserAsManage to this list + // to ensure that the user applying the template always has management permissions on the underlying resource. + deleteAclCustomizers []update.ACLCustomizer + // Customizers when handling permission resource read. + // + // Resources for which admins inherit permissions should add removeAdminPermissionsCustomizer to this list. This + // prevents the admin group from being included in the permissions when reading the state. + readAclCustomizers []read.ACLCustomizer + + // Returns the creator of the object. 
+	// Used when deleting databricks_permissions resources, when the creator of
+	// the object is restored as the owner.
+	fetchObjectCreator func(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (string, error)
+}
+
+// getAllowedPermissionLevels returns the list of permission levels that are allowed for this resource type.
+func (p resourcePermissions) getAllowedPermissionLevels(includeNonManagementPermissions bool) []string {
+	levels := make([]string, 0, len(p.allowedPermissionLevels))
+	for level := range p.allowedPermissionLevels {
+		if includeNonManagementPermissions || p.allowedPermissionLevels[level].isManagementPermission {
+			levels = append(levels, level)
+		}
+	}
+	sort.Strings(levels)
+	return levels
+}
+
+// resourceStatus captures the status of a resource with permissions. If the resource doesn't exist,
+// the provider will not try to update its permissions. Otherwise, the creator will be returned if
+// it can be determined for the given resource type.
+type resourceStatus struct {
+	exists  bool
+	creator string
+}
+
+// getObjectStatus returns the creator of the object and whether the object exists. If the object creator cannot be determined for this
+// resource type, an empty string is returned. Resources without fetchObjectCreator are assumed to exist and have an unknown creator.
+func (p resourcePermissions) getObjectStatus(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (resourceStatus, error) {
+	if p.fetchObjectCreator != nil {
+		creator, err := p.fetchObjectCreator(ctx, w, objectID)
+		if err != nil {
+			return resourceStatus{}, err
+		}
+		return resourceStatus{exists: creator != "", creator: creator}, nil
+	}
+	return resourceStatus{exists: true, creator: ""}, nil
+}
+
+// getPathVariant returns the name of the path attribute for this resource type.
+func (p resourcePermissions) getPathVariant() string {
+	if p.pathVariant != "" {
+		return p.pathVariant
+	}
+	return p.objectType + "_path"
+}
+
+// validate checks that the user is not trying to set permissions for the admin group or remove their own management permissions.
+func (p resourcePermissions) validate(ctx context.Context, entity entity.PermissionsEntity, currentUsername string) error {
+	for _, change := range entity.AccessControlList {
+		// Prevent users from setting permissions for admins.
+		if change.GroupName == "admins" && !p.allowConfiguringAdmins {
+			return fmt.Errorf("it is not possible to modify admin permissions for %s resources", p.objectType)
+		}
+		// Reject the change if the current user would lose their ability to manage the object.
+		level := p.allowedPermissionLevels[string(change.PermissionLevel)]
+		if (change.UserName == currentUsername || change.ServicePrincipalName == currentUsername) && !level.isManagementPermission {
+			allowedLevelsForCurrentUser := p.getAllowedPermissionLevels(false)
+			return fmt.Errorf("cannot remove management permissions for the current user for %s, allowed levels: %s", p.objectType, strings.Join(allowedLevelsForCurrentUser, ", "))
+		}
+
+		if level.deprecated != "" {
+			tflog.Debug(ctx, fmt.Sprintf("the permission level %s for %s is deprecated: %s", change.PermissionLevel, p.objectType, level.deprecated))
+		}
+	}
+	return nil
+}
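Editor's note: the self-lockout rule in `validate` is subtle enough to deserve a focused unit test. A minimal in-package sketch (the username and permission levels are invented; it relies only on behavior visible above):

```go
package permissions

import (
	"context"
	"testing"

	"github.com/databricks/databricks-sdk-go/service/iam"
	"github.com/databricks/terraform-provider-databricks/permissions/entity"
)

func TestValidateRejectsSelfDowngrade(t *testing.T) {
	p := resourcePermissions{
		objectType: "cluster",
		allowedPermissionLevels: map[string]permissionLevelOptions{
			"CAN_ATTACH_TO": {isManagementPermission: false},
			"CAN_MANAGE":    {isManagementPermission: true},
		},
	}
	// The current user tries to keep only a non-management level for themselves.
	err := p.validate(context.Background(), entity.PermissionsEntity{
		AccessControlList: []iam.AccessControlRequest{
			{UserName: "me@example.com", PermissionLevel: "CAN_ATTACH_TO"},
		},
	}, "me@example.com")
	if err == nil {
		t.Fatal("expected the self-downgrade to be rejected")
	}
}
```

+
+// getID returns the object ID for the given user-specified ID.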
+func (p resourcePermissions) getID(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) { + var err error + if p.idRetriever != nil { + id, err = p.idRetriever(ctx, w, id) + if err != nil { + return "", err + } + } + return fmt.Sprintf("/%s/%s", p.requestObjectType, id), nil +} + +// prepareForUpdate prepares the access control list for an update request by calling all update customizers. +func (p resourcePermissions) prepareForUpdate(objectID string, e entity.PermissionsEntity, currentUser string) (entity.PermissionsEntity, error) { + cachedCurrentUser := func() (string, error) { return currentUser, nil } + ctx := update.ACLCustomizerContext{ + GetCurrentUser: cachedCurrentUser, + GetId: func() string { return objectID }, + } + var err error + for _, customizer := range p.updateAclCustomizers { + e.AccessControlList, err = customizer(ctx, e.AccessControlList) + if err != nil { + return entity.PermissionsEntity{}, err + } + } + return e, nil +} + +// prepareForDelete prepares the access control list for a delete request by calling all delete customizers. +func (p resourcePermissions) prepareForDelete(objectACL *iam.ObjectPermissions, getCurrentUser func() (string, error)) ([]iam.AccessControlRequest, error) { + accl := make([]iam.AccessControlRequest, 0, len(objectACL.AccessControlList)) + // By default, only admins have access to a resource when databricks_permissions for that resource are deleted. + for _, acl := range objectACL.AccessControlList { + if acl.GroupName != "admins" { + continue + } + for _, permission := range acl.AllPermissions { + if !permission.Inherited { + // keep everything direct for admin group + accl = append(accl, iam.AccessControlRequest{ + GroupName: acl.GroupName, + PermissionLevel: permission.PermissionLevel, + }) + break + } + } + } + ctx := update.ACLCustomizerContext{ + GetCurrentUser: getCurrentUser, + GetId: func() string { return objectACL.ObjectId }, + } + var err error + for _, customizer := range p.deleteAclCustomizers { + accl, err = customizer(ctx, accl) + if err != nil { + return nil, err + } + } + return accl, nil +} + +// prepareResponse prepares the access control list for a read response by calling all read customizers. +// +// If the user does not include an access_control block for themselves, it will not be included in the state. This +// prevents diffs when the applying user is not included in the access_control block for the resource but is +// added by update.AddCurrentUserAsManage. +// +// Read customizers are able to access the current state of the object in order to customize the response accordingly. +// For example, the SQL API previously used CAN_VIEW for read-only permission, but the GA API uses CAN_READ. Users may +// have CAN_VIEW in their resource configuration, so the read customizer will rewrite the response from CAN_READ to +// CAN_VIEW to match the user's configuration. 
+func (p resourcePermissions) prepareResponse(objectID string, objectACL *iam.ObjectPermissions, existing entity.PermissionsEntity, me string) (entity.PermissionsEntity, error) {
+	ctx := read.ACLCustomizerContext{
+		GetId:                        func() string { return objectID },
+		GetExistingPermissionsEntity: func() entity.PermissionsEntity { return existing },
+	}
+	acl := *objectACL
+	for _, customizer := range p.readAclCustomizers {
+		acl = customizer(ctx, acl)
+	}
+	if acl.ObjectType != p.objectType {
+		return entity.PermissionsEntity{}, fmt.Errorf("expected object type %s, got %s", p.objectType, objectACL.ObjectType)
+	}
+	entity := entity.PermissionsEntity{}
+	for _, accessControl := range acl.AccessControlList {
+		// If the user doesn't include an access_control block for themselves, do not include it in the state.
+		// On create/update, the provider will automatically include the current user in the access_control block
+		// for appropriate resources. Otherwise, it must be included in state to prevent configuration drift.
+		if me == accessControl.UserName || me == accessControl.ServicePrincipalName {
+			if !existing.ContainsUserOrServicePrincipal(me) {
+				continue
+			}
+		}
+		// Skip admin permissions for resources where users are not allowed to explicitly configure them.
+		if accessControl.GroupName == "admins" && !p.allowConfiguringAdmins {
+			continue
+		}
+		for _, permission := range accessControl.AllPermissions {
+			// Inherited permissions can be ignored, as they are not set by the user.
+			if permission.Inherited {
+				continue
+			}
+			entity.AccessControlList = append(entity.AccessControlList, iam.AccessControlRequest{
+				GroupName:            accessControl.GroupName,
+				UserName:             accessControl.UserName,
+				ServicePrincipalName: accessControl.ServicePrincipalName,
+				PermissionLevel:      permission.PermissionLevel,
+			})
+		}
+	}
+	return entity, nil
+}
+
+// addOwnerPermissionIfNeeded adds the owner permission to the object ACL if the owner permission is allowed and not already set.
+func (p resourcePermissions) addOwnerPermissionIfNeeded(objectACL []iam.AccessControlRequest, ownerOpt string) []iam.AccessControlRequest {
+	_, ok := p.allowedPermissionLevels["IS_OWNER"]
+	if !ok {
+		return objectACL
+	}
+
+	for _, acl := range objectACL {
+		if acl.PermissionLevel == "IS_OWNER" {
+			return objectACL
+		}
+	}
+
+	return append(objectACL, iam.AccessControlRequest{
+		UserName:        ownerOpt,
+		PermissionLevel: "IS_OWNER",
+	})
+}
+
+// permissionLevelOptions indicates the properties of a permission level: whether holders of the
+// level can manage the resource, and whether the level is deprecated.
+type permissionLevelOptions struct {
+	// Whether users with this permission level are allowed to manage the resource.
+	// For some resources where ACLs don't define who can manage the resource, this might be unintuitive,
+	// e.g. all cluster policies permissions are considered management permissions because cluster policy
+	// ACLs don't define who can manage the cluster policy.
+	isManagementPermission bool
+
+	// If non-empty, the permission level is deprecated. The string is a message to display to the user when
+	// this permission level is used.
+	deprecated string
+}
+
+// getResourcePermissionsFromId returns the resourcePermissions for the given object ID.
+func getResourcePermissionsFromId(id string) (resourcePermissions, error) {
+	idParts := strings.Split(id, "/")
+	objectType := strings.Join(idParts[1:len(idParts)-1], "/")
+	for _, mapping := range allResourcePermissions() {
+		if mapping.idMatcher != nil {
+			if mapping.idMatcher(id) {
+				return mapping, nil
+			}
+			continue
+		}
+		if mapping.requestObjectType == objectType {
+			return mapping, nil
+		}
+	}
+	return resourcePermissions{}, fmt.Errorf("resource type for %s not found", id)
+}
+
+// getResourcePermissionsFromState returns the resourcePermissions for the given state.
+func getResourcePermissionsFromState(d interface{ GetOk(string) (any, bool) }) (resourcePermissions, string, error) {
+	allPermissions := allResourcePermissions()
+	for _, mapping := range allPermissions {
+		if v, ok := d.GetOk(mapping.field); ok {
+			id := v.(string)
+			if mapping.stateMatcher != nil && !mapping.stateMatcher(id) {
+				continue
+			}
+			return mapping, id, nil
+		}
+	}
+	allFields := make([]string, 0, len(allPermissions))
+	seen := make(map[string]struct{})
+	for _, mapping := range allPermissions {
+		if _, ok := seen[mapping.field]; ok {
+			continue
+		}
+		seen[mapping.field] = struct{}{}
+		allFields = append(allFields, mapping.field)
+	}
+	sort.Strings(allFields)
+	return resourcePermissions{}, "", fmt.Errorf("at least one type of resource identifier must be set; allowed fields: %s", strings.Join(allFields, ", "))
+}
+
+// allResourcePermissions is the list of all resource types that can be managed by the databricks_permissions resource.
+func allResourcePermissions() []resourcePermissions {
+	PATH := func(ctx context.Context, w *databricks.WorkspaceClient, path string) (string, error) {
+		info, err := w.Workspace.GetStatusByPath(ctx, path)
+		if err != nil {
+			return "", fmt.Errorf("cannot load path %s: %s", path, err)
+		}
+		return strconv.FormatInt(info.ObjectId, 10), nil
+	}
+	rewriteCanViewToCanRead := update.RewritePermissions(map[iam.PermissionLevel]iam.PermissionLevel{
+		iam.PermissionLevelCanView: iam.PermissionLevelCanRead,
+	})
+	rewriteCanReadToCanView := read.RewritePermissions(map[iam.PermissionLevel]iam.PermissionLevel{
+		iam.PermissionLevelCanRead: iam.PermissionLevelCanView,
+	})
+	return []resourcePermissions{
+		{
+			field:             "cluster_policy_id",
+			objectType:        "cluster-policy",
+			requestObjectType: "cluster-policies",
+			allowedPermissionLevels: map[string]permissionLevelOptions{
+				"CAN_USE": {isManagementPermission: true},
+			},
+		},
+		{
+			field:             "instance_pool_id",
+			objectType:        "instance-pool",
+			requestObjectType: "instance-pools",
+			allowedPermissionLevels: map[string]permissionLevelOptions{
+				"CAN_ATTACH_TO": {isManagementPermission: false},
+				"CAN_MANAGE":    {isManagementPermission: true},
+			},
+			updateAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage},
+			deleteAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage},
+		},
+		{
+			field:             "cluster_id",
+			objectType:        "cluster",
+			requestObjectType: "clusters",
+			allowedPermissionLevels: map[string]permissionLevelOptions{
+				"CAN_ATTACH_TO": {isManagementPermission: false},
+				"CAN_RESTART":   {isManagementPermission: false},
+				"CAN_MANAGE":    {isManagementPermission: true},
+			},
+			updateAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage},
+			deleteAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage},
+		},
+		{
+			field:      "pipeline_id",
+			objectType: "pipelines",
+ requestObjectType: "pipelines", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_VIEW": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + "IS_OWNER": {isManagementPermission: true}, + }, + fetchObjectCreator: func(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (string, error) { + pipeline, err := w.Pipelines.GetByPipelineId(ctx, strings.ReplaceAll(objectID, "/pipelines/", "")) + if err != nil { + return "", common.IgnoreNotFoundError(err) + } + return pipeline.CreatorUserName, nil + }, + }, + { + field: "job_id", + objectType: "job", + requestObjectType: "jobs", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_VIEW": {isManagementPermission: false}, + "CAN_MANAGE_RUN": {isManagementPermission: false}, + "IS_OWNER": {isManagementPermission: true}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + fetchObjectCreator: func(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (string, error) { + jobId, err := strconv.ParseInt(strings.ReplaceAll(objectID, "/jobs/", ""), 10, 64) + if err != nil { + return "", err + } + job, err := w.Jobs.GetByJobId(ctx, jobId) + if err != nil { + return "", common.IgnoreNotFoundError(err) + } + return job.CreatorUserName, nil + }, + }, + { + field: "notebook_id", + objectType: "notebook", + requestObjectType: "notebooks", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + }, + { + field: "notebook_path", + objectType: "notebook", + requestObjectType: "notebooks", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + idRetriever: PATH, + }, + { + field: "directory_id", + objectType: "directory", + requestObjectType: "directories", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + updateAclCustomizers: []update.ACLCustomizer{ + update.If(update.ObjectIdMatches("/directories/0"), update.AddAdmin), + }, + deleteAclCustomizers: []update.ACLCustomizer{ + update.If(update.ObjectIdMatches("/directories/0"), update.AddAdmin), + }, + }, + { + field: "directory_path", + objectType: "directory", + requestObjectType: "directories", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + idRetriever: PATH, + updateAclCustomizers: []update.ACLCustomizer{ + update.If(update.ObjectIdMatches("/directories/0"), update.AddAdmin), + }, + deleteAclCustomizers: []update.ACLCustomizer{ + update.If(update.ObjectIdMatches("/directories/0"), update.AddAdmin), + }, + }, + { + field: "workspace_file_id", + objectType: "file", + requestObjectType: "files", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": 
{isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + pathVariant: "workspace_file_path", + }, + { + field: "workspace_file_path", + objectType: "file", + requestObjectType: "files", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + idRetriever: PATH, + pathVariant: "workspace_file_path", + }, + { + field: "repo_id", + objectType: "repo", + requestObjectType: "repos", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + }, + { + field: "repo_path", + objectType: "repo", + requestObjectType: "repos", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + idRetriever: PATH, + }, + { + field: "authorization", + objectType: "tokens", + requestObjectType: "authorization", + stateMatcher: func(id string) bool { + return id == "tokens" + }, + idMatcher: func(id string) bool { + return id == "/authorization/tokens" + }, + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_USE": {isManagementPermission: true}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + updateAclCustomizers: []update.ACLCustomizer{ + update.If(update.ObjectIdMatches("/authorization/tokens"), update.AddAdmin), + }, + }, + { + field: "authorization", + objectType: "passwords", + requestObjectType: "authorization", + stateMatcher: func(id string) bool { + return id == "passwords" + }, + idMatcher: func(id string) bool { + return id == "/authorization/passwords" + }, + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_USE": {isManagementPermission: true}, + }, + allowConfiguringAdmins: true, + }, + { + field: "sql_endpoint_id", + objectType: "warehouses", + requestObjectType: "sql/warehouses", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_USE": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + "CAN_MONITOR": {isManagementPermission: false}, + "IS_OWNER": {isManagementPermission: true}, + }, + updateAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + deleteAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + fetchObjectCreator: func(ctx context.Context, w *databricks.WorkspaceClient, objectID string) (string, error) { + warehouse, err := w.Warehouses.GetById(ctx, strings.ReplaceAll(objectID, "/sql/warehouses/", "")) + if err != nil { + return "", common.IgnoreNotFoundError(err) + } + return warehouse.CreatorName, nil + }, + }, + { + field: "sql_dashboard_id", + objectType: "dashboard", + requestObjectType: "dbsql-dashboards", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_EDIT": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + "CAN_READ": {isManagementPermission: false}, + // This was part of the original SQL permissions API but was replaced by CAN_READ in the GA API. 
+ "CAN_VIEW": { + isManagementPermission: false, + deprecated: "use CAN_READ instead", + }, + }, + idMatcher: func(id string) bool { + return strings.HasPrefix(id, "/dbsql-dashboards/") || strings.HasPrefix(id, "/sql/dashboards/") + }, + updateAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + rewriteCanViewToCanRead, + }, + deleteAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + rewriteCanViewToCanRead, + }, + readAclCustomizers: []read.ACLCustomizer{ + rewriteCanReadToCanView, + func(ctx read.ACLCustomizerContext, objectAcls iam.ObjectPermissions) iam.ObjectPermissions { + // The object type in the new API is "dbsql-dashboard", but for compatibility this should + // be "dashboard" in the state. + objectAcls.ObjectType = "dashboard" + return objectAcls + }, + }, + }, + { + field: "sql_alert_id", + objectType: "alert", + requestObjectType: "sql/alerts", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_EDIT": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + "CAN_READ": {isManagementPermission: false}, + // This was part of the original SQL permissions API but was replaced by CAN_READ in the GA API. + // It should eventually be deprecated. + "CAN_VIEW": { + isManagementPermission: false, + deprecated: "use CAN_READ instead", + }, + }, + updateAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + rewriteCanViewToCanRead, + }, + deleteAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + rewriteCanViewToCanRead, + }, + readAclCustomizers: []read.ACLCustomizer{ + rewriteCanReadToCanView, + }, + }, + { + field: "sql_query_id", + objectType: "query", + requestObjectType: "sql/queries", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_EDIT": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + "CAN_READ": {isManagementPermission: false}, + // This was part of the original SQL permissions API but was replaced by CAN_READ in the GA API. + // It should eventually be deprecated. 
+ "CAN_VIEW": { + isManagementPermission: false, + deprecated: "use CAN_READ instead", + }, + }, + updateAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + rewriteCanViewToCanRead, + }, + deleteAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + rewriteCanViewToCanRead, + }, + readAclCustomizers: []read.ACLCustomizer{ + rewriteCanReadToCanView, + }, + }, + { + field: "dashboard_id", + objectType: "dashboard", + requestObjectType: "dashboards", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_EDIT": {isManagementPermission: false}, + "CAN_RUN": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + "CAN_READ": {isManagementPermission: false}, + }, + readAclCustomizers: []read.ACLCustomizer{ + func(ctx read.ACLCustomizerContext, objectAcls iam.ObjectPermissions) iam.ObjectPermissions { + if strings.HasPrefix(objectAcls.ObjectId, "/dashboards/") { + // workaround for inconsistent API response returning object ID of file in the workspace + objectAcls.ObjectId = ctx.GetId() + } + return objectAcls + }, + }, + }, + { + field: "experiment_id", + objectType: "mlflowExperiment", + requestObjectType: "experiments", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + }, + { + field: "registered_model_id", + objectType: "registered-model", + requestObjectType: "registered-models", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_READ": {isManagementPermission: false}, + "CAN_EDIT": {isManagementPermission: false}, + "CAN_MANAGE_STAGING_VERSIONS": {isManagementPermission: false}, + "CAN_MANAGE_PRODUCTION_VERSIONS": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + updateAclCustomizers: []update.ACLCustomizer{ + update.AddCurrentUserAsManage, + update.If(update.ObjectIdMatches("/registered-models/root"), update.AddAdmin), + }, + deleteAclCustomizers: []update.ACLCustomizer{ + update.If(update.Not(update.ObjectIdMatches("/registered-models/root")), update.AddCurrentUserAsManage), + }, + }, + { + field: "serving_endpoint_id", + objectType: "serving-endpoint", + requestObjectType: "serving-endpoints", + allowedPermissionLevels: map[string]permissionLevelOptions{ + "CAN_VIEW": {isManagementPermission: false}, + "CAN_QUERY": {isManagementPermission: false}, + "CAN_MANAGE": {isManagementPermission: true}, + }, + updateAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + deleteAclCustomizers: []update.ACLCustomizer{update.AddCurrentUserAsManage}, + }, + } +} diff --git a/permissions/read/customizers.go b/permissions/read/customizers.go new file mode 100644 index 0000000000..3cee278fbb --- /dev/null +++ b/permissions/read/customizers.go @@ -0,0 +1,54 @@ +package read + +import ( + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/terraform-provider-databricks/permissions/entity" +) + +// Context that is available to aclReadCustomizer implementations. +type ACLCustomizerContext struct { + GetId func() string + GetExistingPermissionsEntity func() entity.PermissionsEntity +} + +// ACLCustomizer is a function that modifies the access control list of an object after it is read. 
+type ACLCustomizer func(ctx ACLCustomizerContext, objectAcls iam.ObjectPermissions) iam.ObjectPermissions
+
+// Rewrites the permission level of the access control list of an object after it is read.
+// This is done only for resources in state where the permission level is equal to the replacement value
+// in the mapping. For example, the permissions endpoint used to use the "CAN_VIEW" permission level for
+// read-only access, but this was changed to "CAN_READ". Users who previously used "CAN_VIEW" should not
+// be forced to change to "CAN_READ". This customizer will rewrite "CAN_READ" to "CAN_VIEW" when the
+// user-specified value is CAN_VIEW and the API response is CAN_READ.
+func RewritePermissions(mapping map[iam.PermissionLevel]iam.PermissionLevel) ACLCustomizer {
+	findOriginalAcl := func(new iam.AccessControlResponse, original entity.PermissionsEntity) (iam.AccessControlRequest, bool) {
+		for _, old := range original.AccessControlList {
+			if new.GroupName != "" && old.GroupName == new.GroupName {
+				return old, true
+			}
+			if new.UserName != "" && old.UserName == new.UserName {
+				return old, true
+			}
+			if new.ServicePrincipalName != "" && old.ServicePrincipalName == new.ServicePrincipalName {
+				return old, true
+			}
+		}
+		return iam.AccessControlRequest{}, false
+	}
+	return func(ctx ACLCustomizerContext, acl iam.ObjectPermissions) iam.ObjectPermissions {
+		original := ctx.GetExistingPermissionsEntity()
+		for i := range acl.AccessControlList {
+			inState, found := findOriginalAcl(acl.AccessControlList[i], original)
+			for j := range acl.AccessControlList[i].AllPermissions {
+				// If the original permission level is remapped to a replacement level, and the permission level
+				// in state is equal to the replacement level, we rewrite it to the replacement level.
+				original := acl.AccessControlList[i].AllPermissions[j].PermissionLevel
+				replacement, ok := mapping[original]
+				if ok && found && inState.PermissionLevel == replacement {
+					acl.AccessControlList[i].AllPermissions[j].PermissionLevel = replacement
+				}
+			}
+		}
+		return acl
+	}
+}
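Editor's note: the state comparison above is the crux, so a test sketch helps to pin it down: CAN_READ from the API is surfaced as CAN_VIEW only for principals whose state still says CAN_VIEW. The identities are invented; the behavior is as implemented above:

```go
package read

import (
	"testing"

	"github.com/databricks/databricks-sdk-go/service/iam"
	"github.com/databricks/terraform-provider-databricks/permissions/entity"
)

func TestRewriteKeepsConfiguredCanView(t *testing.T) {
	rewrite := RewritePermissions(map[iam.PermissionLevel]iam.PermissionLevel{
		iam.PermissionLevelCanRead: iam.PermissionLevelCanView,
	})
	ctx := ACLCustomizerContext{
		GetId: func() string { return "/sql/alerts/abc" },
		GetExistingPermissionsEntity: func() entity.PermissionsEntity {
			return entity.PermissionsEntity{AccessControlList: []iam.AccessControlRequest{
				{UserName: "viewer@example.com", PermissionLevel: iam.PermissionLevelCanView},
			}}
		},
	}
	out := rewrite(ctx, iam.ObjectPermissions{AccessControlList: []iam.AccessControlResponse{
		{UserName: "viewer@example.com", AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}},
		{UserName: "other@example.com", AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}},
	}})
	// viewer keeps the legacy level from state; other stays on CAN_READ.
	if out.AccessControlList[0].AllPermissions[0].PermissionLevel != iam.PermissionLevelCanView {
		t.Errorf("expected CAN_VIEW for viewer@example.com")
	}
	if out.AccessControlList[1].AllPermissions[0].PermissionLevel != iam.PermissionLevelCanRead {
		t.Errorf("expected CAN_READ for other@example.com")
	}
}
```

diff --git a/permissions/resource_permissions.go b/permissions/resource_permissions.go
index fb0b24eebf..6eb138fb80 100644
--- a/permissions/resource_permissions.go
+++ b/permissions/resource_permissions.go
@@ -4,96 +4,17 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"log"
 	"path"
-	"strconv"
 	"strings"
 
-	"github.com/databricks/databricks-sdk-go"
 	"github.com/databricks/databricks-sdk-go/apierr"
+	"github.com/databricks/databricks-sdk-go/service/iam"
 	"github.com/databricks/terraform-provider-databricks/common"
+	"github.com/databricks/terraform-provider-databricks/permissions/entity"
 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
 )
 
-// ObjectACL is a structure to generically describe access control
-type ObjectACL struct {
-	ObjectID          string          `json:"object_id,omitempty"`
-	ObjectType        string          `json:"object_type,omitempty"`
-	AccessControlList []AccessControl `json:"access_control_list"`
-}
-
-// AccessControl is a structure to describe user/group permissions
-type AccessControl struct {
-	UserName             string       `json:"user_name,omitempty"`
-	GroupName            string       `json:"group_name,omitempty"`
-	ServicePrincipalName string       `json:"service_principal_name,omitempty"`
-	AllPermissions       []Permission `json:"all_permissions,omitempty"`
-
-	// SQLA entities don't use the `all_permissions` nesting, but rather a simple
-	// top level string with the permission level when retrieving permissions.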
- PermissionLevel string `json:"permission_level,omitempty"` -} - -func (ac AccessControl) toAccessControlChange() (AccessControlChange, bool) { - for _, permission := range ac.AllPermissions { - if permission.Inherited { - continue - } - return AccessControlChange{ - PermissionLevel: permission.PermissionLevel, - UserName: ac.UserName, - GroupName: ac.GroupName, - ServicePrincipalName: ac.ServicePrincipalName, - }, true - } - if ac.PermissionLevel != "" { - return AccessControlChange{ - PermissionLevel: ac.PermissionLevel, - UserName: ac.UserName, - GroupName: ac.GroupName, - ServicePrincipalName: ac.ServicePrincipalName, - }, true - } - return AccessControlChange{}, false -} - -func (ac AccessControl) String() string { - return fmt.Sprintf("%s%s%s%v", ac.GroupName, ac.UserName, ac.ServicePrincipalName, ac.AllPermissions) -} - -// Permission is a structure to describe permission level -type Permission struct { - PermissionLevel string `json:"permission_level"` - Inherited bool `json:"inherited,omitempty"` - InheritedFromObject []string `json:"inherited_from_object,omitempty"` -} - -func (p Permission) String() string { - if len(p.InheritedFromObject) > 0 { - return fmt.Sprintf("%s (from %s)", p.PermissionLevel, p.InheritedFromObject) - } - return p.PermissionLevel -} - -// AccessControlChangeList is wrapper around ACL changes for REST API -type AccessControlChangeList struct { - AccessControlList []AccessControlChange `json:"access_control_list"` -} - -// AccessControlChange is API wrapper for changing permissions -type AccessControlChange struct { - UserName string `json:"user_name,omitempty"` - GroupName string `json:"group_name,omitempty"` - ServicePrincipalName string `json:"service_principal_name,omitempty"` - PermissionLevel string `json:"permission_level"` -} - -func (acc AccessControlChange) String() string { - return fmt.Sprintf("%v%v%v %s", acc.UserName, acc.GroupName, acc.ServicePrincipalName, - acc.PermissionLevel) -} - // NewPermissionsAPI creates PermissionsAPI instance from provider meta func NewPermissionsAPI(ctx context.Context, m any) PermissionsAPI { return PermissionsAPI{ @@ -108,187 +29,103 @@ type PermissionsAPI struct { context context.Context } -func isDbsqlPermissionsWorkaroundNecessary(objectID string) bool { - return strings.HasPrefix(objectID, "/sql/") && !strings.HasPrefix(objectID, "/sql/warehouses") -} - -func urlPathForObjectID(objectID string) string { - if isDbsqlPermissionsWorkaroundNecessary(objectID) { - // Permissions for SQLA entities are routed differently from the others. - return "/preview/sql/permissions" + objectID[4:] - } - return "/permissions" + objectID -} - -// As described in https://github.com/databricks/terraform-provider-databricks/issues/1504, -// certain object types require that we explicitly grant the calling user CAN_MANAGE -// permissions when POSTing permissions changes through the REST API, to avoid accidentally -// revoking the calling user's ability to manage the current object. 
-func (a PermissionsAPI) shouldExplicitlyGrantCallingUserManagePermissions(objectID string) bool {
-	for _, prefix := range [...]string{"/registered-models/", "/clusters/", "/instance-pools/", "/serving-endpoints/", "/queries/", "/sql/warehouses"} {
-		if strings.HasPrefix(objectID, prefix) {
-			return true
-		}
-	}
-	return isDbsqlPermissionsWorkaroundNecessary(objectID)
-}
-
-func isOwnershipWorkaroundNecessary(objectID string) bool {
-	return strings.HasPrefix(objectID, "/jobs") || strings.HasPrefix(objectID, "/pipelines") || strings.HasPrefix(objectID, "/sql/warehouses")
-}
-
-func (a PermissionsAPI) getObjectCreator(objectID string) (string, error) {
+// safePutWithOwner is a workaround for the limitation that a warehouse without an owner cannot have IS_OWNER set.
+func (a PermissionsAPI) safePutWithOwner(objectID string, objectACL []iam.AccessControlRequest, mapping resourcePermissions, ownerOpt string) error {
 	w, err := a.client.WorkspaceClient()
 	if err != nil {
-		return "", err
+		return err
 	}
-	if strings.HasPrefix(objectID, "/jobs") {
-		jobId, err := strconv.ParseInt(strings.ReplaceAll(objectID, "/jobs/", ""), 10, 64)
-		if err != nil {
-			return "", err
-		}
-		job, err := w.Jobs.GetByJobId(a.context, jobId)
-		if err != nil {
-			return "", common.IgnoreNotFoundError(err)
-		}
-		return job.CreatorUserName, nil
-	} else if strings.HasPrefix(objectID, "/pipelines") {
-		pipeline, err := w.Pipelines.GetByPipelineId(a.context, strings.ReplaceAll(objectID, "/pipelines/", ""))
-		if err != nil {
-			return "", common.IgnoreNotFoundError(err)
-		}
-		return pipeline.CreatorUserName, nil
-	} else if strings.HasPrefix(objectID, "/sql/warehouses") {
-		warehouse, err := w.Warehouses.GetById(a.context, strings.ReplaceAll(objectID, "/sql/warehouses/", ""))
-		if err != nil {
-			return "", common.IgnoreNotFoundError(err)
+	idParts := strings.Split(objectID, "/")
+	id := idParts[len(idParts)-1]
+	withOwner := mapping.addOwnerPermissionIfNeeded(objectACL, ownerOpt)
+	_, err = w.Permissions.Set(a.context, iam.PermissionsRequest{
+		RequestObjectId:   id,
+		RequestObjectType: mapping.requestObjectType,
+		AccessControlList: withOwner,
+	})
+	if err != nil {
+		if strings.Contains(err.Error(), "with no existing owner must provide a new owner") {
+			_, err = w.Permissions.Set(a.context, iam.PermissionsRequest{
+				RequestObjectId:   id,
+				RequestObjectType: mapping.requestObjectType,
+				AccessControlList: objectACL,
+			})
 		}
-		return warehouse.CreatorName, nil
+		return err
 	}
-	return "", nil
+	return nil
 }
 
-func (a PermissionsAPI) ensureCurrentUserCanManageObject(objectID string, objectACL AccessControlChangeList) (AccessControlChangeList, error) {
-	if !a.shouldExplicitlyGrantCallingUserManagePermissions(objectID) {
-		return objectACL, nil
-	}
+func (a PermissionsAPI) getCurrentUser() (string, error) {
 	w, err := a.client.WorkspaceClient()
 	if err != nil {
-		return objectACL, err
+		return "", err
 	}
 	me, err := w.CurrentUser.Me(a.context)
 	if err != nil {
-		return objectACL, err
+		return "", err
 	}
-	objectACL.AccessControlList = append(objectACL.AccessControlList, AccessControlChange{
-		UserName:        me.UserName,
-		PermissionLevel: "CAN_MANAGE",
-	})
-	return objectACL, nil
+	return me.UserName, nil
 }
 
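Editor's note: safePutWithOwner leans on addOwnerPermissionIfNeeded, whose behavior is easy to verify in isolation. An in-package sketch (names invented):

```go
package permissions

import (
	"testing"

	"github.com/databricks/databricks-sdk-go/service/iam"
)

func TestOwnerInjectedOnlyWhenAllowed(t *testing.T) {
	acl := []iam.AccessControlRequest{{GroupName: "admins", PermissionLevel: "CAN_MANAGE"}}

	// Mapping allows IS_OWNER and none is present: the owner gets appended.
	withOwner := resourcePermissions{allowedPermissionLevels: map[string]permissionLevelOptions{
		"IS_OWNER":   {isManagementPermission: true},
		"CAN_MANAGE": {isManagementPermission: true},
	}}
	if got := withOwner.addOwnerPermissionIfNeeded(acl, "creator@example.com"); len(got) != 2 || got[1].PermissionLevel != "IS_OWNER" {
		t.Errorf("expected IS_OWNER to be appended, got %v", got)
	}

	// Mapping without IS_OWNER (e.g. notebooks): the ACL is returned untouched.
	noOwner := resourcePermissions{allowedPermissionLevels: map[string]permissionLevelOptions{
		"CAN_MANAGE": {isManagementPermission: true},
	}}
	if got := noOwner.addOwnerPermissionIfNeeded(acl, "creator@example.com"); len(got) != 1 {
		t.Errorf("expected the ACL to be unchanged, got %v", got)
	}
}
```

-// Helper function for applying permissions changes. Ensures that
-// we select the correct HTTP method based on the object type and preserve the calling
-// user's ability to manage the specified object when applying permissions changes.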
-func (a PermissionsAPI) put(objectID string, objectACL AccessControlChangeList) error {
-	objectACL, err := a.ensureCurrentUserCanManageObject(objectID, objectACL)
+// Update updates object permissions. Technically, it uses a method named SetOrDelete, but here we do more.
+func (a PermissionsAPI) Update(objectID string, entity entity.PermissionsEntity, mapping resourcePermissions) error {
+	currentUser, err := a.getCurrentUser()
 	if err != nil {
 		return err
 	}
-	if isDbsqlPermissionsWorkaroundNecessary(objectID) {
-		// SQLA entities use POST for permission updates.
-		return a.client.Post(a.context, urlPathForObjectID(objectID), objectACL, nil)
+	// This logic was moved from CustomizeDiff because of nondeterministic auth behavior
+	// in corner-case scenarios.
+	// See https://github.com/databricks/terraform-provider-databricks/issues/2052
+	err = mapping.validate(a.context, entity, currentUser)
+	if err != nil {
+		return err
 	}
-	log.Printf("[DEBUG] PUT %s %v", objectID, objectACL)
-	return a.client.Put(a.context, urlPathForObjectID(objectID), objectACL)
-}
-
-// safePutWithOwner is a workaround for the limitation where warehouse without owners cannot have IS_OWNER set
-func (a PermissionsAPI) safePutWithOwner(objectID string, objectACL AccessControlChangeList, originalAcl []AccessControlChange) error {
-	err := a.put(objectID, objectACL)
+	prepared, err := mapping.prepareForUpdate(objectID, entity, currentUser)
 	if err != nil {
-		if strings.Contains(err.Error(), "with no existing owner must provide a new owner") {
-			objectACL.AccessControlList = originalAcl
-			return a.put(objectID, objectACL)
-		}
 		return err
 	}
-	return nil
+	return a.safePutWithOwner(objectID, prepared.AccessControlList, mapping, currentUser)
 }
 
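Editor's note: how a resource is expected to drive the new Update is implicit in the signatures. A hypothetical in-package helper (the helper itself and its inputs are the editor's invention; it composes only functions defined in this PR):

```go
package permissions

import (
	"context"

	"github.com/databricks/terraform-provider-databricks/permissions/entity"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// applyPermissions resolves the configured resource to its permissions object
// ID and applies the requested ACL.
func applyPermissions(ctx context.Context, a PermissionsAPI, d *schema.ResourceData, ent entity.PermissionsEntity) error {
	mapping, configuredID, err := getResourcePermissionsFromState(d)
	if err != nil {
		return err
	}
	w, err := a.client.WorkspaceClient()
	if err != nil {
		return err
	}
	// Path-configured resources (e.g. notebook_path) are converted to a
	// workspace object ID here.
	objectID, err := mapping.getID(ctx, w, configuredID)
	if err != nil {
		return err
	}
	return a.Update(objectID, ent, mapping)
}
```

-// Update updates object permissions. Technically, it's using method named SetOrDelete, but here we do more
-func (a PermissionsAPI) Update(objectID string, objectACL AccessControlChangeList) error {
-	if objectID == "/authorization/tokens" || objectID == "/registered-models/root" || objectID == "/directories/0" {
-		// Prevent "Cannot change permissions for group 'admins' to None."
-		objectACL.AccessControlList = append(objectACL.AccessControlList, AccessControlChange{
-			GroupName:       "admins",
-			PermissionLevel: "CAN_MANAGE",
-		})
+// Delete gracefully removes permissions of non-admin users. After this operation, the object is managed
+// by the current user and admin group. If the resource has IS_OWNER permissions, they are reset to the
+// object creator, if it can be determined.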
+func (a PermissionsAPI) Delete(objectID string, mapping resourcePermissions) error { + objectACL, err := a.readRaw(objectID, mapping) + if err != nil { + return err } - originalAcl := make([]AccessControlChange, len(objectACL.AccessControlList)) - _ = copy(originalAcl, objectACL.AccessControlList) - if isOwnershipWorkaroundNecessary(objectID) { - owners := 0 - for _, acl := range objectACL.AccessControlList { - if acl.PermissionLevel == "IS_OWNER" { - owners++ - } - } - if owners == 0 { - w, err := a.client.WorkspaceClient() - if err != nil { - return err - } - me, err := w.CurrentUser.Me(a.context) - if err != nil { - return err - } - // add owner if it's missing, otherwise automated planning might be difficult - objectACL.AccessControlList = append(objectACL.AccessControlList, AccessControlChange{ - UserName: me.UserName, - PermissionLevel: "IS_OWNER", - }) - } + accl, err := mapping.prepareForDelete(objectACL, a.getCurrentUser) + if err != nil { + return err } - return a.safePutWithOwner(objectID, objectACL, originalAcl) -} - -// Delete gracefully removes permissions. Technically, it's using method named SetOrDelete, but here we do more -func (a PermissionsAPI) Delete(objectID string) error { - objectACL, err := a.Read(objectID) + w, err := a.client.WorkspaceClient() if err != nil { return err } - accl := AccessControlChangeList{} - for _, acl := range objectACL.AccessControlList { - if acl.GroupName == "admins" && objectID != "/authorization/passwords" { - if change, direct := acl.toAccessControlChange(); direct { - // keep everything direct for admin group - accl.AccessControlList = append(accl.AccessControlList, change) - } - } + resourceStatus, err := mapping.getObjectStatus(a.context, w, objectID) + if err != nil { + return err } - originalAcl := make([]AccessControlChange, len(accl.AccessControlList)) - _ = copy(originalAcl, accl.AccessControlList) - if isOwnershipWorkaroundNecessary(objectID) { - creator, err := a.getObjectCreator(objectID) - if err != nil { - return err - } - if creator == "" { - return nil - } - accl.AccessControlList = append(accl.AccessControlList, AccessControlChange{ - UserName: creator, - PermissionLevel: "IS_OWNER", - }) + // Do not bother resetting permissions for deleted resources + if !resourceStatus.exists { + return nil } - return a.safePutWithOwner(objectID, accl, originalAcl) + return a.safePutWithOwner(objectID, accl, mapping, resourceStatus.creator) } -// Read gets all relevant permissions for the object, including inherited ones -func (a PermissionsAPI) Read(objectID string) (objectACL ObjectACL, err error) { - err = a.client.Get(a.context, urlPathForObjectID(objectID), nil, &objectACL) +func (a PermissionsAPI) readRaw(objectID string, mapping resourcePermissions) (*iam.ObjectPermissions, error) { + w, err := a.client.WorkspaceClient() + if err != nil { + return nil, err + } + idParts := strings.Split(objectID, "/") + id := idParts[len(idParts)-1] + permissions, err := w.Permissions.Get(a.context, iam.GetPermissionRequest{ + RequestObjectId: id, + RequestObjectType: mapping.requestObjectType, + }) var apiErr *apierr.APIError // https://github.com/databricks/terraform-provider-databricks/issues/1227 // platform propagates INVALID_STATE error for auto-purged clusters in @@ -296,143 +133,34 @@ func (a PermissionsAPI) Read(objectID string) (objectACL ObjectACL, err error) { // cross-package dependency on "clusters". 
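+	// In effect, the 400/INVALID_STATE answer is rewritten into a plain 404
+	// with ErrorCode RESOURCE_DOES_NOT_EXIST, so the ordinary not-found path
+	// applies. A sketch of what callers can rely on after the rewrite
+	// (assuming the SDK's apierr.IsMissing helper; illustrative only):
+	//
+	//	permissions, err := a.readRaw(objectID, mapping)
+	//	if apierr.IsMissing(err) {
+	//		// the Read handler clears the ID, dropping the purged cluster from state
+	//	}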
if errors.As(err, &apiErr) && strings.Contains(apiErr.Message, "Cannot access cluster") && apiErr.StatusCode == 400 { apiErr.StatusCode = 404 + apiErr.ErrorCode = "RESOURCE_DOES_NOT_EXIST" err = apiErr - return - } - if strings.HasPrefix(objectID, "/dashboards/") { - // workaround for inconsistent API response returning object ID of file in the workspace - objectACL.ObjectID = objectID - } - return -} - -// permissionsIDFieldMapping holds mapping -type permissionsIDFieldMapping struct { - field, objectType, resourceType string - - allowedPermissionLevels []string - - idRetriever func(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) -} - -// PermissionsResourceIDFields shows mapping of id columns to resource types -func permissionsResourceIDFields() []permissionsIDFieldMapping { - SIMPLE := func(ctx context.Context, w *databricks.WorkspaceClient, id string) (string, error) { - return id, nil - } - PATH := func(ctx context.Context, w *databricks.WorkspaceClient, path string) (string, error) { - info, err := w.Workspace.GetStatusByPath(ctx, path) - if err != nil { - return "", fmt.Errorf("cannot load path %s: %s", path, err) - } - return strconv.FormatInt(info.ObjectId, 10), nil - } - return []permissionsIDFieldMapping{ - {"cluster_policy_id", "cluster-policy", "cluster-policies", []string{"CAN_USE"}, SIMPLE}, - {"instance_pool_id", "instance-pool", "instance-pools", []string{"CAN_ATTACH_TO", "CAN_MANAGE"}, SIMPLE}, - {"cluster_id", "cluster", "clusters", []string{"CAN_ATTACH_TO", "CAN_RESTART", "CAN_MANAGE"}, SIMPLE}, - {"pipeline_id", "pipelines", "pipelines", []string{"CAN_VIEW", "CAN_RUN", "CAN_MANAGE", "IS_OWNER"}, SIMPLE}, - {"job_id", "job", "jobs", []string{"CAN_VIEW", "CAN_MANAGE_RUN", "IS_OWNER", "CAN_MANAGE"}, SIMPLE}, - {"notebook_id", "notebook", "notebooks", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, SIMPLE}, - {"notebook_path", "notebook", "notebooks", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, PATH}, - {"directory_id", "directory", "directories", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, SIMPLE}, - {"directory_path", "directory", "directories", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, PATH}, - {"workspace_file_id", "file", "files", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, SIMPLE}, - {"workspace_file_path", "file", "files", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, PATH}, - {"repo_id", "repo", "repos", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, SIMPLE}, - {"repo_path", "repo", "repos", []string{"CAN_READ", "CAN_RUN", "CAN_EDIT", "CAN_MANAGE"}, PATH}, - {"authorization", "tokens", "authorization", []string{"CAN_USE"}, SIMPLE}, - {"authorization", "passwords", "authorization", []string{"CAN_USE"}, SIMPLE}, - {"sql_endpoint_id", "warehouses", "sql/warehouses", []string{"CAN_USE", "CAN_MANAGE", "CAN_MONITOR", "IS_OWNER"}, SIMPLE}, - {"sql_dashboard_id", "dashboard", "sql/dashboards", []string{"CAN_EDIT", "CAN_RUN", "CAN_MANAGE", "CAN_VIEW"}, SIMPLE}, - {"sql_alert_id", "alert", "sql/alerts", []string{"CAN_EDIT", "CAN_RUN", "CAN_MANAGE", "CAN_VIEW"}, SIMPLE}, - {"sql_query_id", "query", "sql/queries", []string{"CAN_EDIT", "CAN_RUN", "CAN_MANAGE", "CAN_VIEW"}, SIMPLE}, - {"dashboard_id", "dashboard", "dashboards", []string{"CAN_EDIT", "CAN_RUN", "CAN_MANAGE", "CAN_READ"}, SIMPLE}, - {"experiment_id", "mlflowExperiment", "experiments", []string{"CAN_READ", "CAN_EDIT", "CAN_MANAGE"}, SIMPLE}, - {"registered_model_id", "registered-model", 
"registered-models", []string{ - "CAN_READ", "CAN_EDIT", "CAN_MANAGE_STAGING_VERSIONS", "CAN_MANAGE_PRODUCTION_VERSIONS", "CAN_MANAGE"}, SIMPLE}, - {"serving_endpoint_id", "serving-endpoint", "serving-endpoints", []string{"CAN_VIEW", "CAN_QUERY", "CAN_MANAGE"}, SIMPLE}, - } -} - -// PermissionsEntity is the one used for resource metadata -type PermissionsEntity struct { - ObjectType string `json:"object_type,omitempty" tf:"computed"` - AccessControlList []AccessControlChange `json:"access_control" tf:"slice_set"` -} - -func (oa *ObjectACL) isMatchingMapping(mapping permissionsIDFieldMapping) bool { - if mapping.objectType != oa.ObjectType { - return false - } - if oa.ObjectID != "" && oa.ObjectID[0] == '/' { - return strings.HasPrefix(oa.ObjectID[1:], mapping.resourceType) - } - if strings.HasPrefix(oa.ObjectID, "dashboards/") || strings.HasPrefix(oa.ObjectID, "alerts/") || strings.HasPrefix(oa.ObjectID, "queries/") { - idx := strings.Index(oa.ObjectID, "/") - if idx != -1 { - return mapping.resourceType == "sql/"+oa.ObjectID[:idx] - } - } - - return false -} - -func (oa *ObjectACL) ToPermissionsEntity(d *schema.ResourceData, me string) (PermissionsEntity, error) { - entity := PermissionsEntity{} - for _, accessControl := range oa.AccessControlList { - if accessControl.GroupName == "admins" && d.Id() != "/authorization/passwords" { - // not possible to lower admins permissions anywhere from CAN_MANAGE - continue - } - if me == accessControl.UserName || me == accessControl.ServicePrincipalName { - // not possible to lower one's permissions anywhere from CAN_MANAGE - continue - } - if change, direct := accessControl.toAccessControlChange(); direct { - entity.AccessControlList = append(entity.AccessControlList, change) - } } - for _, mapping := range permissionsResourceIDFields() { - if !oa.isMatchingMapping(mapping) { - continue - } - entity.ObjectType = mapping.objectType - var pathVariant any - if mapping.objectType == "file" { - pathVariant = d.Get("workspace_file_path") - } else { - pathVariant = d.Get(mapping.objectType + "_path") - } - if pathVariant != nil && pathVariant.(string) != "" { - // we're not importing and it's a path... 
it's set, so let's not re-set it - return entity, nil - } - identifier := path.Base(oa.ObjectID) - return entity, d.Set(mapping.field, identifier) + if err != nil { + return nil, err } - return entity, fmt.Errorf("unknown object type %s", oa.ObjectType) + return permissions, nil } -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } +// Read gets all relevant permissions for the object, including inherited ones +func (a PermissionsAPI) Read(objectID string, mapping resourcePermissions, existing entity.PermissionsEntity, me string) (entity.PermissionsEntity, error) { + permissions, err := a.readRaw(objectID, mapping) + if err != nil { + return entity.PermissionsEntity{}, err } - return false + return mapping.prepareResponse(objectID, permissions, existing, me) } // ResourcePermissions definition func ResourcePermissions() common.Resource { - s := common.StructToSchema(PermissionsEntity{}, func(s map[string]*schema.Schema) map[string]*schema.Schema { - for _, mapping := range permissionsResourceIDFields() { + s := common.StructToSchema(entity.PermissionsEntity{}, func(s map[string]*schema.Schema) map[string]*schema.Schema { + for _, mapping := range allResourcePermissions() { s[mapping.field] = &schema.Schema{ ForceNew: true, Type: schema.TypeString, Optional: true, } - for _, m := range permissionsResourceIDFields() { + for _, m := range allResourcePermissions() { if m.field == mapping.field { continue } @@ -445,38 +173,44 @@ func ResourcePermissions() common.Resource { return common.Resource{ Schema: s, CustomizeDiff: func(ctx context.Context, diff *schema.ResourceDiff) error { + mapping, _, err := getResourcePermissionsFromState(diff) + if err != nil { + // This preserves current behavior but is likely only exercised in tests where + // the original config is not specified. + return nil + } + planned := entity.PermissionsEntity{} + common.DiffToStructPointer(diff, s, &planned) // Plan time validation for object permission levels - for _, mapping := range permissionsResourceIDFields() { - if _, ok := diff.GetOk(mapping.field); !ok { + for _, accessControl := range planned.AccessControlList { + permissionLevel := accessControl.PermissionLevel + // No diff in permission level, so don't need to check. + if permissionLevel == "" { continue } - access_control_list := diff.Get("access_control").(*schema.Set).List() - for _, access_control := range access_control_list { - m := access_control.(map[string]any) - permission_level := m["permission_level"].(string) - if !stringInSlice(permission_level, mapping.allowedPermissionLevels) { - return fmt.Errorf(`permission_level %s is not supported with %s objects`, - permission_level, mapping.field) - } + // TODO: only warn on unknown permission levels, as new levels may be released that the TF provider + // is not aware of. 
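+			// A warning-based variant of the TODO above could look like this
+			// (sketch only; the provider currently fails hard, and emitting a
+			// warning here would require importing "log" or plumbing
+			// diagnostics through CustomizeDiff):
+			//
+			//	if _, ok := mapping.allowedPermissionLevels[string(permissionLevel)]; !ok {
+			//		log.Printf("[WARN] permission_level %s is not known for %s objects",
+			//			permissionLevel, mapping.field)
+			//	}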
+ if _, ok := mapping.allowedPermissionLevels[string(permissionLevel)]; !ok { + return fmt.Errorf(`permission_level %s is not supported with %s objects; allowed levels: %s`, + permissionLevel, mapping.field, strings.Join(mapping.getAllowedPermissionLevels(true), ", ")) } } return nil }, Read: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - id := d.Id() - w, err := c.WorkspaceClient() - if err != nil { - return err - } - objectACL, err := NewPermissionsAPI(ctx, c).Read(id) + a := NewPermissionsAPI(ctx, c) + mapping, err := getResourcePermissionsFromId(d.Id()) if err != nil { return err } - me, err := w.CurrentUser.Me(ctx) + var existing entity.PermissionsEntity + common.DataToStructPointer(d, s, &existing) + me, err := a.getCurrentUser() if err != nil { return err } - entity, err := objectACL.ToPermissionsEntity(d, me.UserName) + id := d.Id() + entity, err := a.Read(id, mapping, existing, me) if err != nil { return err } @@ -485,61 +219,53 @@ func ResourcePermissions() common.Resource { d.SetId("") return nil } + entity.ObjectType = mapping.objectType + pathVariant := d.Get(mapping.getPathVariant()) + if pathVariant == nil || pathVariant.(string) == "" { + identifier := path.Base(id) + if err = d.Set(mapping.field, identifier); err != nil { + return err + } + } return common.StructToData(entity, s, d) }, Create: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - var entity PermissionsEntity + var entity entity.PermissionsEntity common.DataToStructPointer(d, s, &entity) w, err := c.WorkspaceClient() if err != nil { return err } - me, err := w.CurrentUser.Me(ctx) + mapping, configuredValue, err := getResourcePermissionsFromState(d) if err != nil { return err } - for _, mapping := range permissionsResourceIDFields() { - if v, ok := d.GetOk(mapping.field); ok { - id, err := mapping.idRetriever(ctx, w, v.(string)) - if err != nil { - return err - } - objectID := fmt.Sprintf("/%s/%s", mapping.resourceType, id) - // this logic was moved from CustomizeDiff because of undeterministic auth behavior - // in the corner-case scenarios. 
- // see https://github.com/databricks/terraform-provider-databricks/issues/2052 - for _, v := range entity.AccessControlList { - if v.UserName == me.UserName { - format := "it is not possible to decrease administrative permissions for the current user: %s" - return fmt.Errorf(format, me.UserName) - } - - if v.GroupName == "admins" && mapping.resourceType != "authorization" { - // should allow setting admins permissions for passwords and tokens usage - return fmt.Errorf("it is not possible to restrict any permissions from `admins`") - } - } - err = NewPermissionsAPI(ctx, c).Update(objectID, AccessControlChangeList{ - AccessControlList: entity.AccessControlList, - }) - if err != nil { - return err - } - d.SetId(objectID) - return nil - } + objectID, err := mapping.getID(ctx, w, configuredValue) + if err != nil { + return err + } + err = NewPermissionsAPI(ctx, c).Update(objectID, entity, mapping) + if err != nil { + return err } - return errors.New("at least one type of resource identifiers must be set") + d.SetId(objectID) + return nil }, Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - var entity PermissionsEntity + var entity entity.PermissionsEntity common.DataToStructPointer(d, s, &entity) - return NewPermissionsAPI(ctx, c).Update(d.Id(), AccessControlChangeList{ - AccessControlList: entity.AccessControlList, - }) + mapping, err := getResourcePermissionsFromId(d.Id()) + if err != nil { + return err + } + return NewPermissionsAPI(ctx, c).Update(d.Id(), entity, mapping) }, Delete: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - return NewPermissionsAPI(ctx, c).Delete(d.Id()) + mapping, err := getResourcePermissionsFromId(d.Id()) + if err != nil { + return err + } + return NewPermissionsAPI(ctx, c).Delete(d.Id(), mapping) }, } } diff --git a/permissions/resource_permissions_test.go b/permissions/resource_permissions_test.go index b01fddb1ca..7019ae5c56 100644 --- a/permissions/resource_permissions_test.go +++ b/permissions/resource_permissions_test.go @@ -2,17 +2,21 @@ package permissions import ( "context" - "net/http" + "fmt" "testing" + "github.com/stretchr/testify/mock" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/iam" "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/databricks/terraform-provider-databricks/common" - "github.com/databricks/terraform-provider-databricks/scim" - + "github.com/databricks/terraform-provider-databricks/permissions/entity" "github.com/databricks/terraform-provider-databricks/qa" - "github.com/databricks/terraform-provider-databricks/workspace" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -22,70 +26,39 @@ var ( TestingUser = "ben" TestingAdminUser = "admin" TestingOwner = "testOwner" - me = qa.HTTPFixture{ - ReuseRequest: true, - Method: "GET", - Resource: "/api/2.0/preview/scim/v2/Me", - Response: scim.User{ - UserName: TestingAdminUser, - }, - } ) -func TestEntityAccessControlChangeString(t *testing.T) { - assert.Equal(t, "me CAN_READ", AccessControlChange{ - UserName: "me", - PermissionLevel: "CAN_READ", - }.String()) -} - -func TestEntityAccessControlString(t *testing.T) { - 
assert.Equal(t, "me[CAN_READ (from [parent]) CAN_MANAGE]", AccessControl{ - UserName: "me", - AllPermissions: []Permission{ - { - InheritedFromObject: []string{"parent"}, - PermissionLevel: "CAN_READ", - }, - { - PermissionLevel: "CAN_MANAGE", - }, - }, - }.String()) -} - func TestResourcePermissionsRead(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectID: "/clusters/abc", - ObjectType: "cluster", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/clusters/abc", + ObjectType: "cluster", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), Read: true, @@ -104,17 +77,16 @@ func TestResourcePermissionsRead(t *testing.T) { // https://github.com/databricks/terraform-provider-databricks/issues/1227 func TestResourcePermissionsRead_RemovedCluster(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Status: 400, - Response: apierr.APIError{ - ErrorCode: "INVALID_STATE", - Message: "Cannot access cluster X that was terminated or unpinned more than Y days ago.", - }, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(nil, &apierr.APIError{ + StatusCode: 400, + ErrorCode: "INVALID_STATE", + Message: "Cannot access cluster X that was terminated or unpinned more than Y days ago.", + }) }, Resource: ResourcePermissions(), Read: true, @@ -126,27 +98,25 @@ func TestResourcePermissionsRead_RemovedCluster(t *testing.T) { func TestResourcePermissionsRead_Mlflow_Model(t *testing.T) { d, err := qa.ResourceFixture{ - // Pass list of API request mocks - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - Response: ObjectACL{ - ObjectID: "/registered-models/fakeuuid123", - ObjectType: "registered-model", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + 
RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/registered-models/fakeuuid123", + ObjectType: "registered-model", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), Read: true, @@ -164,42 +134,40 @@ func TestResourcePermissionsRead_Mlflow_Model(t *testing.T) { func TestResourcePermissionsCreate_Mlflow_Model(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_READ", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - Response: ObjectACL{ - ObjectID: "/registered-models/fakeuuid123", - ObjectType: "registered-model", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/registered-models/fakeuuid123", + ObjectType: "registered-model", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -223,42 +191,40 @@ func TestResourcePermissionsCreate_Mlflow_Model(t *testing.T) { func TestResourcePermissionsUpdate_Mlflow_Model(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, 
+ PermissionLevel: "CAN_READ", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - Response: ObjectACL{ - ObjectID: "/registered-models/fakeuuid123", - ObjectType: "registered-model", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/registered-models/fakeuuid123", + ObjectType: "registered-model", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, InstanceState: map[string]string{ "registered_model_id": "fakeuuid123", @@ -287,38 +253,36 @@ func TestResourcePermissionsUpdate_Mlflow_Model(t *testing.T) { func TestResourcePermissionsDelete_Mlflow_Model(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - Response: ObjectACL{ - ObjectID: "/registered-models/fakeuuid123", - ObjectType: "registered-model", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/registered-models/fakeuuid123", + ObjectType: "registered-model", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/registered-models/fakeuuid123", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "fakeuuid123", + RequestObjectType: "registered-models", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, + }).Return(nil, nil) }, Resource: ResourcePermissions(), Delete: true, @@ -330,31 +294,38 @@ func TestResourcePermissionsDelete_Mlflow_Model(t *testing.T) { func TestResourcePermissionsRead_SQLA_Asset(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/preview/sql/permissions/dashboards/abc", - Response: ObjectACL{ - ObjectID: "dashboards/abc", - ObjectType: "dashboard", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - 
UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "dbsql-dashboards", + }).Return(&iam.ObjectPermissions{ + ObjectId: "dashboards/abc", + ObjectType: "dashboard", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), Read: true, New: true, ID: "/sql/dashboards/abc", + HCL: ` + sql_dashboard_id = "abc" + access_control { + user_name = "ben" + permission_level = "CAN_VIEW" + } + `, }.Apply(t) assert.NoError(t, err) assert.Equal(t, "/sql/dashboards/abc", d.Id()) @@ -362,31 +333,31 @@ func TestResourcePermissionsRead_SQLA_Asset(t *testing.T) { require.Equal(t, 1, len(ac.List())) firstElem := ac.List()[0].(map[string]any) assert.Equal(t, TestingUser, firstElem["user_name"]) - assert.Equal(t, "CAN_READ", firstElem["permission_level"]) + assert.Equal(t, "CAN_VIEW", firstElem["permission_level"]) } func TestResourcePermissionsRead_Dashboard(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/dashboards/abc", - Response: ObjectACL{ - ObjectID: "dashboards/abc", - ObjectType: "dashboard", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "dashboards", + }).Return(&iam.ObjectPermissions{ + ObjectId: "dashboards/abc", + ObjectType: "dashboard", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), Read: true, @@ -405,17 +376,16 @@ func TestResourcePermissionsRead_Dashboard(t *testing.T) { func TestResourcePermissionsRead_NotFound(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: apierr.APIError{ - ErrorCode: "NOT_FOUND", - Message: "Cluster does not exist", - }, - Status: 404, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(nil, &apierr.APIError{ + StatusCode: 404, + ErrorCode: "NOT_FOUND", + Message: "Cluster does not exist", + }) }, Resource: ResourcePermissions(), Read: true, @@ -427,17 +397,16 @@ func 
TestResourcePermissionsRead_NotFound(t *testing.T) { func TestResourcePermissionsRead_some_error(t *testing.T) { _, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: apierr.APIError{ - ErrorCode: "INVALID_REQUEST", - Message: "Internal error happened", - }, - Status: 400, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(nil, &apierr.APIError{ + StatusCode: 400, + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }) }, Resource: ResourcePermissions(), Read: true, @@ -455,56 +424,17 @@ func TestResourcePermissionsCustomizeDiff_ErrorOnCreate(t *testing.T) { access_control { permission_level = "WHATEVER" }`, - }.ExpectError(t, "permission_level WHATEVER is not supported with cluster_id objects") -} - -func TestResourcePermissionsCustomizeDiff_ErrorOnPermissionsDecreate(t *testing.T) { - qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - }, - Resource: ResourcePermissions(), - Create: true, - HCL: ` - cluster_id = "abc" - access_control { - permission_level = "CAN_ATTACH_TO" - user_name = "admin" - }`, - }.ExpectError(t, "it is not possible to decrease administrative permissions for the current user: admin") + }.ExpectError(t, "permission_level WHATEVER is not supported with cluster_id objects; allowed levels: CAN_ATTACH_TO, CAN_MANAGE, CAN_RESTART") } func TestResourcePermissionsRead_ErrorOnScimMe(t *testing.T) { - qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectID: "/clusters/abc", - ObjectType: "clusters", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, - }, - }, - }, - }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/preview/scim/v2/Me", - Response: apierr.APIError{ - ErrorCode: "INVALID_REQUEST", - Message: "Internal error happened", - }, - Status: 400, - }, - }, func(ctx context.Context, client *common.DatabricksClient) { + mock := func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(nil, &apierr.APIError{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + }) + } + qa.MockWorkspaceApply(t, mock, func(ctx context.Context, client *common.DatabricksClient) { r := ResourcePermissions().ToResource() d := r.TestResourceData() d.SetId("/clusters/abc") @@ -516,35 +446,33 @@ func TestResourcePermissionsRead_ErrorOnScimMe(t *testing.T) { func TestResourcePermissionsRead_ToPermissionsEntity_Error(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectType: "teapot", - }, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(&iam.ObjectPermissions{ + ObjectType: "teapot", + }, nil) }, Resource: ResourcePermissions(), 
Read: true, New: true, ID: "/clusters/abc", - }.ExpectError(t, "unknown object type teapot") + }.ExpectError(t, "expected object type cluster, got teapot") } func TestResourcePermissionsRead_EmptyListResultsInRemoval(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectID: "/clusters/abc", - ObjectType: "cluster", - }, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/clusters/abc", + ObjectType: "cluster", + }, nil) }, Resource: ResourcePermissions(), Read: true, @@ -558,48 +486,46 @@ func TestResourcePermissionsRead_EmptyListResultsInRemoval(t *testing.T) { func TestResourcePermissionsDelete(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectID: "/clusters/abc", - ObjectType: "clusters", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/clusters/abc", + ObjectType: "cluster", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/clusters/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, + }).Return(nil, nil) }, Resource: ResourcePermissions(), Delete: true, @@ -611,53 +537,50 @@ func TestResourcePermissionsDelete(t *testing.T) { func TestResourcePermissionsDelete_error(t *testing.T) { _, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectID: "/clusters/abc", - ObjectType: "clusters", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := 
mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/clusters/abc", + ObjectType: "cluster", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/clusters/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - Response: apierr.APIError{ - ErrorCode: "INVALID_REQUEST", - Message: "Internal error happened", - }, - Status: 400, - }, + }).Return(nil, &apierr.APIError{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + StatusCode: 400, + }) }, Resource: ResourcePermissions(), Delete: true, @@ -668,15 +591,13 @@ func TestResourcePermissionsDelete_error(t *testing.T) { func TestResourcePermissionsCreate_invalid(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{me}, Resource: ResourcePermissions(), Create: true, - }.ExpectError(t, "at least one type of resource identifiers must be set") + }.ExpectError(t, "at least one type of resource identifier must be set; allowed fields: authorization, cluster_id, cluster_policy_id, dashboard_id, directory_id, directory_path, experiment_id, instance_pool_id, job_id, notebook_id, notebook_path, pipeline_id, registered_model_id, repo_id, repo_path, serving_endpoint_id, sql_alert_id, sql_dashboard_id, sql_endpoint_id, sql_query_id, workspace_file_id, workspace_file_path") } func TestResourcePermissionsCreate_no_access_control(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{}, Resource: ResourcePermissions(), Create: true, State: map[string]any{ @@ -687,7 +608,6 @@ func TestResourcePermissionsCreate_no_access_control(t *testing.T) { func TestResourcePermissionsCreate_conflicting_fields(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{}, Resource: ResourcePermissions(), Create: true, State: map[string]any{ @@ -705,7 +625,9 @@ func TestResourcePermissionsCreate_conflicting_fields(t *testing.T) { func TestResourcePermissionsCreate_AdminsThrowError(t *testing.T) { _, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{me}, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + }, Resource: ResourcePermissions(), Create: true, HCL: ` @@ -716,57 +638,55 @@ func TestResourcePermissionsCreate_AdminsThrowError(t *testing.T) { } `, }.Apply(t) - assert.EqualError(t, err, "it is not possible to restrict any permissions from `admins`") + assert.EqualError(t, err, "it is not possible to modify admin permissions for cluster resources") } func TestResourcePermissionsCreate(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - 
Method: http.MethodPut, - Resource: "/api/2.0/permissions/clusters/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_ATTACH_TO", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, - }, - }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/clusters/abc", - Response: ObjectACL{ - ObjectID: "/clusters/abc", - ObjectType: "cluster", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_ATTACH_TO", - Inherited: false, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/clusters/abc", + ObjectType: "cluster", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_ATTACH_TO", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "clusters", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_ATTACH_TO", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", + }, + }, + }).Return(nil, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -790,42 +710,50 @@ func TestResourcePermissionsCreate(t *testing.T) { func TestResourcePermissionsCreate_SQLA_Asset(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPost, - Resource: "/api/2.0/preview/sql/permissions/dashboards/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_RUN", + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "dbsql-dashboards", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/dashboards/abc", + ObjectType: "dashboard", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_RUN", + Inherited: false, + }, }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, + }, }, }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/preview/sql/permissions/dashboards/abc", - Response: ObjectACL{ - ObjectID: "dashboards/abc", - ObjectType: "dashboard", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_RUN", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }, nil) + e.Set(mock.Anything, 
iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "dbsql-dashboards", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_RUN", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, + }).Return(nil, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -849,50 +777,48 @@ func TestResourcePermissionsCreate_SQLA_Asset(t *testing.T) { func TestResourcePermissionsCreate_SQLA_Endpoint(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: "PUT", - Resource: "/api/2.0/permissions/sql/warehouses/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "IS_OWNER", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "sql/warehouses", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_USE", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "IS_OWNER", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/sql/warehouses/abc", - Response: ObjectACL{ - ObjectID: "dashboards/abc", - ObjectType: "dashboard", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "IS_OWNER", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "sql/warehouses", + }).Return(&iam.ObjectPermissions{ + ObjectId: "warehouses/abc", + ObjectType: "warehouses", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanUse}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelIsOwner}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -916,71 +842,66 @@ func TestResourcePermissionsCreate_SQLA_Endpoint(t *testing.T) { func TestResourcePermissionsCreate_SQLA_Endpoint_WithOwnerError(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: "PUT", - Resource: "/api/2.0/permissions/sql/warehouses/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "IS_OWNER", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + 
RequestObjectType: "sql/warehouses", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_USE", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "IS_OWNER", }, }, - Response: apierr.APIError{ - ErrorCode: "INVALID_PARAMETER_VALUE", - Message: "PUT requests for warehouse *** with no existing owner must provide a new owner.", - }, - Status: 400, - }, - { - Method: "PUT", - Resource: "/api/2.0/permissions/sql/warehouses/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, &apierr.APIError{ + ErrorCode: "INVALID_PARAMETER_VALUE", + Message: "PUT requests for warehouse *** with no existing owner must provide a new owner.", + StatusCode: 400, + }) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "sql/warehouses", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_USE", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/sql/warehouses/abc", - Response: ObjectACL{ - ObjectID: "dashboards/abc", - ObjectType: "dashboard", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "IS_OWNER", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "sql/warehouses", + }).Return(&iam.ObjectPermissions{ + ObjectId: "warehouses/abc", + ObjectType: "warehouses", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanUse}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelIsOwner}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1004,50 +925,48 @@ func TestResourcePermissionsCreate_SQLA_Endpoint_WithOwnerError(t *testing.T) { func TestResourcePermissionsCreate_SQLA_Endpoint_WithOwner(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: "PUT", - Resource: "/api/2.0/permissions/sql/warehouses/abc", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingOwner, - PermissionLevel: "IS_OWNER", - }, - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "abc", + RequestObjectType: "sql/warehouses", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingOwner, + PermissionLevel: "IS_OWNER", + }, + { + UserName: TestingUser, + PermissionLevel: "CAN_USE", + }, + { + UserName: TestingAdminUser, + 
PermissionLevel: "CAN_MANAGE", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/sql/warehouses/abc", - Response: ObjectACL{ - ObjectID: "dashboards/abc", - ObjectType: "dashboard", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_USE", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, - { - UserName: TestingOwner, - PermissionLevel: "IS_OWNER", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "abc", + RequestObjectType: "sql/warehouses", + }).Return(&iam.ObjectPermissions{ + ObjectId: "warehouses/abc", + ObjectType: "warehouses", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanUse}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, + }, + { + UserName: TestingOwner, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelIsOwner}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1094,17 +1013,12 @@ func TestResourcePermissionsCreate_SQLA_Endpoint_WithOwner(t *testing.T) { func TestResourcePermissionsCreate_NotebookPath_NotExists(t *testing.T) { _, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/workspace/get-status?path=%2FDevelopment%2FInit", - Response: apierr.APIError{ - ErrorCode: "INVALID_REQUEST", - Message: "Internal error happened", - }, - Status: 400, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/Development/Init").Return(nil, &apierr.APIError{ + ErrorCode: "INVALID_REQUEST", + Message: "Internal error happened", + StatusCode: 400, + }) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1112,7 +1026,7 @@ func TestResourcePermissionsCreate_NotebookPath_NotExists(t *testing.T) { "access_control": []any{ map[string]any{ "user_name": TestingUser, - "permission_level": "CAN_USE", + "permission_level": "CAN_READ", }, }, }, @@ -1124,56 +1038,50 @@ func TestResourcePermissionsCreate_NotebookPath_NotExists(t *testing.T) { func TestResourcePermissionsCreate_NotebookPath(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/workspace/get-status?path=%2FDevelopment%2FInit", - Response: workspace.ObjectStatus{ - ObjectID: 988765, - ObjectType: "NOTEBOOK", - }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/notebooks/988765", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/Development/Init").Return(&workspace.ObjectInfo{ + ObjectId: 988765, + ObjectType: workspace.ObjectTypeNotebook, + }, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "988765", + RequestObjectType: "notebooks", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_READ", }, }, - }, - { - Method: http.MethodGet, - Resource: 
"/api/2.0/permissions/notebooks/988765", - Response: ObjectACL{ - ObjectID: "/notebooks/988765", - ObjectType: "notebook", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "988765", + RequestObjectType: "notebooks", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/notebooks/988765", + ObjectType: "notebook", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1198,56 +1106,50 @@ func TestResourcePermissionsCreate_NotebookPath(t *testing.T) { func TestResourcePermissionsCreate_WorkspaceFilePath(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/workspace/get-status?path=%2FDevelopment%2FInit", - Response: workspace.ObjectStatus{ - ObjectID: 988765, - ObjectType: workspace.File, - }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/files/988765", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/Development/Init").Return(&workspace.ObjectInfo{ + ObjectId: 988765, + ObjectType: workspace.ObjectTypeFile, + }, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "988765", + RequestObjectType: "files", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_READ", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/files/988765", - Response: ObjectACL{ - ObjectID: "/files/988765", - ObjectType: "file", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "988765", + RequestObjectType: "files", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/files/988765", + ObjectType: "file", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1272,18 +1174,6 @@ func TestResourcePermissionsCreate_WorkspaceFilePath(t *testing.T) { func TestResourcePermissionsCreate_error(t *testing.T) { qa.ResourceFixture{ - Fixtures: 
[]qa.HTTPFixture{ - me, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/clusters/abc", - Response: apierr.APIError{ - ErrorCode: "INVALID_REQUEST", - Message: "Internal error happened", - }, - Status: 400, - }, - }, Resource: ResourcePermissions(), State: map[string]any{ "cluster_id": "abc", @@ -1295,14 +1185,17 @@ func TestResourcePermissionsCreate_error(t *testing.T) { }, }, Create: true, - }.ExpectError(t, "permission_level CAN_USE is not supported with cluster_id objects") + }.ExpectError(t, "permission_level CAN_USE is not supported with cluster_id objects; allowed levels: CAN_ATTACH_TO, CAN_MANAGE, CAN_RESTART") } func TestResourcePermissionsCreate_PathIdRetriever_Error(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - qa.HTTPFailures[0], + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/foo/bar").Return(nil, &apierr.APIError{ + ErrorCode: "INVALID_REQUEST", + Message: "i'm a teapot", + StatusCode: 418, + }) }, Resource: ResourcePermissions(), Create: true, @@ -1317,9 +1210,13 @@ func TestResourcePermissionsCreate_PathIdRetriever_Error(t *testing.T) { func TestResourcePermissionsCreate_ActualUpdate_Error(t *testing.T) { qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - qa.HTTPFailures[0], + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Set(mock.Anything, mock.Anything).Return(nil, &apierr.APIError{ + ErrorCode: "INVALID_REQUEST", + Message: "i'm a teapot", + StatusCode: 418, + }) }, Resource: ResourcePermissions(), Create: true, @@ -1334,52 +1231,50 @@ func TestResourcePermissionsCreate_ActualUpdate_Error(t *testing.T) { func TestResourcePermissionsUpdate(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/jobs/9", - Response: ObjectACL{ - ObjectID: "/jobs/9", - ObjectType: "job", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_VIEW", - Inherited: false, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "9", + RequestObjectType: "jobs", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/jobs/9", + ObjectType: "job", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_VIEW", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/jobs/9", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_VIEW", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "IS_OWNER", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "9", + RequestObjectType: "jobs", + AccessControlList: 
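
// Failure paths need no fixtures either: the mock returns an *apierr.APIError
// and the fixture asserts on the resulting diagnostics, as in the two tests
// above. A minimal sketch of wiring such a failure (the helper name is
// illustrative):
func mockPermissionsSetFailure(mwc *mocks.MockWorkspaceClient) {
	mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "admin"}, nil)
	mwc.GetMockPermissionsAPI().EXPECT().
		Set(mock.Anything, mock.Anything).
		Return(nil, &apierr.APIError{
			ErrorCode:  "INVALID_REQUEST",
			Message:    "i'm a teapot",
			StatusCode: 418, // any non-2xx status; the resource should surface Message
		})
}
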
[]iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_VIEW", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "IS_OWNER", }, }, - }, + }).Return(nil, nil) }, InstanceState: map[string]string{ "job_id": "9", @@ -1405,235 +1300,202 @@ func TestResourcePermissionsUpdate(t *testing.T) { assert.Equal(t, "CAN_VIEW", firstElem["permission_level"]) } +func getResourcePermissions(field, objectType string) resourcePermissions { + for _, mapping := range allResourcePermissions() { + if mapping.field == field && mapping.objectType == objectType { + return mapping + } + } + panic(fmt.Sprintf("could not find resource permissions for field %s and object type %s", field, objectType)) +} + func TestResourcePermissionsUpdateTokensAlwaysThereForAdmins(t *testing.T) { - qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - { - Method: "PUT", - Resource: "/api/2.0/permissions/authorization/tokens", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: "me", - PermissionLevel: "CAN_MANAGE", - }, - { - GroupName: "admins", - PermissionLevel: "CAN_MANAGE", - }, + qa.MockWorkspaceApply(t, func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: "me"}, nil) + mwc.GetMockPermissionsAPI().EXPECT().Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "tokens", + RequestObjectType: "authorization", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: "me", + PermissionLevel: "CAN_MANAGE", + }, + { + GroupName: "admins", + PermissionLevel: "CAN_MANAGE", }, }, - }, + }).Return(nil, nil) }, func(ctx context.Context, client *common.DatabricksClient) { p := NewPermissionsAPI(ctx, client) - err := p.Update("/authorization/tokens", AccessControlChangeList{ - AccessControlList: []AccessControlChange{ + mapping := getResourcePermissions("authorization", "tokens") + err := p.Update("/authorization/tokens", entity.PermissionsEntity{ + AccessControlList: []iam.AccessControlRequest{ { UserName: "me", PermissionLevel: "CAN_MANAGE", }, }, - }) + }, mapping) assert.NoError(t, err) }) } func TestShouldKeepAdminsOnAnythingExceptPasswordsAndAssignsOwnerForJob(t *testing.T) { - qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.0/permissions/jobs/123", - Response: ObjectACL{ - ObjectID: "/jobs/123", - ObjectType: "job", - AccessControlList: []AccessControl{ - { - GroupName: "admins", - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_DO_EVERYTHING", - Inherited: true, - }, - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + qa.MockWorkspaceApply(t, func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockJobsAPI().EXPECT().GetByJobId(mock.Anything, int64(123)).Return(&jobs.Job{ + CreatorUserName: "creator@example.com", + }, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "123", + RequestObjectType: "jobs", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/jobs/123", + ObjectType: "job", + AccessControlList: []iam.AccessControlResponse{ + { + GroupName: "admins", + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_DO_EVERYTHING", + Inherited: true, + }, + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, - { - Method: "GET", - Resource: "/api/2.1/jobs/get?job_id=123", - Response: jobs.Job{ - CreatorUserName: "creator@example.com", - }, - }, - { - Method: "PUT", - Resource: "/api/2.0/permissions/jobs/123", - ExpectedRequest: 
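
// getResourcePermissions above deliberately panics on a missing
// (field, objectType) pair, which is the right failure mode for tests: a typo
// should abort loudly. If a non-fatal lookup were ever needed, the two-value
// variant is the obvious shape (illustrative only, not part of this change):
func findResourcePermissions(field, objectType string) (resourcePermissions, bool) {
	for _, mapping := range allResourcePermissions() {
		if mapping.field == field && mapping.objectType == objectType {
			return mapping, true
		}
	}
	return resourcePermissions{}, false
}
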
ObjectACL{ - AccessControlList: []AccessControl{ - { - GroupName: "admins", - PermissionLevel: "CAN_MANAGE", - }, - { - UserName: "creator@example.com", - PermissionLevel: "IS_OWNER", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "123", + RequestObjectType: "jobs", + AccessControlList: []iam.AccessControlRequest{ + { + GroupName: "admins", + PermissionLevel: "CAN_MANAGE", + }, + { + UserName: "creator@example.com", + PermissionLevel: "IS_OWNER", }, }, - }, + }).Return(nil, nil) }, func(ctx context.Context, client *common.DatabricksClient) { p := NewPermissionsAPI(ctx, client) - err := p.Delete("/jobs/123") + mapping := getResourcePermissions("job_id", "job") + err := p.Delete("/jobs/123", mapping) assert.NoError(t, err) }) } func TestShouldDeleteNonExistentJob(t *testing.T) { - qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.0/permissions/jobs/123", - Response: ObjectACL{ - ObjectID: "/jobs/123", - ObjectType: "job", - AccessControlList: []AccessControl{ - { - GroupName: "admins", - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_DO_EVERYTHING", - Inherited: true, - }, - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + qa.MockWorkspaceApply(t, func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "123", + RequestObjectType: "jobs", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/jobs/123", + ObjectType: "job", + AccessControlList: []iam.AccessControlResponse{ + { + GroupName: "admins", + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_DO_EVERYTHING", + Inherited: true, + }, + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, - { - Method: "GET", - Resource: "/api/2.1/jobs/get?job_id=123", - Status: 400, - Response: apierr.APIError{ - StatusCode: 400, - Message: "Job 123 does not exist.", - ErrorCode: "INVALID_PARAMETER_VALUE", - }, - }, + }, nil) + mwc.GetMockJobsAPI().EXPECT().GetByJobId(mock.Anything, int64(123)).Return(nil, &apierr.APIError{ + StatusCode: 400, + Message: "Job 123 does not exist.", + ErrorCode: "INVALID_PARAMETER_VALUE", + }) }, func(ctx context.Context, client *common.DatabricksClient) { p := NewPermissionsAPI(ctx, client) - err := p.Delete("/jobs/123") + mapping := getResourcePermissions("job_id", "job") + err := p.Delete("/jobs/123", mapping) assert.NoError(t, err) }) } func TestShouldKeepAdminsOnAnythingExceptPasswordsAndAssignsOwnerForPipeline(t *testing.T) { - qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - { - Method: "GET", - Resource: "/api/2.0/permissions/pipelines/123", - Response: ObjectACL{ - ObjectID: "/pipelines/123", - ObjectType: "pipeline", - AccessControlList: []AccessControl{ - { - GroupName: "admins", - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_DO_EVERYTHING", - Inherited: true, - }, - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + qa.MockWorkspaceApply(t, func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockPipelinesAPI().EXPECT().GetByPipelineId(mock.Anything, "123").Return(&pipelines.GetPipelineResponse{ + CreatorUserName: "creator@example.com", + }, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "123", + RequestObjectType: "pipelines", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/pipelines/123", + ObjectType: "pipeline", + AccessControlList: []iam.AccessControlResponse{ + { + GroupName: "admins", + AllPermissions: []iam.Permission{ 
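
// The two "ShouldKeepAdmins...AssignsOwner" tests encode the same Delete-time
// invariant: rather than clearing the ACL, the provider looks up the object's
// creator and writes back a minimal ACL that keeps 'admins' manageable and
// hands ownership to the creator. Reduced to a pure function (the name is
// illustrative):
func ownerResetACL(creator string) []iam.AccessControlRequest {
	return []iam.AccessControlRequest{
		{GroupName: "admins", PermissionLevel: "CAN_MANAGE"},
		{UserName: creator, PermissionLevel: "IS_OWNER"},
	}
}
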
+ { + PermissionLevel: "CAN_DO_EVERYTHING", + Inherited: true, + }, + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, - { - Method: "GET", - Resource: "/api/2.0/pipelines/123?", - Response: jobs.Job{ - CreatorUserName: "creator@example.com", - }, - }, - { - Method: "PUT", - Resource: "/api/2.0/permissions/pipelines/123", - ExpectedRequest: ObjectACL{ - AccessControlList: []AccessControl{ - { - GroupName: "admins", - PermissionLevel: "CAN_MANAGE", - }, - { - UserName: "creator@example.com", - PermissionLevel: "IS_OWNER", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "123", + RequestObjectType: "pipelines", + AccessControlList: []iam.AccessControlRequest{ + { + GroupName: "admins", + PermissionLevel: "CAN_MANAGE", + }, + { + UserName: "creator@example.com", + PermissionLevel: "IS_OWNER", }, }, - }, + }).Return(nil, nil) }, func(ctx context.Context, client *common.DatabricksClient) { p := NewPermissionsAPI(ctx, client) - err := p.Delete("/pipelines/123") + mapping := getResourcePermissions("pipeline_id", "pipelines") + err := p.Delete("/pipelines/123", mapping) assert.NoError(t, err) }) } func TestPathPermissionsResourceIDFields(t *testing.T) { - var m permissionsIDFieldMapping - for _, x := range permissionsResourceIDFields() { - if x.field == "notebook_path" { - m = x - } - } + m := getResourcePermissions("notebook_path", "notebook") w, err := databricks.NewWorkspaceClient(&databricks.Config{}) require.NoError(t, err) _, err = m.idRetriever(context.Background(), w, "x") assert.ErrorContains(t, err, "cannot load path x") } -func TestObjectACLToPermissionsEntityCornerCases(t *testing.T) { - _, err := (&ObjectACL{ - ObjectType: "bananas", - AccessControlList: []AccessControl{ - { - GroupName: "admins", - }, - }, - }).ToPermissionsEntity(ResourcePermissions().ToResource().TestResourceData(), "me") - assert.EqualError(t, err, "unknown object type bananas") -} - -func TestEntityAccessControlToAccessControlChange(t *testing.T) { - _, res := AccessControl{}.toAccessControlChange() - assert.False(t, res) -} - -func TestCornerCases(t *testing.T) { - qa.ResourceCornerCases(t, ResourcePermissions(), qa.CornerCaseSkipCRUD("create")) -} - func TestDeleteMissing(t *testing.T) { - qa.HTTPFixturesApply(t, []qa.HTTPFixture{ - { - MatchAny: true, - Status: 404, - Response: apierr.NotFound("missing"), - }, + qa.MockWorkspaceApply(t, func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockPermissionsAPI().EXPECT().Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "x", + RequestObjectType: "clusters", + }).Return(nil, apierr.ErrNotFound) }, func(ctx context.Context, client *common.DatabricksClient) { p := ResourcePermissions().ToResource() d := p.TestResourceData() - d.SetId("x") + d.SetId("/clusters/x") diags := p.DeleteContext(ctx, d, client) assert.Nil(t, diags) }) @@ -1641,65 +1503,59 @@ func TestDeleteMissing(t *testing.T) { func TestResourcePermissionsCreate_RepoPath(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/workspace/get-status?path=%2FRepos%2FDevelopment%2FInit", - Response: workspace.ObjectStatus{ - ObjectID: 988765, - ObjectType: "repo", - }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/repos/988765", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, - }, - }, - }, - { - Method: http.MethodGet, - Resource: 
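
// TestShouldDeleteNonExistentJob and TestDeleteMissing above pin down the 404
// behaviour: a missing backend object must not fail the operation, it must
// drop the resource from state. A hedged sketch of the guard this implies,
// assuming the SDK's apierr helpers; the function, ID and object type are
// illustrative:
func readPermissionsOrClear(ctx context.Context, w *databricks.WorkspaceClient, d *schema.ResourceData) error {
	perms, err := w.Permissions.Get(ctx, iam.GetPermissionRequest{
		RequestObjectId:   "x", // real code derives this from d.Id()
		RequestObjectType: "clusters",
	})
	if apierr.IsMissing(err) {
		d.SetId("") // object is gone; clear state instead of erroring
		return nil
	}
	if err != nil {
		return err
	}
	_ = perms // real code maps perms back into state here
	return nil
}
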
"/api/2.0/permissions/repos/988765", - Response: ObjectACL{ - ObjectID: "/repos/988765", - ObjectType: "repo", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: TestingAdminUser}, nil) + mwc.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/Repos/Development/Init").Return(&workspace.ObjectInfo{ + ObjectId: 988765, + ObjectType: workspace.ObjectTypeRepo, + }, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "988765", + RequestObjectType: "repos", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/repos/988765", + ObjectType: "repo", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_RUN", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_RUN", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "988765", + RequestObjectType: "repos", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_READ", + }, + }, + }).Return(nil, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1725,42 +1581,40 @@ func TestResourcePermissionsCreate_RepoPath(t *testing.T) { // when caller does not specify CAN_MANAGE permission during create, it should be explictly added func TestResourcePermissionsCreate_Sql_Queries(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPost, - Resource: "/api/2.0/preview/sql/permissions/queries/id111", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_RUN", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: TestingAdminUser}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "id111", + RequestObjectType: "sql/queries", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_RUN", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/preview/sql/permissions/queries/id111", - Response: ObjectACL{ - ObjectID: "queries/id111", - ObjectType: "query", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_RUN", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "id111", + RequestObjectType: "sql/queries", + 
}).Return(&iam.ObjectPermissions{ + ObjectId: "queries/id111", + ObjectType: "query", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRun}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1785,42 +1639,40 @@ func TestResourcePermissionsCreate_Sql_Queries(t *testing.T) { // when caller does not specify CAN_MANAGE permission during update, it should be explictly added func TestResourcePermissionsUpdate_Sql_Queries(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPost, - Resource: "/api/2.0/preview/sql/permissions/queries/id111", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_RUN", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: TestingAdminUser}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "id111", + RequestObjectType: "sql/queries", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_RUN", + }, + { + UserName: TestingAdminUser, + PermissionLevel: "CAN_MANAGE", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/preview/sql/permissions/queries/id111", - Response: ObjectACL{ - ObjectID: "queries/id111", - ObjectType: "query", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - PermissionLevel: "CAN_RUN", - }, - { - UserName: TestingAdminUser, - PermissionLevel: "CAN_MANAGE", - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "id111", + RequestObjectType: "sql/queries", + }).Return(&iam.ObjectPermissions{ + ObjectId: "queries/id111", + ObjectType: "query", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRun}}, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}}, }, }, - }, + }, nil) }, InstanceState: map[string]string{ "sql_query_id": "id111", @@ -1847,65 +1699,59 @@ func TestResourcePermissionsUpdate_Sql_Queries(t *testing.T) { func TestResourcePermissionsCreate_DirectoryPath(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodGet, - Resource: "/api/2.0/workspace/get-status?path=%2FFirst", - Response: workspace.ObjectStatus{ - ObjectID: 123456, - ObjectType: "directory", - }, - }, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/directories/123456", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - UserName: TestingUser, - PermissionLevel: "CAN_READ", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: TestingAdminUser}, nil) + mwc.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/First").Return(&workspace.ObjectInfo{ + ObjectId: 123456, + ObjectType: workspace.ObjectTypeDirectory, + }, nil) + e := 
mwc.GetMockPermissionsAPI().EXPECT() + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "123456", + RequestObjectType: "directories", + AccessControlList: []iam.AccessControlRequest{ + { + UserName: TestingUser, + PermissionLevel: "CAN_READ", }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/directories/123456", - Response: ObjectACL{ - ObjectID: "/directories/123456", - ObjectType: "directory", - AccessControlList: []AccessControl{ - { - UserName: TestingUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_READ", - Inherited: false, - }, + }).Return(nil, nil) + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "123456", + RequestObjectType: "directories", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/directories/123456", + ObjectType: "directory", + AccessControlList: []iam.AccessControlResponse{ + { + UserName: TestingUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_READ", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_RUN", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_RUN", + Inherited: false, }, }, - { - UserName: TestingAdminUser, - AllPermissions: []Permission{ - { - PermissionLevel: "CAN_MANAGE", - Inherited: false, - }, + }, + { + UserName: TestingAdminUser, + AllPermissions: []iam.Permission{ + { + PermissionLevel: "CAN_MANAGE", + Inherited: false, }, }, }, }, - }, + }, nil) }, Resource: ResourcePermissions(), State: map[string]any{ @@ -1930,34 +1776,32 @@ func TestResourcePermissionsCreate_DirectoryPath(t *testing.T) { func TestResourcePermissionsPasswordUsage(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/authorization/passwords", - ExpectedRequest: AccessControlChangeList{ - AccessControlList: []AccessControlChange{ - { - GroupName: "admins", - PermissionLevel: "CAN_USE", - }, + MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) { + mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: TestingAdminUser}, nil) + e := mwc.GetMockPermissionsAPI().EXPECT() + e.Get(mock.Anything, iam.GetPermissionRequest{ + RequestObjectId: "passwords", + RequestObjectType: "authorization", + }).Return(&iam.ObjectPermissions{ + ObjectId: "/authorization/passwords", + ObjectType: "passwords", + AccessControlList: []iam.AccessControlResponse{ + { + GroupName: "admins", + AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanUse}}, }, }, - }, - { - Method: http.MethodGet, - Resource: "/api/2.0/permissions/authorization/passwords", - Response: ObjectACL{ - ObjectID: "/authorization/passwords", - ObjectType: "passwords", - AccessControlList: []AccessControl{ - { - GroupName: "admins", - PermissionLevel: "CAN_USE", - }, + }, nil) + e.Set(mock.Anything, iam.PermissionsRequest{ + RequestObjectId: "passwords", + RequestObjectType: "authorization", + AccessControlList: []iam.AccessControlRequest{ + { + GroupName: "admins", + PermissionLevel: "CAN_USE", }, }, - }, + }).Return(nil, nil) }, Resource: ResourcePermissions(), HCL: ` @@ -1979,42 +1823,40 @@ func TestResourcePermissionsPasswordUsage(t *testing.T) { func TestResourcePermissionsRootDirectory(t *testing.T) { d, err := qa.ResourceFixture{ - Fixtures: []qa.HTTPFixture{ - me, - { - Method: http.MethodPut, - Resource: "/api/2.0/permissions/directories/0", - 
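
// TestResourcePermissionsPasswordUsage above highlights the inverted ID layout
// of authorization objects: the request type is the literal "authorization"
// and the object ID is the feature name. The two cases exercised in this file,
// side by side (levels taken from the surrounding tests):
var authorizationPermissionRequests = []iam.GetPermissionRequest{
	{RequestObjectId: "passwords", RequestObjectType: "authorization"}, // admins get CAN_USE
	{RequestObjectId: "tokens", RequestObjectType: "authorization"},    // admins get CAN_MANAGE
}
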
ExpectedRequest: AccessControlChangeList{
-				AccessControlList: []AccessControlChange{
-					{
-						UserName:        TestingUser,
-						PermissionLevel: "CAN_READ",
-					},
-					{
-						GroupName:       "admins",
-						PermissionLevel: "CAN_MANAGE",
-					},
+		MockWorkspaceClientFunc: func(mwc *mocks.MockWorkspaceClient) {
+			mwc.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{UserName: TestingAdminUser}, nil)
+			e := mwc.GetMockPermissionsAPI().EXPECT()
+			e.Get(mock.Anything, iam.GetPermissionRequest{
+				RequestObjectId:   "0",
+				RequestObjectType: "directories",
+			}).Return(&iam.ObjectPermissions{
+				ObjectId:   "/directories/0",
+				ObjectType: "directory",
+				AccessControlList: []iam.AccessControlResponse{
+					{
+						UserName:       TestingUser,
+						AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanRead}},
+					},
+					{
+						GroupName:      "admins",
+						AllPermissions: []iam.Permission{{PermissionLevel: iam.PermissionLevelCanManage}},
+					},
+				},
-			},
-			{
-				Method:   http.MethodGet,
-				Resource: "/api/2.0/permissions/directories/0",
-				Response: ObjectACL{
-					ObjectID:   "/directories/0",
-					ObjectType: "directory",
-					AccessControlList: []AccessControl{
-						{
-							UserName:        TestingUser,
-							PermissionLevel: "CAN_READ",
-						},
-						{
-							GroupName:       "admins",
-							PermissionLevel: "CAN_MANAGE",
-						},
+			}, nil)
+			e.Set(mock.Anything, iam.PermissionsRequest{
+				RequestObjectId:   "0",
+				RequestObjectType: "directories",
+				AccessControlList: []iam.AccessControlRequest{
+					{
+						UserName:        TestingUser,
+						PermissionLevel: "CAN_READ",
+					},
+					{
+						GroupName:       "admins",
+						PermissionLevel: "CAN_MANAGE",
+					},
 				},
-			},
+			}).Return(nil, nil)
 		},
 		Resource: ResourcePermissions(),
 		HCL: `
diff --git a/permissions/update/customizers.go b/permissions/update/customizers.go
new file mode 100644
index 0000000000..ea2c5dd5db
--- /dev/null
+++ b/permissions/update/customizers.go
@@ -0,0 +1,99 @@
+package update
+
+import (
+	"github.com/databricks/databricks-sdk-go/service/iam"
+)
+
+// ACLCustomizerContext is the context available to ACLCustomizer implementations.
+type ACLCustomizerContext struct {
+	GetCurrentUser func() (string, error)
+	GetId          func() string
+}
+
+// ACLCustomizer is a function that modifies the access control list of an object before it is updated.
+type ACLCustomizer func(ctx ACLCustomizerContext, objectAcls []iam.AccessControlRequest) ([]iam.AccessControlRequest, error)
+
+// If applies the customizer only if the condition is true.
+func If(condition func(ACLCustomizerContext, []iam.AccessControlRequest) bool, customizer ACLCustomizer) ACLCustomizer {
+	return func(ctx ACLCustomizerContext, acl []iam.AccessControlRequest) ([]iam.AccessControlRequest, error) {
+		if condition(ctx, acl) {
+			return customizer(ctx, acl)
+		}
+		return acl, nil
+	}
+}
+
+// Not negates the given condition.
+func Not(condition func(ACLCustomizerContext, []iam.AccessControlRequest) bool) func(ACLCustomizerContext, []iam.AccessControlRequest) bool {
+	return func(ctx ACLCustomizerContext, acl []iam.AccessControlRequest) bool {
+		return !condition(ctx, acl)
+	}
+}
+
+// ObjectIdMatches returns a condition that checks if the object ID matches the expected value.
+func ObjectIdMatches(expected string) func(ACLCustomizerContext, []iam.AccessControlRequest) bool {
+	return func(ctx ACLCustomizerContext, acl []iam.AccessControlRequest) bool {
+		return ctx.GetId() == expected
+	}
+}
+
+// AddAdmin adds an explicit CAN_MANAGE permission for the 'admins' group
+// if the group is not already present in the access control list.
+func AddAdmin(ctx ACLCustomizerContext, acl []iam.AccessControlRequest) ([]iam.AccessControlRequest, error) {
+	found := false
+	for _, entry := range acl {
+		if entry.GroupName == "admins" {
+			found = true
+			break
+		}
+	}
+	if !found {
+		// Prevent "Cannot change permissions for group 'admins' to None."
+		acl = append(acl, iam.AccessControlRequest{
+			GroupName:       "admins",
+			PermissionLevel: "CAN_MANAGE",
+		})
+	}
+	return acl, nil
+}
+
+// AddCurrentUserAsManage grants the calling user an explicit CAN_MANAGE permission if they are absent from the list.
+// As described in https://github.com/databricks/terraform-provider-databricks/issues/1504,
+// certain object types require that we explicitly grant the calling user CAN_MANAGE
+// permissions when POSTing permissions changes through the REST API, to avoid accidentally
+// revoking the calling user's ability to manage the current object.
+func AddCurrentUserAsManage(ctx ACLCustomizerContext, acl []iam.AccessControlRequest) ([]iam.AccessControlRequest, error) {
+	currentUser, err := ctx.GetCurrentUser()
+	if err != nil {
+		return nil, err
+	}
+	// The validate() method called in Update() ensures that the current user's permissions are either CAN_MANAGE
+	// or IS_OWNER if they are specified. If the current user is not specified in the access control list, we add
+	// them with CAN_MANAGE permissions.
+	found := false
+	for _, entry := range acl {
+		if entry.UserName == currentUser || entry.ServicePrincipalName == currentUser {
+			found = true
+			break
+		}
+	}
+	if !found {
+		acl = append(acl, iam.AccessControlRequest{
+			UserName:        currentUser,
+			PermissionLevel: "CAN_MANAGE",
+		})
+	}
+	return acl, nil
+}
+
+// RewritePermissions returns a customizer that translates permission levels according to the given mapping.
+func RewritePermissions(mapping map[iam.PermissionLevel]iam.PermissionLevel) ACLCustomizer {
+	return func(ctx ACLCustomizerContext, acl []iam.AccessControlRequest) ([]iam.AccessControlRequest, error) {
+		for i := range acl {
+			if mapped, ok := mapping[acl[i].PermissionLevel]; ok {
+				acl[i].PermissionLevel = mapped
+			}
+		}
+		return acl, nil
+	}
+}
diff --git a/repos/resource_git_credential.go b/repos/resource_git_credential.go
index 38f7b94044..9858ff9683 100644
--- a/repos/resource_git_credential.go
+++ b/repos/resource_git_credential.go
@@ -12,7 +12,7 @@ import (
 )
 
 func ResourceGitCredential() common.Resource {
-	s := common.StructToSchema(workspace.CreateCredentials{}, func(s map[string]*schema.Schema) map[string]*schema.Schema {
+	s := common.StructToSchema(workspace.CreateCredentialsRequest{}, func(s map[string]*schema.Schema) map[string]*schema.Schema {
 		s["force"] = &schema.Schema{
 			Type:     schema.TypeBool,
 			Optional: true,
@@ -34,7 +34,7 @@ func ResourceGitCredential() common.Resource {
 			return err
 		}
 
-		var req workspace.CreateCredentials
+		var req workspace.CreateCredentialsRequest
 		common.DataToStructPointer(d, s, &req)
 
 		resp, err := w.GitCredentials.Create(ctx, req)
@@ -49,7 +49,7 @@ func ResourceGitCredential() common.Resource {
 			if len(creds) != 1 {
 				return fmt.Errorf("list of credentials is either empty or have more than one entry (%d)", len(creds))
 			}
-			var req workspace.UpdateCredentials
+			var req workspace.UpdateCredentialsRequest
 			common.DataToStructPointer(d, s, &req)
 			req.CredentialId = creds[0].CredentialId
 
@@ -71,7 +71,7 @@ func ResourceGitCredential() common.Resource {
 		if err != nil {
 			return err
 		}
-		resp, err := w.GitCredentials.Get(ctx, workspace.GetGitCredentialRequest{CredentialId: cred_id})
+		resp, err := w.GitCredentials.Get(ctx, workspace.GetCredentialsRequest{CredentialId: cred_id})
 		if err != nil {
 			return err
 		}
@@ -80,7 +80,7 @@ func ResourceGitCredential()
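
// The customizers added in permissions/update/customizers.go above are
// designed to compose per resource. A hedged sketch of how they might be
// chained; where each resource registers its customizers lives elsewhere in
// the provider, so the variable and helper below are illustrative:
var sketchCustomizers = []update.ACLCustomizer{
	// Everything except /authorization/passwords keeps an explicit admins entry.
	update.If(update.Not(update.ObjectIdMatches("/authorization/passwords")), update.AddAdmin),
	// Some object types also keep the caller manageable (issue #1504).
	update.AddCurrentUserAsManage,
}

func applySketchCustomizers(ctx update.ACLCustomizerContext, acl []iam.AccessControlRequest) ([]iam.AccessControlRequest, error) {
	var err error
	for _, customize := range sketchCustomizers {
		if acl, err = customize(ctx, acl); err != nil {
			return nil, err
		}
	}
	return acl, nil
}
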
common.Resource { return nil }, Update: func(ctx context.Context, d *schema.ResourceData, c *common.DatabricksClient) error { - var req workspace.UpdateCredentials + var req workspace.UpdateCredentialsRequest common.DataToStructPointer(d, s, &req) cred_id, err := strconv.ParseInt(d.Id(), 10, 64) diff --git a/repos/resource_git_credential_test.go b/repos/resource_git_credential_test.go index 911a48eeb0..1a64cf273c 100644 --- a/repos/resource_git_credential_test.go +++ b/repos/resource_git_credential_test.go @@ -85,7 +85,7 @@ func TestResourceGitCredentialUpdate(t *testing.T) { { Method: "PATCH", Resource: fmt.Sprintf("/api/2.0/git-credentials/%d", credID), - ExpectedRequest: workspace.UpdateCredentials{ + ExpectedRequest: workspace.UpdateCredentialsRequest{ CredentialId: int64(credID), GitProvider: provider, GitUsername: user, @@ -125,7 +125,7 @@ func TestResourceGitCredentialUpdate_Error(t *testing.T) { { Method: "PATCH", Resource: fmt.Sprintf("/api/2.0/git-credentials/%d", credID), - ExpectedRequest: workspace.UpdateCredentials{ + ExpectedRequest: workspace.UpdateCredentialsRequest{ CredentialId: int64(credID), GitProvider: provider, GitUsername: user, @@ -168,7 +168,7 @@ func TestResourceGitCredentialCreate(t *testing.T) { { Method: "POST", Resource: "/api/2.0/git-credentials", - ExpectedRequest: workspace.CreateCredentials{ + ExpectedRequest: workspace.CreateCredentialsRequest{ GitProvider: provider, GitUsername: user, PersonalAccessToken: token, @@ -200,7 +200,7 @@ func TestResourceGitCredentialCreate_Error(t *testing.T) { { Method: "POST", Resource: "/api/2.0/git-credentials", - ExpectedRequest: workspace.CreateCredentials{ + ExpectedRequest: workspace.CreateCredentialsRequest{ GitProvider: provider, GitUsername: user, PersonalAccessToken: token, @@ -236,7 +236,7 @@ func TestResourceGitCredentialCreateWithForce(t *testing.T) { { Method: "POST", Resource: "/api/2.0/git-credentials", - ExpectedRequest: workspace.CreateCredentials{ + ExpectedRequest: workspace.CreateCredentialsRequest{ GitProvider: provider, GitUsername: user, PersonalAccessToken: token, @@ -250,14 +250,14 @@ func TestResourceGitCredentialCreateWithForce(t *testing.T) { { Method: http.MethodGet, Resource: "/api/2.0/git-credentials", - Response: workspace.GetCredentialsResponse{ + Response: workspace.ListCredentialsResponse{ Credentials: []workspace.CredentialInfo{resp}, }, }, { Method: http.MethodPatch, Resource: fmt.Sprintf("/api/2.0/git-credentials/%d", resp.CredentialId), - ExpectedRequest: workspace.UpdateCredentials{ + ExpectedRequest: workspace.UpdateCredentialsRequest{ CredentialId: resp.CredentialId, GitProvider: provider, GitUsername: user, @@ -291,7 +291,7 @@ func TestResourceGitCredentialCreateWithForce_Error_List(t *testing.T) { { Method: "POST", Resource: "/api/2.0/git-credentials", - ExpectedRequest: workspace.CreateCredentials{ + ExpectedRequest: workspace.CreateCredentialsRequest{ GitProvider: provider, GitUsername: user, PersonalAccessToken: token, @@ -332,7 +332,7 @@ func TestResourceGitCredentialCreateWithForce_ErrorEmptyList(t *testing.T) { { Method: "POST", Resource: "/api/2.0/git-credentials", - ExpectedRequest: workspace.CreateCredentials{ + ExpectedRequest: workspace.CreateCredentialsRequest{ GitProvider: provider, GitUsername: user, PersonalAccessToken: token, @@ -374,7 +374,7 @@ func TestResourceGitCredentialCreateWithForce_ErrorUpdate(t *testing.T) { { Method: "POST", Resource: "/api/2.0/git-credentials", - ExpectedRequest: workspace.CreateCredentials{ + ExpectedRequest: 
workspace.CreateCredentialsRequest{ GitProvider: provider, GitUsername: user, PersonalAccessToken: token, @@ -388,7 +388,7 @@ func TestResourceGitCredentialCreateWithForce_ErrorUpdate(t *testing.T) { { Method: http.MethodGet, Resource: "/api/2.0/git-credentials", - Response: workspace.GetCredentialsResponse{ + Response: workspace.ListCredentialsResponse{ Credentials: []workspace.CredentialInfo{resp}, }, },
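
// The git-credential hunks above are mechanical renames tracking the Go SDK:
// CreateCredentials -> CreateCredentialsRequest, UpdateCredentials ->
// UpdateCredentialsRequest, GetGitCredentialRequest -> GetCredentialsRequest,
// and GetCredentialsResponse -> ListCredentialsResponse. A hedged usage sketch
// against the new names; provider, user and token values are placeholders, and
// the response field is assumed from the SDK's CredentialInfo shape:
func createGitCredentialSketch(ctx context.Context, w *databricks.WorkspaceClient) (int64, error) {
	resp, err := w.GitCredentials.Create(ctx, workspace.CreateCredentialsRequest{
		GitProvider:         "gitHub",
		GitUsername:         "user",
		PersonalAccessToken: "<token>",
	})
	if err != nil {
		return 0, err
	}
	return resp.CredentialId, nil
}
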